PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
heapam.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "catalog/namespace.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
Include dependency graph for heapam.c:

Go to the source code of this file.

Macros

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
 
#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define TUPLOCK_from_mxstatus(status)   (MultiXactStatusLock[(status)])
 
#define HEAPDEBUG_1
 
#define HEAPDEBUG_2
 
#define HEAPDEBUG_3
 
#define FRM_NOOP   0x0001
 
#define FRM_INVALIDATE_XMAX   0x0002
 
#define FRM_RETURN_IS_XID   0x0004
 
#define FRM_RETURN_IS_MULTI   0x0008
 
#define FRM_MARK_COMMITTED   0x0010
 

Functions

static HeapScanDesc heap_beginscan_internal (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
 
static void heap_parallelscan_startblock_init (HeapScanDesc scan)
 
static BlockNumber heap_parallelscan_nextpage (HeapScanDesc scan)
 
static HeapTuple heap_prepare_insert (Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
 
static XLogRecPtr log_heap_update (Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tup, bool all_visible_cleared, bool new_all_visible_cleared)
 
static Bitmapset * HeapDetermineModifiedColumns (Relation relation, Bitmapset *interesting_cols, HeapTuple oldtup, HeapTuple newtup)
 
static bool heap_acquire_tuplock (Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
 
static void compute_new_xmax_infomask (TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
 
static HTSU_Result heap_lock_updated_tuple (Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode)
 
static void GetMultiXactIdHintBits (MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
 
static TransactionId MultiXactIdGetUpdateXid (TransactionId xmax, uint16 t_infomask)
 
static bool DoesMultiXactIdConflict (MultiXactId multi, uint16 infomask, LockTupleMode lockmode)
 
static void MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
static bool ConditionalMultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)
 
static XLogRecPtr log_heap_new_cid (Relation relation, HeapTuple tup)
 
static HeapTuple ExtractReplicaIdentity (Relation rel, HeapTuple tup, bool key_modified, bool *copy)
 
static void initscan (HeapScanDesc scan, ScanKey key, bool keep_startblock)
 
void heap_setscanlimits (HeapScanDesc scan, BlockNumber startBlk, BlockNumber numBlks)
 
void heapgetpage (HeapScanDesc scan, BlockNumber page)
 
static void heapgettup (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
static void heapgettup_pagemode (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
Relation relation_open (Oid relationId, LOCKMODE lockmode)
 
Relation try_relation_open (Oid relationId, LOCKMODE lockmode)
 
Relation relation_openrv (const RangeVar *relation, LOCKMODE lockmode)
 
Relation relation_openrv_extended (const RangeVar *relation, LOCKMODE lockmode, bool missing_ok)
 
void relation_close (Relation relation, LOCKMODE lockmode)
 
Relation heap_open (Oid relationId, LOCKMODE lockmode)
 
Relation heap_openrv (const RangeVar *relation, LOCKMODE lockmode)
 
Relation heap_openrv_extended (const RangeVar *relation, LOCKMODE lockmode, bool missing_ok)
 
HeapScanDesc heap_beginscan (Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_catalog (Relation relation, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_strat (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync)
 
HeapScanDesc heap_beginscan_bm (Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_sampling (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_rescan (HeapScanDesc scan, ScanKey key)
 
void heap_rescan_set_params (HeapScanDesc scan, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_endscan (HeapScanDesc scan)
 
Size heap_parallelscan_estimate (Snapshot snapshot)
 
void heap_parallelscan_initialize (ParallelHeapScanDesc target, Relation relation, Snapshot snapshot)
 
void heap_parallelscan_reinitialize (ParallelHeapScanDesc parallel_scan)
 
HeapScanDesc heap_beginscan_parallel (Relation relation, ParallelHeapScanDesc parallel_scan)
 
void heap_update_snapshot (HeapScanDesc scan, Snapshot snapshot)
 
HeapTuple heap_getnext (HeapScanDesc scan, ScanDirection direction)
 
bool heap_fetch (Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf, bool keep_buf, Relation stats_relation)
 
bool heap_hot_search_buffer (ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
 
bool heap_hot_search (ItemPointer tid, Relation relation, Snapshot snapshot, bool *all_dead)
 
void heap_get_latest_tid (Relation relation, Snapshot snapshot, ItemPointer tid)
 
bool HeapTupleUpdateXmaxMatchesXmin (TransactionId xmax, HeapTupleHeader htup)
 
static void UpdateXmaxHintBits (HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
 
BulkInsertState GetBulkInsertState (void)
 
void FreeBulkInsertState (BulkInsertState bistate)
 
void ReleaseBulkInsertStatePin (BulkInsertState bistate)
 
Oid heap_insert (Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
 
void heap_multi_insert (Relation relation, HeapTuple *tuples, int ntuples, CommandId cid, int options, BulkInsertState bistate)
 
Oid simple_heap_insert (Relation relation, HeapTuple tup)
 
static uint8 compute_infobits (uint16 infomask, uint16 infomask2)
 
static bool xmax_infomask_changed (uint16 new_infomask, uint16 old_infomask)
 
HTSU_Result heap_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, HeapUpdateFailureData *hufd)
 
void simple_heap_delete (Relation relation, ItemPointer tid)
 
HTSU_Result heap_update (Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, HeapUpdateFailureData *hufd, LockTupleMode *lockmode)
 
static bool heap_tuple_attr_equals (TupleDesc tupdesc, int attrnum, HeapTuple tup1, HeapTuple tup2)
 
void simple_heap_update (Relation relation, ItemPointer otid, HeapTuple tup)
 
static MultiXactStatus get_mxact_status_for_lock (LockTupleMode mode, bool is_update)
 
HTSU_Result heap_lock_tuple (Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, HeapUpdateFailureData *hufd)
 
static HTSU_Result test_lockmode_for_conflict (MultiXactStatus status, TransactionId xid, LockTupleMode mode, bool *needwait)
 
static HTSU_Result heap_lock_updated_tuple_rec (Relation rel, ItemPointer tid, TransactionId xid, LockTupleMode mode)
 
void heap_finish_speculative (Relation relation, HeapTuple tuple)
 
void heap_abort_speculative (Relation relation, HeapTuple tuple)
 
void heap_inplace_update (Relation relation, HeapTuple tuple)
 
static TransactionId FreezeMultiXactId (MultiXactId multi, uint16 t_infomask, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags)
 
bool heap_prepare_freeze_tuple (HeapTupleHeader tuple, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
 
void heap_execute_freeze_tuple (HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
 
bool heap_freeze_tuple (HeapTupleHeader tuple, TransactionId cutoff_xid, TransactionId cutoff_multi)
 
TransactionId HeapTupleGetUpdateXid (HeapTupleHeader tuple)
 
static bool Do_MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
bool heap_tuple_needs_eventual_freeze (HeapTupleHeader tuple)
 
bool heap_tuple_needs_freeze (HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf)
 
void HeapTupleHeaderAdvanceLatestRemovedXid (HeapTupleHeader tuple, TransactionId *latestRemovedXid)
 
XLogRecPtr log_heap_cleanup_info (RelFileNode rnode, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_clean (Relation reln, Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_freeze (Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples)
 
XLogRecPtr log_heap_visible (RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags)
 
static void heap_xlog_cleanup_info (XLogReaderState *record)
 
static void heap_xlog_clean (XLogReaderState *record)
 
static void heap_xlog_visible (XLogReaderState *record)
 
static void heap_xlog_freeze_page (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_sync (Relation rel)
 
void heap_mask (char *pagedata, BlockNumber blkno)
 

Variables

bool synchronize_seqscans = true
 
struct {
   LOCKMODE   hwlock
 
   int   lockstatus
 
   int   updstatus
 
} tupleLockExtraInfo [MaxLockTupleMode+1]
 
static const int MultiXactStatusLock [MaxMultiXactStatus+1]
 

Macro Definition Documentation

#define ConditionalLockTupleTuplock (   rel,
  tup,
  mode 
)    ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 185 of file heapam.c.

Referenced by heap_acquire_tuplock().

#define FRM_INVALIDATE_XMAX   0x0002

Definition at line 6374 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_MARK_COMMITTED   0x0010

Definition at line 6377 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_NOOP   0x0001

Definition at line 6373 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_RETURN_IS_MULTI   0x0008

Definition at line 6376 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_RETURN_IS_XID   0x0004

Definition at line 6375 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define HEAPDEBUG_1

Definition at line 1801 of file heapam.c.

Referenced by heap_getnext().

#define HEAPDEBUG_2

Definition at line 1802 of file heapam.c.

Referenced by heap_getnext().

#define HEAPDEBUG_3

Definition at line 1803 of file heapam.c.

Referenced by heap_getnext().

#define LOCKMODE_from_mxstatus (   status)    (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
#define LockTupleTuplock (   rel,
  tup,
  mode 
)    LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 181 of file heapam.c.

Referenced by heap_acquire_tuplock().

#define TUPLOCK_from_mxstatus (   status)    (MultiXactStatusLock[(status)])

Definition at line 203 of file heapam.c.

Referenced by compute_new_xmax_infomask(), GetMultiXactIdHintBits(), and heap_lock_tuple().

#define UnlockTupleTuplock (   rel,
  tup,
  mode 
)    UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 183 of file heapam.c.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

Function Documentation

static uint8 compute_infobits ( uint16  infomask,
uint16  infomask2 
)
static

Definition at line 3014 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_abort_speculative(), heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_update(), and log_heap_update().

3015 {
3016  return
3017  ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
3018  ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
3019  ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
3020  /* note we ignore HEAP_XMAX_SHR_LOCK here */
3021  ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
3022  ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
3023  XLHL_KEYS_UPDATED : 0);
3024 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:241
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:240
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:242
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:244
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:243
static void compute_new_xmax_infomask ( TransactionId  xmax,
uint16  old_infomask,
uint16  old_infomask2,
TransactionId  add_to_xmax,
LockTupleMode  mode,
bool  is_update,
TransactionId *  result_xmax,
uint16 *  result_infomask,
uint16 *  result_infomask2 
)
static

Definition at line 5318 of file heapam.c.

References Assert, elog, ERROR, get_mxact_status_for_lock(), GetMultiXactIdHintBits(), HEAP_KEYS_UPDATED, HEAP_LOCKED_UPGRADED, HEAP_XMAX_COMMITTED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, InvalidTransactionId, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactIdCreate(), MultiXactIdExpand(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TUPLOCK_from_mxstatus, and WARNING.

Referenced by heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), and heap_update().

5323 {
5324  TransactionId new_xmax;
5325  uint16 new_infomask,
5326  new_infomask2;
5327 
5329 
5330 l5:
5331  new_infomask = 0;
5332  new_infomask2 = 0;
5333  if (old_infomask & HEAP_XMAX_INVALID)
5334  {
5335  /*
5336  * No previous locker; we just insert our own TransactionId.
5337  *
5338  * Note that it's critical that this case be the first one checked,
5339  * because there are several blocks below that come back to this one
5340  * to implement certain optimizations; old_infomask might contain
5341  * other dirty bits in those cases, but we don't really care.
5342  */
5343  if (is_update)
5344  {
5345  new_xmax = add_to_xmax;
5346  if (mode == LockTupleExclusive)
5347  new_infomask2 |= HEAP_KEYS_UPDATED;
5348  }
5349  else
5350  {
5351  new_infomask |= HEAP_XMAX_LOCK_ONLY;
5352  switch (mode)
5353  {
5354  case LockTupleKeyShare:
5355  new_xmax = add_to_xmax;
5356  new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
5357  break;
5358  case LockTupleShare:
5359  new_xmax = add_to_xmax;
5360  new_infomask |= HEAP_XMAX_SHR_LOCK;
5361  break;
5363  new_xmax = add_to_xmax;
5364  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5365  break;
5366  case LockTupleExclusive:
5367  new_xmax = add_to_xmax;
5368  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5369  new_infomask2 |= HEAP_KEYS_UPDATED;
5370  break;
5371  default:
5372  new_xmax = InvalidTransactionId; /* silence compiler */
5373  elog(ERROR, "invalid lock mode");
5374  }
5375  }
5376  }
5377  else if (old_infomask & HEAP_XMAX_IS_MULTI)
5378  {
5379  MultiXactStatus new_status;
5380 
5381  /*
5382  * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5383  * cross-check.
5384  */
5385  Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
5386 
5387  /*
5388  * A multixact together with LOCK_ONLY set but neither lock bit set
5389  * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5390  * anymore. This check is critical for databases upgraded by
5391  * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5392  * that such multis are never passed.
5393  */
5394  if (HEAP_LOCKED_UPGRADED(old_infomask))
5395  {
5396  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5397  old_infomask |= HEAP_XMAX_INVALID;
5398  goto l5;
5399  }
5400 
5401  /*
5402  * If the XMAX is already a MultiXactId, then we need to expand it to
5403  * include add_to_xmax; but if all the members were lockers and are
5404  * all gone, we can do away with the IS_MULTI bit and just set
5405  * add_to_xmax as the only locker/updater. If all lockers are gone
5406  * and we have an updater that aborted, we can also do without a
5407  * multi.
5408  *
5409  * The cost of doing GetMultiXactIdMembers would be paid by
5410  * MultiXactIdExpand if we weren't to do this, so this check is not
5411  * incurring extra work anyhow.
5412  */
5413  if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
5414  {
5415  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
5417  old_infomask)))
5418  {
5419  /*
5420  * Reset these bits and restart; otherwise fall through to
5421  * create a new multi below.
5422  */
5423  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5424  old_infomask |= HEAP_XMAX_INVALID;
5425  goto l5;
5426  }
5427  }
5428 
5429  new_status = get_mxact_status_for_lock(mode, is_update);
5430 
5431  new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5432  new_status);
5433  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5434  }
5435  else if (old_infomask & HEAP_XMAX_COMMITTED)
5436  {
5437  /*
5438  * It's a committed update, so we need to preserve him as updater of
5439  * the tuple.
5440  */
5442  MultiXactStatus new_status;
5443 
5444  if (old_infomask2 & HEAP_KEYS_UPDATED)
5445  status = MultiXactStatusUpdate;
5446  else
5447  status = MultiXactStatusNoKeyUpdate;
5448 
5449  new_status = get_mxact_status_for_lock(mode, is_update);
5450 
5451  /*
5452  * since it's not running, it's obviously impossible for the old
5453  * updater to be identical to the current one, so we need not check
5454  * for that case as we do in the block above.
5455  */
5456  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5457  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5458  }
5459  else if (TransactionIdIsInProgress(xmax))
5460  {
5461  /*
5462  * If the XMAX is a valid, in-progress TransactionId, then we need to
5463  * create a new MultiXactId that includes both the old locker or
5464  * updater and our own TransactionId.
5465  */
5466  MultiXactStatus new_status;
5467  MultiXactStatus old_status;
5468  LockTupleMode old_mode;
5469 
5470  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5471  {
5472  if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5473  old_status = MultiXactStatusForKeyShare;
5474  else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5475  old_status = MultiXactStatusForShare;
5476  else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5477  {
5478  if (old_infomask2 & HEAP_KEYS_UPDATED)
5479  old_status = MultiXactStatusForUpdate;
5480  else
5481  old_status = MultiXactStatusForNoKeyUpdate;
5482  }
5483  else
5484  {
5485  /*
5486  * LOCK_ONLY can be present alone only when a page has been
5487  * upgraded by pg_upgrade. But in that case,
5488  * TransactionIdIsInProgress() should have returned false. We
5489  * assume it's no longer locked in this case.
5490  */
5491  elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5492  old_infomask |= HEAP_XMAX_INVALID;
5493  old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
5494  goto l5;
5495  }
5496  }
5497  else
5498  {
5499  /* it's an update, but which kind? */
5500  if (old_infomask2 & HEAP_KEYS_UPDATED)
5501  old_status = MultiXactStatusUpdate;
5502  else
5503  old_status = MultiXactStatusNoKeyUpdate;
5504  }
5505 
5506  old_mode = TUPLOCK_from_mxstatus(old_status);
5507 
5508  /*
5509  * If the lock to be acquired is for the same TransactionId as the
5510  * existing lock, there's an optimization possible: consider only the
5511  * strongest of both locks as the only one present, and restart.
5512  */
5513  if (xmax == add_to_xmax)
5514  {
5515  /*
5516  * Note that it's not possible for the original tuple to be
5517  * updated: we wouldn't be here because the tuple would have been
5518  * invisible and we wouldn't try to update it. As a subtlety,
5519  * this code can also run when traversing an update chain to lock
5520  * future versions of a tuple. But we wouldn't be here either,
5521  * because the add_to_xmax would be different from the original
5522  * updater.
5523  */
5524  Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5525 
5526  /* acquire the strongest of both */
5527  if (mode < old_mode)
5528  mode = old_mode;
5529  /* mustn't touch is_update */
5530 
5531  old_infomask |= HEAP_XMAX_INVALID;
5532  goto l5;
5533  }
5534 
5535  /* otherwise, just fall back to creating a new multixact */
5536  new_status = get_mxact_status_for_lock(mode, is_update);
5537  new_xmax = MultiXactIdCreate(xmax, old_status,
5538  add_to_xmax, new_status);
5539  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5540  }
5541  else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
5542  TransactionIdDidCommit(xmax))
5543  {
5544  /*
5545  * It's a committed update, so we gotta preserve him as updater of the
5546  * tuple.
5547  */
5549  MultiXactStatus new_status;
5550 
5551  if (old_infomask2 & HEAP_KEYS_UPDATED)
5552  status = MultiXactStatusUpdate;
5553  else
5554  status = MultiXactStatusNoKeyUpdate;
5555 
5556  new_status = get_mxact_status_for_lock(mode, is_update);
5557 
5558  /*
5559  * since it's not running, it's obviously impossible for the old
5560  * updater to be identical to the current one, so we need not check
5561  * for that case as we do in the block above.
5562  */
5563  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5564  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5565  }
5566  else
5567  {
5568  /*
5569  * Can get here iff the locking/updating transaction was running when
5570  * the infomask was extracted from the tuple, but finished before
5571  * TransactionIdIsInProgress got to run. Deal with it as if there was
5572  * no locker at all in the first place.
5573  */
5574  old_infomask |= HEAP_XMAX_INVALID;
5575  goto l5;
5576  }
5577 
5578  *result_infomask = new_infomask;
5579  *result_infomask2 = new_infomask2;
5580  *result_xmax = new_xmax;
5581 }
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
Definition: heapam.c:6915
MultiXactStatus
Definition: multixact.h:40
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
uint32 TransactionId
Definition: c.h:391
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:766
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
#define HEAP_XMAX_COMMITTED
Definition: htup_details.h:192
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:185
#define HEAP_XMAX_IS_SHR_LOCKED(infomask)
Definition: htup_details.h:248
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6996
LockTupleMode
Definition: heapam.h:38
unsigned short uint16
Definition: c.h:257
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:193
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define InvalidTransactionId
Definition: transam.h:31
#define WARNING
Definition: elog.h:40
MultiXactId MultiXactIdCreate(TransactionId xid1, MultiXactStatus status1, TransactionId xid2, MultiXactStatus status2)
Definition: multixact.c:384
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
TransactionId MultiXactId
Definition: c.h:401
#define Assert(condition)
Definition: c.h:681
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:203
static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
Definition: heapam.c:4550
#define HEAP_XMAX_IS_EXCL_LOCKED(infomask)
Definition: htup_details.h:250
#define elog
Definition: elog.h:219
#define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask)
Definition: htup_details.h:252
static void status(const char *fmt,...) pg_attribute_printf(1, 2)
Definition: pg_regress.c:225
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:549
MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
Definition: multixact.c:437
static bool ConditionalMultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
Relation  rel,
int *  remaining 
)
static

Definition at line 7250 of file heapam.c.

References Do_MultiXactIdWait(), and XLTW_None.

Referenced by heap_lock_tuple().

7252 {
7253  return Do_MultiXactIdWait(multi, status, infomask, true,
7254  rel, NULL, XLTW_None, remaining);
7255 }
int remaining
Definition: informix.c:692
Definition: lmgr.h:26
static bool Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:7150
static void status(const char *fmt,...) pg_attribute_printf(1, 2)
Definition: pg_regress.c:225
static bool Do_MultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
bool  nowait,
Relation  rel,
ItemPointer  ctid,
XLTW_Oper  oper,
int *  remaining 
)
static

Definition at line 7150 of file heapam.c.

References ConditionalXactLockTableWait(), DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, LOCKMODE_from_mxstatus, pfree(), MultiXactMember::status, TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), XactLockTableWait(), and MultiXactMember::xid.

Referenced by ConditionalMultiXactIdWait(), and MultiXactIdWait().

7154 {
7155  bool result = true;
7156  MultiXactMember *members;
7157  int nmembers;
7158  int remain = 0;
7159 
7160  /* for pre-pg_upgrade tuples, no need to sleep at all */
7161  nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7162  GetMultiXactIdMembers(multi, &members, false,
7163  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7164 
7165  if (nmembers >= 0)
7166  {
7167  int i;
7168 
7169  for (i = 0; i < nmembers; i++)
7170  {
7171  TransactionId memxid = members[i].xid;
7172  MultiXactStatus memstatus = members[i].status;
7173 
7175  {
7176  remain++;
7177  continue;
7178  }
7179 
7182  {
7183  if (remaining && TransactionIdIsInProgress(memxid))
7184  remain++;
7185  continue;
7186  }
7187 
7188  /*
7189  * This member conflicts with our multi, so we have to sleep (or
7190  * return failure, if asked to avoid waiting.)
7191  *
7192  * Note that we don't set up an error context callback ourselves,
7193  * but instead we pass the info down to XactLockTableWait. This
7194  * might seem a bit wasteful because the context is set up and
7195  * tore down for each member of the multixact, but in reality it
7196  * should be barely noticeable, and it avoids duplicate code.
7197  */
7198  if (nowait)
7199  {
7200  result = ConditionalXactLockTableWait(memxid);
7201  if (!result)
7202  break;
7203  }
7204  else
7205  XactLockTableWait(memxid, rel, ctid, oper);
7206  }
7207 
7208  pfree(members);
7209  }
7210 
7211  if (remaining)
7212  *remaining = remain;
7213 
7214  return result;
7215 }
int remaining
Definition: informix.c:692
MultiXactStatus
Definition: multixact.h:40
uint32 TransactionId
Definition: c.h:391
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:766
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:173
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:607
void pfree(void *pointer)
Definition: mcxt.c:949
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:556
MultiXactStatus status
Definition: multixact.h:62
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:554
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
Operator oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId, bool noError, int location)
Definition: parse_oper.c:377
static void status(const char *fmt,...) pg_attribute_printf(1, 2)
Definition: pg_regress.c:225
static bool DoesMultiXactIdConflict ( MultiXactId  multi,
uint16  infomask,
LockTupleMode  lockmode 
)
static

Definition at line 7061 of file heapam.c.

References DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, ISUPDATE_from_mxstatus, LOCKMODE_from_mxstatus, pfree(), status(), TransactionIdDidAbort(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), tupleLockExtraInfo, and MultiXactMember::xid.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

7063 {
7064  int nmembers;
7065  MultiXactMember *members;
7066  bool result = false;
7067  LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
7068 
7069  if (HEAP_LOCKED_UPGRADED(infomask))
7070  return false;
7071 
7072  nmembers = GetMultiXactIdMembers(multi, &members, false,
7073  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7074  if (nmembers >= 0)
7075  {
7076  int i;
7077 
7078  for (i = 0; i < nmembers; i++)
7079  {
7080  TransactionId memxid;
7081  LOCKMODE memlockmode;
7082 
7083  memlockmode = LOCKMODE_from_mxstatus(members[i].status);
7084 
7085  /* ignore members that don't conflict with the lock we want */
7086  if (!DoLockModesConflict(memlockmode, wanted))
7087  continue;
7088 
7089  /* ignore members from current xact */
7090  memxid = members[i].xid;
7092  continue;
7093 
7094  if (ISUPDATE_from_mxstatus(members[i].status))
7095  {
7096  /* ignore aborted updaters */
7097  if (TransactionIdDidAbort(memxid))
7098  continue;
7099  }
7100  else
7101  {
7102  /* ignore lockers-only that are no longer in progress */
7103  if (!TransactionIdIsInProgress(memxid))
7104  continue;
7105  }
7106 
7107  /*
7108  * Whatever remains are either live lockers that conflict with our
7109  * wanted lock, and updaters that are not aborted. Those conflict
7110  * with what we want, so return true.
7111  */
7112  result = true;
7113  break;
7114  }
7115  pfree(members);
7116  }
7117 
7118  return result;
7119 }
uint32 TransactionId
Definition: c.h:391
int LOCKMODE
Definition: lockdefs.h:26
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:766
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:173
void pfree(void *pointer)
Definition: mcxt.c:949
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:556
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
bool TransactionIdDidAbort(TransactionId transactionId)
Definition: transam.c:181
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
static const struct @20 tupleLockExtraInfo[MaxLockTupleMode+1]
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225
static HeapTuple ExtractReplicaIdentity ( Relation  rel,
HeapTuple  tup,
bool  key_modified,
bool copy 
)
static

Definition at line 7888 of file heapam.c.

References DEBUG4, elog, ERROR, heap_deform_tuple(), heap_form_tuple(), heap_freetuple(), HeapTupleGetOid, HeapTupleHasExternal, HeapTupleSetOid, MaxHeapAttributeNumber, tupleDesc::natts, ObjectIdAttributeNumber, OidIsValid, RelationData::rd_index, RelationData::rd_rel, RelationClose(), RelationGetDescr, RelationGetRelationName, RelationGetReplicaIndex(), RelationIdGetRelation(), RelationIsLogicallyLogged, REPLICA_IDENTITY_FULL, REPLICA_IDENTITY_NOTHING, toast_flatten_tuple(), and values.

Referenced by heap_delete(), and heap_update().

7889 {
7890  TupleDesc desc = RelationGetDescr(relation);
7891  Oid replidindex;
7892  Relation idx_rel;
7893  TupleDesc idx_desc;
7894  char replident = relation->rd_rel->relreplident;
7895  HeapTuple key_tuple = NULL;
7896  bool nulls[MaxHeapAttributeNumber];
7898  int natt;
7899 
7900  *copy = false;
7901 
7902  if (!RelationIsLogicallyLogged(relation))
7903  return NULL;
7904 
7905  if (replident == REPLICA_IDENTITY_NOTHING)
7906  return NULL;
7907 
7908  if (replident == REPLICA_IDENTITY_FULL)
7909  {
7910  /*
7911  * When logging the entire old tuple, it very well could contain
7912  * toasted columns. If so, force them to be inlined.
7913  */
7914  if (HeapTupleHasExternal(tp))
7915  {
7916  *copy = true;
7917  tp = toast_flatten_tuple(tp, RelationGetDescr(relation));
7918  }
7919  return tp;
7920  }
7921 
7922  /* if the key hasn't changed and we're only logging the key, we're done */
7923  if (!key_changed)
7924  return NULL;
7925 
7926  /* find the replica identity index */
7927  replidindex = RelationGetReplicaIndex(relation);
7928  if (!OidIsValid(replidindex))
7929  {
7930  elog(DEBUG4, "could not find configured replica identity for table \"%s\"",
7931  RelationGetRelationName(relation));
7932  return NULL;
7933  }
7934 
7935  idx_rel = RelationIdGetRelation(replidindex);
7936  idx_desc = RelationGetDescr(idx_rel);
7937 
7938  /* deform tuple, so we have fast access to columns */
7939  heap_deform_tuple(tp, desc, values, nulls);
7940 
7941  /* set all columns to NULL, regardless of whether they actually are */
7942  memset(nulls, 1, sizeof(nulls));
7943 
7944  /*
7945  * Now set all columns contained in the index to NOT NULL, they cannot
7946  * currently be NULL.
7947  */
7948  for (natt = 0; natt < idx_desc->natts; natt++)
7949  {
7950  int attno = idx_rel->rd_index->indkey.values[natt];
7951 
7952  if (attno < 0)
7953  {
7954  /*
7955  * The OID column can appear in an index definition, but that's
7956  * OK, because we always copy the OID if present (see below).
7957  * Other system columns may not.
7958  */
7959  if (attno == ObjectIdAttributeNumber)
7960  continue;
7961  elog(ERROR, "system column in index");
7962  }
7963  nulls[attno - 1] = false;
7964  }
7965 
7966  key_tuple = heap_form_tuple(desc, values, nulls);
7967  *copy = true;
7968  RelationClose(idx_rel);
7969 
7970  /*
7971  * Always copy oids if the table has them, even if not included in the
7972  * index. The space in the logged tuple is used anyway, so there's little
7973  * point in not including the information.
7974  */
7975  if (relation->rd_rel->relhasoids)
7976  HeapTupleSetOid(key_tuple, HeapTupleGetOid(tp));
7977 
7978  /*
7979  * If the tuple, which by here only contains indexed columns, still has
7980  * toasted columns, force them to be inlined. This is somewhat unlikely
7981  * since there's limits on the size of indexed columns, so we don't
7982  * duplicate toast_flatten_tuple()s functionality in the above loop over
7983  * the indexed columns, even if it would be more efficient.
7984  */
7985  if (HeapTupleHasExternal(key_tuple))
7986  {
7987  HeapTuple oldtup = key_tuple;
7988 
7989  key_tuple = toast_flatten_tuple(oldtup, RelationGetDescr(relation));
7990  heap_freetuple(oldtup);
7991  }
7992 
7993  return key_tuple;
7994 }
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: tuptoaster.c:1085
Oid RelationGetReplicaIndex(Relation relation)
Definition: relcache.c:4688
#define RelationGetDescr(relation)
Definition: rel.h:428
#define ObjectIdAttributeNumber
Definition: sysattr.h:22
#define REPLICA_IDENTITY_NOTHING
Definition: pg_class.h:177
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:695
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:575
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1373
unsigned int Oid
Definition: postgres_ext.h:31
#define DEBUG4
Definition: elog.h:22
#define OidIsValid(objectId)
Definition: c.h:532
int natts
Definition: tupdesc.h:73
#define HeapTupleSetOid(tuple, oid)
Definition: htup_details.h:698
Form_pg_index rd_index
Definition: rel.h:159
#define REPLICA_IDENTITY_FULL
Definition: pg_class.h:179
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:436
void RelationClose(Relation relation)
Definition: relcache.c:2164
uintptr_t Datum
Definition: postgres.h:372
#define MaxHeapAttributeNumber
Definition: htup_details.h:47
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition: heaptuple.c:936
static Datum values[MAXATTR]
Definition: bootstrap.c:164
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:674
#define elog
Definition: elog.h:219
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:695
Relation RelationIdGetRelation(Oid relationId)
Definition: relcache.c:2075
static void fix_infomask_from_infobits ( uint8  infobits,
uint16 infomask,
uint16 infomask2 
)
static

Definition at line 8284 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_xlog_delete(), heap_xlog_lock(), heap_xlog_lock_updated(), and heap_xlog_update().

8285 {
8286  *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
8288  *infomask2 &= ~HEAP_KEYS_UPDATED;
8289 
8290  if (infobits & XLHL_XMAX_IS_MULTI)
8291  *infomask |= HEAP_XMAX_IS_MULTI;
8292  if (infobits & XLHL_XMAX_LOCK_ONLY)
8293  *infomask |= HEAP_XMAX_LOCK_ONLY;
8294  if (infobits & XLHL_XMAX_EXCL_LOCK)
8295  *infomask |= HEAP_XMAX_EXCL_LOCK;
8296  /* note HEAP_XMAX_SHR_LOCK isn't considered here */
8297  if (infobits & XLHL_XMAX_KEYSHR_LOCK)
8298  *infomask |= HEAP_XMAX_KEYSHR_LOCK;
8299 
8300  if (infobits & XLHL_KEYS_UPDATED)
8301  *infomask2 |= HEAP_KEYS_UPDATED;
8302 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:241
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:240
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:242
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:244
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:243
void FreeBulkInsertState ( BulkInsertState  bistate)

Definition at line 2393 of file heapam.c.

References BulkInsertStateData::current_buf, FreeAccessStrategy(), InvalidBuffer, pfree(), ReleaseBuffer(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), intorel_shutdown(), and transientrel_shutdown().

2394 {
2395  if (bistate->current_buf != InvalidBuffer)
2396  ReleaseBuffer(bistate->current_buf);
2397  FreeAccessStrategy(bistate->strategy);
2398  pfree(bistate);
2399 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
void pfree(void *pointer)
Definition: mcxt.c:949
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
BufferAccessStrategy strategy
Definition: hio.h:33
Buffer current_buf
Definition: hio.h:34
static TransactionId FreezeMultiXactId ( MultiXactId  multi,
uint16  t_infomask,
TransactionId  cutoff_xid,
MultiXactId  cutoff_multi,
uint16 flags 
)
static

Definition at line 6401 of file heapam.c.

References Assert, FRM_INVALIDATE_XMAX, FRM_MARK_COMMITTED, FRM_NOOP, FRM_RETURN_IS_MULTI, FRM_RETURN_IS_XID, GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, i, InvalidTransactionId, ISUPDATE_from_mxstatus, MultiXactIdCreateFromMembers(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactIdIsValid, MultiXactIdPrecedes(), palloc(), pfree(), status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TransactionIdIsValid, TransactionIdPrecedes(), and MultiXactMember::xid.

Referenced by heap_prepare_freeze_tuple().

6404 {
6406  int i;
6407  MultiXactMember *members;
6408  int nmembers;
6409  bool need_replace;
6410  int nnewmembers;
6411  MultiXactMember *newmembers;
6412  bool has_lockers;
6413  TransactionId update_xid;
6414  bool update_committed;
6415 
6416  *flags = 0;
6417 
6418  /* We should only be called in Multis */
6419  Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6420 
6421  if (!MultiXactIdIsValid(multi) ||
6422  HEAP_LOCKED_UPGRADED(t_infomask))
6423  {
6424  /* Ensure infomask bits are appropriately set/reset */
6425  *flags |= FRM_INVALIDATE_XMAX;
6426  return InvalidTransactionId;
6427  }
6428  else if (MultiXactIdPrecedes(multi, cutoff_multi))
6429  {
6430  /*
6431  * This old multi cannot possibly have members still running. If it
6432  * was a locker only, it can be removed without any further
6433  * consideration; but if it contained an update, we might need to
6434  * preserve it.
6435  */
6437  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)));
6438  if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6439  {
6440  *flags |= FRM_INVALIDATE_XMAX;
6441  xid = InvalidTransactionId; /* not strictly necessary */
6442  }
6443  else
6444  {
6445  /* replace multi by update xid */
6446  xid = MultiXactIdGetUpdateXid(multi, t_infomask);
6447 
6448  /* wasn't only a lock, xid needs to be valid */
6450 
6451  /*
6452  * The updating transaction cannot possibly be still running, but
6453  * verify whether it has committed, and request to set the
6454  * COMMITTED flag if so. (We normally don't see any tuples in
6455  * this state, because they are removed by page pruning before we
6456  * try to freeze the page; but this can happen if the updating
6457  * transaction commits after the page is pruned but before
6458  * HeapTupleSatisfiesVacuum).
6459  */
6460  if (TransactionIdPrecedes(xid, cutoff_xid))
6461  {
6462  if (TransactionIdDidCommit(xid))
6464  else
6465  {
6466  *flags |= FRM_INVALIDATE_XMAX;
6467  xid = InvalidTransactionId; /* not strictly necessary */
6468  }
6469  }
6470  else
6471  {
6472  *flags |= FRM_RETURN_IS_XID;
6473  }
6474  }
6475 
6476  return xid;
6477  }
6478 
6479  /*
6480  * This multixact might have or might not have members still running, but
6481  * we know it's valid and is newer than the cutoff point for multis.
6482  * However, some member(s) of it may be below the cutoff for Xids, so we
6483  * need to walk the whole members array to figure out what to do, if
6484  * anything.
6485  */
6486 
6487  nmembers =
6488  GetMultiXactIdMembers(multi, &members, false,
6489  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6490  if (nmembers <= 0)
6491  {
6492  /* Nothing worth keeping */
6493  *flags |= FRM_INVALIDATE_XMAX;
6494  return InvalidTransactionId;
6495  }
6496 
6497  /* is there anything older than the cutoff? */
6498  need_replace = false;
6499  for (i = 0; i < nmembers; i++)
6500  {
6501  if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
6502  {
6503  need_replace = true;
6504  break;
6505  }
6506  }
6507 
6508  /*
6509  * In the simplest case, there is no member older than the cutoff; we can
6510  * keep the existing MultiXactId as is.
6511  */
6512  if (!need_replace)
6513  {
6514  *flags |= FRM_NOOP;
6515  pfree(members);
6516  return InvalidTransactionId;
6517  }
6518 
6519  /*
6520  * If the multi needs to be updated, figure out which members do we need
6521  * to keep.
6522  */
6523  nnewmembers = 0;
6524  newmembers = palloc(sizeof(MultiXactMember) * nmembers);
6525  has_lockers = false;
6526  update_xid = InvalidTransactionId;
6527  update_committed = false;
6528 
6529  for (i = 0; i < nmembers; i++)
6530  {
6531  /*
6532  * Determine whether to keep this member or ignore it.
6533  */
6534  if (ISUPDATE_from_mxstatus(members[i].status))
6535  {
6536  TransactionId xid = members[i].xid;
6537 
6538  /*
6539  * It's an update; should we keep it? If the transaction is known
6540  * aborted or crashed then it's okay to ignore it, otherwise not.
6541  *
6542  * As with all tuple visibility routines, it's critical to test
6543  * TransactionIdIsInProgress before TransactionIdDidCommit,
6544  * because of race conditions explained in detail in tqual.c.
6545  *
6546  * We normally don't see committed updating transactions earlier
6547  * than the cutoff xid, because they are removed by page pruning
6548  * before we try to freeze the page; but it can happen if the
6549  * updating transaction commits after the page is pruned but
6550  * before HeapTupleSatisfiesVacuum.
6551  */
6554  {
6555  Assert(!TransactionIdIsValid(update_xid));
6556  update_xid = xid;
6557  }
6558  else if (TransactionIdDidCommit(xid))
6559  {
6560  /*
6561  * The transaction committed, so we can tell caller to set
6562  * HEAP_XMAX_COMMITTED. (We can only do this because we know
6563  * the transaction is not running.)
6564  */
6565  Assert(!TransactionIdIsValid(update_xid));
6566  update_committed = true;
6567  update_xid = xid;
6568  }
6569 
6570  /*
6571  * Not in progress, not committed -- must be aborted or crashed;
6572  * we can ignore it.
6573  */
6574 
6575  /*
6576  * If we determined that it's an Xid corresponding to an update
6577  * that must be retained, additionally add it to the list of
6578  * members of the new Multi, in case we end up using that. (We
6579  * might still decide to use only an update Xid and not a multi,
6580  * but it's easier to maintain the list as we walk the old members
6581  * list.)
6582  */
6583  if (TransactionIdIsValid(update_xid))
6584  newmembers[nnewmembers++] = members[i];
6585  }
6586  else
6587  {
6588  /* We only keep lockers if they are still running */
6589  if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
6590  TransactionIdIsInProgress(members[i].xid))
6591  {
6592  /* running locker cannot possibly be older than the cutoff */
6593  Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
6594  newmembers[nnewmembers++] = members[i];
6595  has_lockers = true;
6596  }
6597  }
6598  }
6599 
6600  pfree(members);
6601 
6602  if (nnewmembers == 0)
6603  {
6604  /* nothing worth keeping!? Tell caller to remove the whole thing */
6605  *flags |= FRM_INVALIDATE_XMAX;
6606  xid = InvalidTransactionId;
6607  }
6608  else if (TransactionIdIsValid(update_xid) && !has_lockers)
6609  {
6610  /*
6611  * If there's a single member and it's an update, pass it back alone
6612  * without creating a new Multi. (XXX we could do this when there's a
6613  * single remaining locker, too, but that would complicate the API too
6614  * much; moreover, the case with the single updater is more
6615  * interesting, because those are longer-lived.)
6616  */
6617  Assert(nnewmembers == 1);
6618  *flags |= FRM_RETURN_IS_XID;
6619  if (update_committed)
6620  *flags |= FRM_MARK_COMMITTED;
6621  xid = update_xid;
6622  }
6623  else
6624  {
6625  /*
6626  * Create a new multixact with the surviving members of the previous
6627  * one, to set as new Xmax in the tuple.
6628  */
6629  xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
6630  *flags |= FRM_RETURN_IS_MULTI;
6631  }
6632 
6633  pfree(newmembers);
6634 
6635  return xid;
6636 }
#define FRM_RETURN_IS_XID
Definition: heapam.c:6375
#define FRM_MARK_COMMITTED
Definition: heapam.c:6377
uint32 TransactionId
Definition: c.h:391
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:766
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
Definition: multixact.c:746
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6996
void pfree(void *pointer)
Definition: mcxt.c:949
TransactionId xid
Definition: multixact.h:61
#define FRM_INVALIDATE_XMAX
Definition: heapam.c:6374
#define InvalidTransactionId
Definition: transam.h:31
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
#define MultiXactIdIsValid(multi)
Definition: multixact.h:27
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
#define FRM_RETURN_IS_MULTI
Definition: heapam.c:6376
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define Assert(condition)
Definition: c.h:681
#define FRM_NOOP
Definition: heapam.c:6373
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3140
void * palloc(Size size)
Definition: mcxt.c:848
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:549
static MultiXactStatus get_mxact_status_for_lock ( LockTupleMode  mode,
bool  is_update 
)
static

Definition at line 4550 of file heapam.c.

References elog, ERROR, and tupleLockExtraInfo.

Referenced by compute_new_xmax_infomask(), heap_lock_tuple(), and test_lockmode_for_conflict().

4551 {
4552  int retval;
4553 
4554  if (is_update)
4555  retval = tupleLockExtraInfo[mode].updstatus;
4556  else
4557  retval = tupleLockExtraInfo[mode].lockstatus;
4558 
4559  if (retval == -1)
4560  elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4561  is_update ? "true" : "false");
4562 
4563  return (MultiXactStatus) retval;
4564 }
MultiXactStatus
Definition: multixact.h:40
#define ERROR
Definition: elog.h:43
static const struct @20 tupleLockExtraInfo[MaxLockTupleMode+1]
#define elog
Definition: elog.h:219
BulkInsertState GetBulkInsertState ( void  )

Definition at line 2379 of file heapam.c.

References BAS_BULKWRITE, BulkInsertStateData::current_buf, GetAccessStrategy(), InvalidBuffer, palloc(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), intorel_startup(), and transientrel_startup().

2380 {
2381  BulkInsertState bistate;
2382 
2383  bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
2385  bistate->current_buf = InvalidBuffer;
2386  return bistate;
2387 }
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition: freelist.c:542
#define InvalidBuffer
Definition: buf.h:25
struct BulkInsertStateData * BulkInsertState
Definition: heapam.h:33
BufferAccessStrategy strategy
Definition: hio.h:33
void * palloc(Size size)
Definition: mcxt.c:848
Buffer current_buf
Definition: hio.h:34
static void GetMultiXactIdHintBits ( MultiXactId  multi,
uint16 new_infomask,
uint16 new_infomask2 
)
static

Definition at line 6915 of file heapam.c.

References GetMultiXactIdMembers(), HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, i, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, pfree(), status(), and TUPLOCK_from_mxstatus.

Referenced by compute_new_xmax_infomask(), heap_prepare_freeze_tuple(), and heap_update().

6917 {
6918  int nmembers;
6919  MultiXactMember *members;
6920  int i;
6921  uint16 bits = HEAP_XMAX_IS_MULTI;
6922  uint16 bits2 = 0;
6923  bool has_update = false;
6924  LockTupleMode strongest = LockTupleKeyShare;
6925 
6926  /*
6927  * We only use this in multis we just created, so they cannot be values
6928  * pre-pg_upgrade.
6929  */
6930  nmembers = GetMultiXactIdMembers(multi, &members, false, false);
6931 
6932  for (i = 0; i < nmembers; i++)
6933  {
6934  LockTupleMode mode;
6935 
6936  /*
6937  * Remember the strongest lock mode held by any member of the
6938  * multixact.
6939  */
6940  mode = TUPLOCK_from_mxstatus(members[i].status);
6941  if (mode > strongest)
6942  strongest = mode;
6943 
6944  /* See what other bits we need */
6945  switch (members[i].status)
6946  {
6950  break;
6951 
6953  bits2 |= HEAP_KEYS_UPDATED;
6954  break;
6955 
6957  has_update = true;
6958  break;
6959 
6960  case MultiXactStatusUpdate:
6961  bits2 |= HEAP_KEYS_UPDATED;
6962  has_update = true;
6963  break;
6964  }
6965  }
6966 
6967  if (strongest == LockTupleExclusive ||
6968  strongest == LockTupleNoKeyExclusive)
6969  bits |= HEAP_XMAX_EXCL_LOCK;
6970  else if (strongest == LockTupleShare)
6971  bits |= HEAP_XMAX_SHR_LOCK;
6972  else if (strongest == LockTupleKeyShare)
6973  bits |= HEAP_XMAX_KEYSHR_LOCK;
6974 
6975  if (!has_update)
6976  bits |= HEAP_XMAX_LOCK_ONLY;
6977 
6978  if (nmembers > 0)
6979  pfree(members);
6980 
6981  *new_infomask = bits;
6982  *new_infomask2 = bits2;
6983 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:185
LockTupleMode
Definition: heapam.h:38
unsigned short uint16
Definition: c.h:257
void pfree(void *pointer)
Definition: mcxt.c:949
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:203
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225
void heap2_redo ( XLogReaderState record)

Definition at line 9148 of file heapam.c.

References elog, heap_xlog_clean(), heap_xlog_cleanup_info(), heap_xlog_freeze_page(), heap_xlog_lock_updated(), heap_xlog_logical_rewrite(), heap_xlog_multi_insert(), heap_xlog_visible(), PANIC, XLOG_HEAP2_CLEAN, XLOG_HEAP2_CLEANUP_INFO, XLOG_HEAP2_FREEZE_PAGE, XLOG_HEAP2_LOCK_UPDATED, XLOG_HEAP2_MULTI_INSERT, XLOG_HEAP2_NEW_CID, XLOG_HEAP2_REWRITE, XLOG_HEAP2_VISIBLE, XLOG_HEAP_OPMASK, XLogRecGetInfo, and XLR_INFO_MASK.

9149 {
9150  uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
9151 
9152  switch (info & XLOG_HEAP_OPMASK)
9153  {
9154  case XLOG_HEAP2_CLEAN:
9155  heap_xlog_clean(record);
9156  break;
9158  heap_xlog_freeze_page(record);
9159  break;
9161  heap_xlog_cleanup_info(record);
9162  break;
9163  case XLOG_HEAP2_VISIBLE:
9164  heap_xlog_visible(record);
9165  break;
9167  heap_xlog_multi_insert(record);
9168  break;
9170  heap_xlog_lock_updated(record);
9171  break;
9172  case XLOG_HEAP2_NEW_CID:
9173 
9174  /*
9175  * Nothing to do on a real replay, only used during logical
9176  * decoding.
9177  */
9178  break;
9179  case XLOG_HEAP2_REWRITE:
9180  heap_xlog_logical_rewrite(record);
9181  break;
9182  default:
9183  elog(PANIC, "heap2_redo: unknown op code %u", info);
9184  }
9185 }
void heap_xlog_logical_rewrite(XLogReaderState *r)
Definition: rewriteheap.c:1117
#define XLOG_HEAP2_LOCK_UPDATED
Definition: heapam_xlog.h:59
#define XLOG_HEAP2_REWRITE
Definition: heapam_xlog.h:53
unsigned char uint8
Definition: c.h:256
#define XLOG_HEAP_OPMASK
Definition: heapam_xlog.h:41
#define PANIC
Definition: elog.h:53
#define XLOG_HEAP2_MULTI_INSERT
Definition: heapam_xlog.h:58
#define XLOG_HEAP2_VISIBLE
Definition: heapam_xlog.h:57
static void heap_xlog_lock_updated(XLogReaderState *record)
Definition: heapam.c:9009
static void heap_xlog_freeze_page(XLogReaderState *record)
Definition: heapam.c:8226
#define XLOG_HEAP2_CLEAN
Definition: heapam_xlog.h:54
#define XLOG_HEAP2_CLEANUP_INFO
Definition: heapam_xlog.h:56
static void heap_xlog_multi_insert(XLogReaderState *record)
Definition: heapam.c:8491
#define XLOG_HEAP2_NEW_CID
Definition: heapam_xlog.h:60
#define XLogRecGetInfo(decoder)
Definition: xlogreader.h:222
#define XLR_INFO_MASK
Definition: xlogrecord.h:62
static void heap_xlog_cleanup_info(XLogReaderState *record)
Definition: heapam.c:8000
#define XLOG_HEAP2_FREEZE_PAGE
Definition: heapam_xlog.h:55
static void heap_xlog_visible(XLogReaderState *record)
Definition: heapam.c:8111
#define elog
Definition: elog.h:219
static void heap_xlog_clean(XLogReaderState *record)
Definition: heapam.c:8021
void heap_abort_speculative ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6149 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, compute_infobits(), elog, END_CRIT_SECTION, ERROR, xl_heap_delete::flags, GetCurrentTransactionId(), HEAP_KEYS_UPDATED, HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHasExternal, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsSpeculative, HeapTupleHeaderSetXmin, xl_heap_delete::infobits_set, InvalidTransactionId, IsToastRelation(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), MarkBufferDirty(), xl_heap_delete::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), ReadBuffer(), RecentGlobalXmin, REGBUF_STANDARD, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, START_CRIT_SECTION, HeapTupleHeaderData::t_choice, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_heap, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, HeapTupleFields::t_xmin, toast_delete(), TransactionIdIsValid, XLH_DELETE_IS_SUPER, XLOG_HEAP_DELETE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and xl_heap_delete::xmax.

Referenced by ExecInsert(), and toast_delete_datum().

6150 {
6152  ItemPointer tid = &(tuple->t_self);
6153  ItemId lp;
6154  HeapTupleData tp;
6155  Page page;
6156  BlockNumber block;
6157  Buffer buffer;
6158 
6159  Assert(ItemPointerIsValid(tid));
6160 
6161  block = ItemPointerGetBlockNumber(tid);
6162  buffer = ReadBuffer(relation, block);
6163  page = BufferGetPage(buffer);
6164 
6166 
6167  /*
6168  * Page can't be all visible, we just inserted into it, and are still
6169  * running.
6170  */
6171  Assert(!PageIsAllVisible(page));
6172 
6173  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
6174  Assert(ItemIdIsNormal(lp));
6175 
6176  tp.t_tableOid = RelationGetRelid(relation);
6177  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
6178  tp.t_len = ItemIdGetLength(lp);
6179  tp.t_self = *tid;
6180 
6181  /*
6182  * Sanity check that the tuple really is a speculatively inserted tuple,
6183  * inserted by us.
6184  */
6185  if (tp.t_data->t_choice.t_heap.t_xmin != xid)
6186  elog(ERROR, "attempted to kill a tuple inserted by another transaction");
6187  if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
6188  elog(ERROR, "attempted to kill a non-speculative tuple");
6190 
6191  /*
6192  * No need to check for serializable conflicts here. There is never a
6193  * need for a combocid, either. No need to extract replica identity, or
6194  * do anything special with infomask bits.
6195  */
6196 
6198 
6199  /*
6200  * The tuple will become DEAD immediately. Flag that this page
6201  * immediately is a candidate for pruning by setting xmin to
6202  * RecentGlobalXmin. That's not pretty, but it doesn't seem worth
6203  * inventing a nicer API for this.
6204  */
6207 
6208  /* store transaction information of xact deleting the tuple */
6211 
6212  /*
6213  * Set the tuple header xmin to InvalidTransactionId. This makes the
6214  * tuple immediately invisible everyone. (In particular, to any
6215  * transactions waiting on the speculative token, woken up later.)
6216  */
6218 
6219  /* Clear the speculative insertion token too */
6220  tp.t_data->t_ctid = tp.t_self;
6221 
6222  MarkBufferDirty(buffer);
6223 
6224  /*
6225  * XLOG stuff
6226  *
6227  * The WAL records generated here match heap_delete(). The same recovery
6228  * routines are used.
6229  */
6230  if (RelationNeedsWAL(relation))
6231  {
6232  xl_heap_delete xlrec;
6233  XLogRecPtr recptr;
6234 
6235  xlrec.flags = XLH_DELETE_IS_SUPER;
6237  tp.t_data->t_infomask2);
6239  xlrec.xmax = xid;
6240 
6241  XLogBeginInsert();
6242  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
6243  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6244 
6245  /* No replica identity & replication origin logged */
6246 
6247  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
6248 
6249  PageSetLSN(page, recptr);
6250  }
6251 
6252  END_CRIT_SECTION();
6253 
6254  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6255 
6256  if (HeapTupleHasExternal(&tp))
6257  {
6258  Assert(!IsToastRelation(relation));
6259  toast_delete(relation, &tp, true);
6260  }
6261 
6262  /*
6263  * Never need to mark tuple for invalidation, since catalogs don't support
6264  * speculative insertion
6265  */
6266 
6267  /* Now we can release the buffer */
6268  ReleaseBuffer(buffer);
6269 
6270  /* count deletion, as we counted the insertion too */
6271  pgstat_count_heap_delete(relation);
6272 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
bool IsToastRelation(Relation relation)
Definition: catalog.c:136
#define HEAP_XMAX_BITS
Definition: htup_details.h:256
union HeapTupleHeaderData::@45 t_choice
#define XLH_DELETE_IS_SUPER
Definition: heapam_xlog.h:95
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:3014
HeapTupleFields t_heap
Definition: htup_details.h:146
#define PageIsAllVisible(page)
Definition: bufpage.h:381
uint32 TransactionId
Definition: c.h:391
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:423
#define PageSetPrunable(page, xid)
Definition: bufpage.h:394
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
OffsetNumber offnum
Definition: heapam_xlog.h:105
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleHeaderIsHeapOnly(tup)
Definition: htup_details.h:502
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:150
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:104
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:418
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:110
TransactionId RecentGlobalXmin
Definition: snapmgr.c:166
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
TransactionId t_xmin
Definition: htup_details.h:118
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_MOVED
Definition: htup_details.h:202
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:681
uint8 infobits_set
Definition: heapam_xlog.h:106
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:505
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:1953
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:674
void toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: tuptoaster.c:464
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define TransactionIdIsValid(xid)
Definition: transam.h:41
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:416
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
#define HeapTupleHeaderSetXmin(tup, xid)
Definition: htup_details.h:313
/*
 * heap_acquire_tuplock - take the heavyweight tuple lock on (relation, tid)
 * in the given tuple-lock mode, honoring the caller's wait policy.
 *
 * Returns true when the lock is held on exit (and sets *have_tuple_lock);
 * returns false only under LockWaitSkip when the lock is unavailable.
 * Under LockWaitError an unavailable lock raises an error instead of
 * returning.
 *
 * The *have_tuple_lock flag lets callers invoke this repeatedly (e.g. after
 * a "goto l1" retry) without re-acquiring a lock they already hold.
 */
static bool
heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
					 LockWaitPolicy wait_policy, bool *have_tuple_lock)
{
	/* Nothing to do if a previous call already obtained the lock. */
	if (*have_tuple_lock)
		return true;

	if (wait_policy == LockWaitBlock)
	{
		/* Plain blocking acquisition. */
		LockTupleTuplock(relation, tid, mode);
	}
	else if (wait_policy == LockWaitSkip)
	{
		/* Try once; report failure to the caller rather than waiting. */
		if (!ConditionalLockTupleTuplock(relation, tid, mode))
			return false;
	}
	else if (wait_policy == LockWaitError)
	{
		/* Try once; an unavailable lock is an error under this policy. */
		if (!ConditionalLockTupleTuplock(relation, tid, mode))
			ereport(ERROR,
					(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
					 errmsg("could not obtain lock on row in relation \"%s\"",
							RelationGetRelationName(relation))));
	}

	*have_tuple_lock = true;

	return true;
}
#define LockTupleTuplock(rel, tup, mode)
Definition: heapam.c:181
#define ConditionalLockTupleTuplock(rel, tup, mode)
Definition: heapam.c:185
int errcode(int sqlerrcode)
Definition: elog.c:575
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:436
#define ereport(elevel, rest)
Definition: elog.h:122
int errmsg(const char *fmt,...)
Definition: elog.c:797
/*
 * heap_beginscan - begin a plain heap scan with default options: buffer
 * access strategy, synchronized scanning, and page-at-a-time mode are all
 * permitted; the scan is neither a bitmap scan nor a sample scan, and the
 * snapshot is not scan-owned.
 */
HeapScanDesc
heap_beginscan(Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
{
	return heap_beginscan_internal(relation, snapshot, nkeys, key,
								   NULL,	/* no parallel descriptor */
								   true,	/* allow_strat */
								   true,	/* allow_sync */
								   true,	/* allow_pagemode */
								   false,	/* is_bitmapscan */
								   false,	/* is_samplescan */
								   false);	/* temp_snap */
}
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443
/*
 * heap_beginscan_bm - begin the heap side of a bitmap heap scan.
 *
 * Strategy and sync-scan are disabled (the bitmap dictates page order);
 * page-at-a-time mode is still permitted.
 */
HeapScanDesc
heap_beginscan_bm(Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
{
	return heap_beginscan_internal(relation, snapshot, nkeys, key,
								   NULL,	/* no parallel descriptor */
								   false,	/* allow_strat */
								   false,	/* allow_sync */
								   true,	/* allow_pagemode */
								   true,	/* is_bitmapscan */
								   false,	/* is_samplescan */
								   false);	/* temp_snap */
}
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443
/*
 * heap_beginscan_catalog - begin a scan of a system catalog.
 *
 * A catalog snapshot is obtained and registered here; passing temp_snap =
 * true makes the scan machinery responsible for unregistering it when the
 * scan ends.
 */
HeapScanDesc
heap_beginscan_catalog(Relation relation, int nkeys, ScanKey key)
{
	Snapshot snapshot;

	snapshot = RegisterSnapshot(GetCatalogSnapshot(RelationGetRelid(relation)));

	return heap_beginscan_internal(relation, snapshot, nkeys, key,
								   NULL,	/* no parallel descriptor */
								   true,	/* allow_strat */
								   true,	/* allow_sync */
								   true,	/* allow_pagemode */
								   false,	/* is_bitmapscan */
								   false,	/* is_samplescan */
								   true);	/* temp_snap */
}
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:863
Snapshot GetCatalogSnapshot(Oid relid)
Definition: snapmgr.c:440
unsigned int Oid
Definition: postgres_ext.h:31
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443
#define RelationGetRelid(relation)
Definition: rel.h:416
/*
 * heap_beginscan_internal - workhorse shared by every heap_beginscan variant.
 *
 * Builds and returns a HeapScanDesc for 'relation' under 'snapshot'.  The
 * boolean flags record which features the caller permits or requests;
 * 'parallel_scan' is non-NULL only when joining a parallel scan, and
 * 'temp_snap' marks a snapshot the scan owns and must unregister at end.
 * Scan keys are copied into scan-local storage by initscan().
 */
static HeapScanDesc
heap_beginscan_internal(Relation relation, Snapshot snapshot,
						int nkeys, ScanKey key,
						ParallelHeapScanDesc parallel_scan,
						bool allow_strat,
						bool allow_sync,
						bool allow_pagemode,
						bool is_bitmapscan,
						bool is_samplescan,
						bool temp_snap)
{
	HeapScanDesc sdesc;

	/*
	 * Pin the relcache entry for the duration of the scan.  The caller
	 * should be holding the relation open anyway, so this is normally
	 * redundant, but it makes really sure the entry can't go away while the
	 * scan holds a pointer to it.
	 */
	RelationIncrementReferenceCount(relation);

	/* Allocate the descriptor and record the caller-supplied options. */
	sdesc = (HeapScanDesc) palloc(sizeof(HeapScanDescData));

	sdesc->rs_rd = relation;
	sdesc->rs_snapshot = snapshot;
	sdesc->rs_nkeys = nkeys;
	sdesc->rs_bitmapscan = is_bitmapscan;
	sdesc->rs_samplescan = is_samplescan;
	sdesc->rs_strategy = NULL;	/* initscan() chooses this */
	sdesc->rs_allow_strat = allow_strat;
	sdesc->rs_allow_sync = allow_sync;
	sdesc->rs_temp_snap = temp_snap;
	sdesc->rs_parallel = parallel_scan;

	/* Page-at-a-time mode is usable only with an MVCC-safe snapshot. */
	sdesc->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(snapshot);

	/*
	 * For a seqscan in a serializable transaction, take a predicate lock on
	 * the whole relation: it must cover not only every matching tuple but
	 * also new insertions into the table.  (An indexscan instead page-locks
	 * the index range it reads, but a heap scan has nothing finer-grained.)
	 * A bitmap scan skips this because the preceding index scan already
	 * locked the index pages covering the predicate; its matching heap
	 * tuples still get locked individually.
	 */
	if (!is_bitmapscan)
		PredicateLockRelation(relation, snapshot);

	/* The table OID never changes, so it needs to be set only once. */
	sdesc->rs_ctup.t_tableOid = RelationGetRelid(relation);

	/*
	 * Allocate key space here rather than in initscan(): heap_rescan also
	 * calls initscan(), and we don't want to reallocate on every rescan.
	 */
	if (nkeys > 0)
		sdesc->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
	else
		sdesc->rs_key = NULL;

	initscan(sdesc, key, false);

	return sdesc;
}
bool rs_allow_sync
Definition: relscan.h:56
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition: predicate.c:2498
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:100
HeapTupleData rs_ctup
Definition: relscan.h:69
bool rs_bitmapscan
Definition: relscan.h:52
bool rs_pageatatime
Definition: relscan.h:54
ParallelHeapScanDesc rs_parallel
Definition: relscan.h:73
ScanKeyData * ScanKey
Definition: skey.h:75
Snapshot rs_snapshot
Definition: relscan.h:49
Oid t_tableOid
Definition: htup.h:66
bool rs_temp_snap
Definition: relscan.h:57
void RelationIncrementReferenceCount(Relation rel)
Definition: relcache.c:2131
BufferAccessStrategy rs_strategy
Definition: relscan.h:64
Relation rs_rd
Definition: relscan.h:48
#define IsMVCCSnapshot(snapshot)
Definition: tqual.h:31
void * palloc(Size size)
Definition: mcxt.c:848
bool rs_allow_strat
Definition: relscan.h:55
static void initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
Definition: heapam.c:216
bool rs_samplescan
Definition: relscan.h:53
#define RelationGetRelid(relation)
Definition: rel.h:416
ScanKey rs_key
Definition: relscan.h:51
/*
 * heap_beginscan_parallel - join a parallel heap scan described by
 * 'parallel_scan'.
 *
 * The leader's serialized snapshot is restored and registered here; the
 * scan owns it (temp_snap = true), so it is unregistered at endscan.
 */
HeapScanDesc
heap_beginscan_parallel(Relation relation, ParallelHeapScanDesc parallel_scan)
{
	Snapshot snap;

	/* The shared descriptor must describe this very relation. */
	Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);

	snap = RestoreSnapshot(parallel_scan->phs_snapshot_data);
	RegisterSnapshot(snap);

	return heap_beginscan_internal(relation, snap, 0, NULL, parallel_scan,
								   true,	/* allow_strat */
								   true,	/* allow_sync */
								   true,	/* allow_pagemode */
								   false,	/* is_bitmapscan */
								   false,	/* is_samplescan */
								   true);	/* temp_snap */
}
char phs_snapshot_data[FLEXIBLE_ARRAY_MEMBER]
Definition: relscan.h:42
Snapshot RestoreSnapshot(char *start_address)
Definition: snapmgr.c:2121
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:863
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443
#define Assert(condition)
Definition: c.h:681
#define RelationGetRelid(relation)
Definition: rel.h:416
/*
 * heap_beginscan_sampling - begin a heap scan driven by a TABLESAMPLE
 * method; the sampling method decides which strategy/sync/pagemode
 * permissions to pass through.
 */
HeapScanDesc
heap_beginscan_sampling(Relation relation, Snapshot snapshot,
						int nkeys, ScanKey key,
						bool allow_strat, bool allow_sync, bool allow_pagemode)
{
	return heap_beginscan_internal(relation, snapshot, nkeys, key,
								   NULL,	/* no parallel descriptor */
								   allow_strat,
								   allow_sync,
								   allow_pagemode,
								   false,	/* is_bitmapscan */
								   true,	/* is_samplescan */
								   false);	/* temp_snap */
}
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443
/*
 * heap_beginscan_strat - begin a heap scan with caller-controlled buffer
 * access strategy and synchronized-scan permissions; page-at-a-time mode is
 * always permitted.
 */
HeapScanDesc
heap_beginscan_strat(Relation relation, Snapshot snapshot,
					 int nkeys, ScanKey key,
					 bool allow_strat, bool allow_sync)
{
	return heap_beginscan_internal(relation, snapshot, nkeys, key,
								   NULL,	/* no parallel descriptor */
								   allow_strat,
								   allow_sync,
								   true,	/* allow_pagemode */
								   false,	/* is_bitmapscan */
								   false,	/* is_samplescan */
								   false);	/* temp_snap */
}
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443
HTSU_Result heap_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  crosscheck,
bool  wait,
HeapUpdateFailureData hufd 
)

Definition at line 3073 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), HeapUpdateFailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), HeapUpdateFailureData::ctid, DoesMultiXactIdConflict(), END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, ExtractReplicaIdentity(), xl_heap_delete::flags, GetCurrentTransactionId(), heap_acquire_tuplock(), heap_freetuple(), HEAP_KEYS_UPDATED, HEAP_MOVED, HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HeapTupleBeingUpdated, HeapTupleHasExternal, HeapTupleHeaderAdjustCmax(), HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetCmax, HeapTupleHeaderSetXmax, HeapTupleInvisible, HeapTupleMayBeUpdated, HeapTupleSatisfiesUpdate(), HeapTupleSatisfiesVisibility, HeapTupleSelfUpdated, HeapTupleUpdated, xl_heap_delete::infobits_set, InvalidBuffer, InvalidCommandId, InvalidSnapshot, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), LockTupleExclusive, LockWaitBlock, log_heap_new_cid(), MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusUpdate, xl_heap_delete::offnum, PageClearAllVisible, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationIsAccessibleInLogicalDecoding, RelationNeedsWAL, ReleaseBuffer(), RELKIND_MATVIEW, RELKIND_RELATION, REPLICA_IDENTITY_FULL, SizeOfHeapDelete, SizeOfHeapHeader, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, 
HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, toast_delete(), TransactionIdEquals, TransactionIdIsCurrentTransactionId(), UnlockReleaseBuffer(), UnlockTupleTuplock, UpdateXmaxHintBits(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XactLockTableWait(), XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_CONTAINS_OLD_KEY, XLH_DELETE_CONTAINS_OLD_TUPLE, XLOG_HEAP_DELETE, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLogSetRecordFlags(), XLTW_Delete, HeapUpdateFailureData::xmax, xl_heap_delete::xmax, and xmax_infomask_changed().

Referenced by ExecDelete(), and simple_heap_delete().

3076 {
 /*
  * NOTE(review): heap_delete - delete one tuple, per the signature listed
  * above (callers: ExecDelete, simple_heap_delete per the cross-reference).
  * This listing came from generated documentation; several hyperlinked
  * statements were dropped by the extraction, visible as gaps in the
  * embedded line numbers (e.g. 3078, 3119-3120, 3306-3313, 3333-3334).
  * The code below is left byte-identical; the gaps are extraction
  * artifacts, not intentional omissions -- confirm against heapam.c.
  */
3077  HTSU_Result result;
3079  ItemId lp;
3080  HeapTupleData tp;
3081  Page page;
3082  BlockNumber block;
3083  Buffer buffer;
3084  Buffer vmbuffer = InvalidBuffer;
3085  TransactionId new_xmax;
3086  uint16 new_infomask,
3087  new_infomask2;
3088  bool have_tuple_lock = false;
3089  bool iscombo;
3090  bool all_visible_cleared = false;
3091  HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
3092  bool old_key_copied = false;
3093 
3094  Assert(ItemPointerIsValid(tid));
3095 
3096  /*
3097  * Forbid this during a parallel operation, lest it allocate a combocid.
3098  * Other workers might need that combocid for visibility checks, and we
3099  * have no provision for broadcasting it to them.
3100  */
3101  if (IsInParallelMode())
3102  ereport(ERROR,
3103  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3104  errmsg("cannot delete tuples during a parallel operation")));
3105 
3106  block = ItemPointerGetBlockNumber(tid);
3107  buffer = ReadBuffer(relation, block);
3108  page = BufferGetPage(buffer);
3109 
3110  /*
3111  * Before locking the buffer, pin the visibility map page if it appears to
3112  * be necessary. Since we haven't got the lock yet, someone else might be
3113  * in the middle of changing this, so we'll need to recheck after we have
3114  * the lock.
3115  */
3116  if (PageIsAllVisible(page))
3117  visibilitymap_pin(relation, block, &vmbuffer);
3118 
 /*
  * NOTE(review): doc-line 3120 was lost in extraction -- presumably the
  * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE) acquisition, given the
  * re-lock at 3131 below and BUFFER_LOCK_EXCLUSIVE in the References
  * list.  Confirm against heapam.c.
  */
3120 
3121  /*
3122  * If we didn't pin the visibility map page and the page has become all
3123  * visible while we were busy locking the buffer, we'll have to unlock and
3124  * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
3125  * unfortunate, but hopefully shouldn't happen often.
3126  */
3127  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3128  {
3129  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3130  visibilitymap_pin(relation, block, &vmbuffer);
3132  }
3133 
3134  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3135  Assert(ItemIdIsNormal(lp));
3136 
3137  tp.t_tableOid = RelationGetRelid(relation);
3138  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3139  tp.t_len = ItemIdGetLength(lp);
3140  tp.t_self = *tid;
3141 
3142 l1:
3143  result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
3144 
3145  if (result == HeapTupleInvisible)
3146  {
3147  UnlockReleaseBuffer(buffer);
3148  ereport(ERROR,
3149  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3150  errmsg("attempted to delete invisible tuple")));
3151  }
3152  else if (result == HeapTupleBeingUpdated && wait)
3153  {
3154  TransactionId xwait;
3155  uint16 infomask;
3156 
3157  /* must copy state data before unlocking buffer */
3158  xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
3159  infomask = tp.t_data->t_infomask;
3160 
3161  /*
3162  * Sleep until concurrent transaction ends -- except when there's a
3163  * single locker and it's our own transaction. Note we don't care
3164  * which lock mode the locker has, because we need the strongest one.
3165  *
3166  * Before sleeping, we need to acquire tuple lock to establish our
3167  * priority for the tuple (see heap_lock_tuple). LockTuple will
3168  * release us when we are next-in-line for the tuple.
3169  *
3170  * If we are forced to "start over" below, we keep the tuple lock;
3171  * this arranges that we stay at the head of the line while rechecking
3172  * tuple state.
3173  */
3174  if (infomask & HEAP_XMAX_IS_MULTI)
3175  {
3176  /* wait for multixact */
3177  if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
 /*
  * NOTE(review): parts of doc-lines 3178-3190 were lost -- presumably the
  * heap_acquire_tuplock() and MultiXactIdWait() calls named in the nearby
  * comments and the References list.  Confirm against heapam.c.
  */
3179  {
3180  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3181 
3182  /* acquire tuple lock, if necessary */
3184  LockWaitBlock, &have_tuple_lock);
3185 
3186  /* wait for multixact */
3188  relation, &(tp.t_self), XLTW_Delete,
3189  NULL);
3191 
3192  /*
3193  * If xwait had just locked the tuple then some other xact
3194  * could update this tuple before we get to this point. Check
3195  * for xmax change, and start over if so.
3196  */
3197  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3199  xwait))
3200  goto l1;
3201  }
3202 
3203  /*
3204  * You might think the multixact is necessarily done here, but not
3205  * so: it could have surviving members, namely our own xact or
3206  * other subxacts of this backend. It is legal for us to delete
3207  * the tuple in either case, however (the latter case is
3208  * essentially a situation of upgrading our former shared lock to
3209  * exclusive). We don't bother changing the on-disk hint bits
3210  * since we are about to overwrite the xmax altogether.
3211  */
3212  }
3213  else if (!TransactionIdIsCurrentTransactionId(xwait))
3214  {
3215  /*
3216  * Wait for regular transaction to end; but first, acquire tuple
3217  * lock.
3218  */
3219  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3221  LockWaitBlock, &have_tuple_lock);
3222  XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
3224 
3225  /*
3226  * xwait is done, but if xwait had just locked the tuple then some
3227  * other xact could update this tuple before we get to this point.
3228  * Check for xmax change, and start over if so.
3229  */
3230  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3232  xwait))
3233  goto l1;
3234 
3235  /* Otherwise check if it committed or aborted */
3236  UpdateXmaxHintBits(tp.t_data, buffer, xwait);
3237  }
3238 
3239  /*
3240  * We may overwrite if previous xmax aborted, or if it committed but
3241  * only locked the tuple without updating it.
3242  */
3243  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3246  result = HeapTupleMayBeUpdated;
3247  else
3248  result = HeapTupleUpdated;
3249  }
3250 
3251  if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
3252  {
3253  /* Perform additional check for transaction-snapshot mode RI updates */
3254  if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
3255  result = HeapTupleUpdated;
3256  }
3257 
3258  if (result != HeapTupleMayBeUpdated)
3259  {
3260  Assert(result == HeapTupleSelfUpdated ||
3261  result == HeapTupleUpdated ||
3262  result == HeapTupleBeingUpdated);
3264  hufd->ctid = tp.t_data->t_ctid;
3266  if (result == HeapTupleSelfUpdated)
3267  hufd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
3268  else
3269  hufd->cmax = InvalidCommandId;
3270  UnlockReleaseBuffer(buffer);
3271  if (have_tuple_lock)
3272  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3273  if (vmbuffer != InvalidBuffer)
3274  ReleaseBuffer(vmbuffer);
3275  return result;
3276  }
3277 
3278  /*
3279  * We're about to do the actual delete -- check for conflict first, to
3280  * avoid possibly having to roll back work we've just done.
3281  *
3282  * This is safe without a recheck as long as there is no possibility of
3283  * another process scanning the page between this check and the delete
3284  * being visible to the scan (i.e., an exclusive buffer content lock is
3285  * continuously held from this point until the tuple delete is visible).
3286  */
3287  CheckForSerializableConflictIn(relation, &tp, buffer);
3288 
3289  /* replace cid with a combo cid if necessary */
3290  HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
3291 
3292  /*
3293  * Compute replica identity tuple before entering the critical section so
3294  * we don't PANIC upon a memory allocation failure.
3295  */
3296  old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3297 
3298  /*
3299  * If this is the first possibly-multixact-able operation in the current
3300  * transaction, set my per-backend OldestMemberMXactId setting. We can be
3301  * certain that the transaction will never become a member of any older
3302  * MultiXactIds than that. (We have to do this even if we end up just
3303  * using our own TransactionId below, since some other backend could
3304  * incorporate our XID into a MultiXact immediately afterwards.)
3305  */
 /*
  * NOTE(review): doc-lines 3306-3313 were lost -- presumably the
  * MultiXactIdSetOldestMember() call described by the comment above, the
  * compute_new_xmax_infomask() call whose trailing arguments survive
  * below, and START_CRIT_SECTION() (all appear in the References list).
  * Confirm against heapam.c.
  */
3307 
3310  xid, LockTupleExclusive, true,
3311  &new_xmax, &new_infomask, &new_infomask2);
3312 
3314 
3315  /*
3316  * If this transaction commits, the tuple will become DEAD sooner or
3317  * later. Set flag that this page is a candidate for pruning once our xid
3318  * falls below the OldestXmin horizon. If the transaction finally aborts,
3319  * the subsequent page pruning will be a no-op and the hint will be
3320  * cleared.
3321  */
3322  PageSetPrunable(page, xid);
3323 
3324  if (PageIsAllVisible(page))
3325  {
3326  all_visible_cleared = true;
3327  PageClearAllVisible(page);
3328  visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3329  vmbuffer, VISIBILITYMAP_VALID_BITS);
3330  }
3331 
3332  /* store transaction information of xact deleting the tuple */
3335  tp.t_data->t_infomask |= new_infomask;
3336  tp.t_data->t_infomask2 |= new_infomask2;
3338  HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3339  HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
3340  /* Make sure there is no forward chain link in t_ctid */
3341  tp.t_data->t_ctid = tp.t_self;
3342 
3343  MarkBufferDirty(buffer);
3344 
3345  /*
3346  * XLOG stuff
3347  *
3348  * NB: heap_abort_speculative() uses the same xlog record and replay
3349  * routines.
3350  */
3351  if (RelationNeedsWAL(relation))
3352  {
3353  xl_heap_delete xlrec;
3354  XLogRecPtr recptr;
3355 
3356  /* For logical decode we need combocids to properly decode the catalog */
3358  log_heap_new_cid(relation, &tp);
3359 
3360  xlrec.flags = all_visible_cleared ? XLH_DELETE_ALL_VISIBLE_CLEARED : 0;
3362  tp.t_data->t_infomask2);
3364  xlrec.xmax = new_xmax;
3365 
3366  if (old_key_tuple != NULL)
3367  {
 /*
  * NOTE(review): doc-lines 3369 and 3371 were lost -- presumably the
  * xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE / _OLD_KEY assignments
  * (both flags appear in the References list).  Confirm against heapam.c.
  */
3368  if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3370  else
3372  }
3373 
3374  XLogBeginInsert();
3375  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
3376 
3377  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3378 
3379  /*
3380  * Log replica identity of the deleted tuple if there is one
3381  */
3382  if (old_key_tuple != NULL)
3383  {
3384  xl_heap_header xlhdr;
3385 
3386  xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3387  xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3388  xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3389 
3390  XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
3391  XLogRegisterData((char *) old_key_tuple->t_data
3393  old_key_tuple->t_len
3395  }
3396 
3397  /* filtering by origin on a row level is much more efficient */
3399 
3400  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3401 
3402  PageSetLSN(page, recptr);
3403  }
3404 
3405  END_CRIT_SECTION();
3406 
3407  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3408 
3409  if (vmbuffer != InvalidBuffer)
3410  ReleaseBuffer(vmbuffer);
3411 
3412  /*
3413  * If the tuple has toasted out-of-line attributes, we need to delete
3414  * those items too. We have to do this before releasing the buffer
3415  * because we need to look at the contents of the tuple, but it's OK to
3416  * release the content lock on the buffer first.
3417  */
3418  if (relation->rd_rel->relkind != RELKIND_RELATION &&
3419  relation->rd_rel->relkind != RELKIND_MATVIEW)
3420  {
3421  /* toast table entries should never be recursively toasted */
3423  }
3424  else if (HeapTupleHasExternal(&tp))
3425  toast_delete(relation, &tp, false);
3426 
3427  /*
3428  * Mark tuple for invalidation from system caches at next command
3429  * boundary. We have to do this before releasing the buffer because we
3430  * need to look at the contents of the tuple.
3431  */
3432  CacheInvalidateHeapTuple(relation, &tp, NULL);
3433 
3434  /* Now we can release the buffer */
3435  ReleaseBuffer(buffer);
3436 
3437  /*
3438  * Release the lmgr tuple lock, if we had it.
3439  */
3440  if (have_tuple_lock)
3441  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3442 
3443  pgstat_count_heap_delete(relation);
3444 
3445  if (old_key_tuple != NULL && old_key_copied)
3446  heap_freetuple(old_key_tuple);
3447 
3448  return HeapTupleMayBeUpdated;
3449 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:359
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
Definition: tqual.c:1605
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
#define SizeofHeapTupleHeader
Definition: htup_details.h:170
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7812
#define HEAP_XMAX_BITS
Definition: htup_details.h:256
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:3014
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1094
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define PageIsAllVisible(page)
Definition: bufpage.h:381
uint32 TransactionId
Definition: c.h:391
HTSU_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer)
Definition: tqual.c:460
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:766
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
Definition: heapam.c:3036
#define HeapTupleHeaderClearHotUpdated(tup)
Definition: htup_details.h:497
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define RELKIND_MATVIEW
Definition: pg_class.h:165
#define InvalidBuffer
Definition: buf.h:25
uint16 t_infomask2
Definition: heapam_xlog.h:122
#define PageSetPrunable(page, xid)
Definition: bufpage.h:394
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
int errcode(int sqlerrcode)
Definition: elog.c:575
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:192
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
Form_pg_class rd_rel
Definition: rel.h:114
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1373
void CheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
Definition: predicate.c:4326
#define UnlockTupleTuplock(rel, tup, mode)
Definition: heapam.c:183
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
OffsetNumber offnum
Definition: heapam_xlog.h:105
void MultiXactIdSetOldestMember(void)
Definition: multixact.c:623
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleHeaderGetRawXmax(tup)
Definition: htup_details.h:369
unsigned short uint16
Definition: c.h:257
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
bool IsInParallelMode(void)
Definition: xact.c:906
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define REPLICA_IDENTITY_FULL
Definition: pg_class.h:179
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:193
ItemPointerData t_ctid
Definition: htup_details.h:150
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:104
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:7228
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:418
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:110
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define XLH_DELETE_CONTAINS_OLD_KEY
Definition: heapam_xlog.h:94
CommandId cmax
Definition: heapam.h:72
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:374
HTSU_Result
Definition: snapshot.h:121
Oid t_tableOid
Definition: htup.h:66
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define HeapTupleHeaderSetCmax(tup, cid, iscombo)
Definition: htup_details.h:399
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define ereport(elevel, rest)
Definition: elog.h:122
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
Definition: heapam.c:5318
TransactionId xmax
Definition: heapam.h:71
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
#define InvalidSnapshot
Definition: snapshot.h:25
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:559
#define InvalidCommandId
Definition: c.h:408
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
static void UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
Definition: heapam.c:2357
#define HEAP_MOVED
Definition: htup_details.h:202
static bool heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
Definition: heapam.c:5269
TransactionId MultiXactId
Definition: c.h:401
#define PageClearAllVisible(page)
Definition: bufpage.h:385
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:554
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:681
uint8 infobits_set
Definition: heapam_xlog.h:106
static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified, bool *copy)
Definition: heapam.c:7888
CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup)
Definition: combocid.c:119
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, LockTupleMode lockmode)
Definition: heapam.c:7061
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
uint16 t_infomask
Definition: heapam_xlog.h:123
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:505
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:1953
void HeapTupleHeaderAdjustCmax(HeapTupleHeader tup, CommandId *cmax, bool *iscombo)
Definition: combocid.c:154
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:674
int errmsg(const char *fmt,...)
Definition: elog.c:797
#define XLH_DELETE_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:92
void toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: tuptoaster.c:464
ItemPointerData ctid
Definition: heapam.h:70
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define RELKIND_RELATION
Definition: pg_class.h:160
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:416
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
#define SizeOfHeapHeader
Definition: heapam_xlog.h:127
Pointer Page
Definition: bufpage.h:74
#define XLH_DELETE_CONTAINS_OLD_TUPLE
Definition: heapam_xlog.h:93
void heap_endscan ( HeapScanDesc  scan)

Definition at line 1565 of file heapam.c.

References BufferIsValid, FreeAccessStrategy(), pfree(), RelationDecrementReferenceCount(), ReleaseBuffer(), HeapScanDescData::rs_cbuf, HeapScanDescData::rs_key, HeapScanDescData::rs_rd, HeapScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, HeapScanDescData::rs_temp_snap, and UnregisterSnapshot().

Referenced by AlterDomainNotNull(), AlterTableMoveAll(), AlterTableSpaceOptions(), ATRewriteTable(), boot_openrel(), check_db_file_conflict(), check_default_allows_bound(), copy_heap_data(), CopyTo(), createdb(), DefineQueryRewrite(), do_autovacuum(), DropSetting(), DropTableSpace(), ExecEndBitmapHeapScan(), ExecEndSampleScan(), ExecEndSeqScan(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), RelationFindReplTupleSeq(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), systable_endscan(), ThereIsAtLeastOneRole(), vac_truncate_clog(), validate_index_heapscan(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

/*
 * heap_endscan - release all resources held by a heap scan descriptor:
 * the pinned current buffer, the scan key array, the buffer access
 * strategy, any temporary snapshot, and the descriptor itself.
 *
 * NOTE(review): this listing is a Doxygen extraction; source lines that
 * were hyperlinks were dropped by the scraper.  The gaps are flagged
 * below, reconstructed from the page's own "References" list -- confirm
 * against the real heapam.c before relying on them.
 */
1566 {
1567  /* Note: no locking manipulations needed */
1568 
1569  /*
1570  * unpin scan buffers
1571  */
1572  if (BufferIsValid(scan->rs_cbuf))
1573  ReleaseBuffer(scan->rs_cbuf);
1574 
1575  /*
1576  * decrement relation reference count and free scan descriptor storage
1577  */
/* NOTE(review): extraction dropped line 1578 here; per the References
 * list it is presumably RelationDecrementReferenceCount(scan->rs_rd); */
1579 
1580  if (scan->rs_key)
1581  pfree(scan->rs_key);
1582 
1583  if (scan->rs_strategy != NULL)
/* NOTE(review): extraction dropped line 1584; presumably
 * FreeAccessStrategy(scan->rs_strategy); */
1585 
1586  if (scan->rs_temp_snap)
/* NOTE(review): extraction dropped line 1587; presumably
 * UnregisterSnapshot(scan->rs_snapshot); */
1588 
1589  pfree(scan);
1590 }
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
void pfree(void *pointer)
Definition: mcxt.c:949
void RelationDecrementReferenceCount(Relation rel)
Definition: relcache.c:2144
Snapshot rs_snapshot
Definition: relscan.h:49
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:905
bool rs_temp_snap
Definition: relscan.h:57
BufferAccessStrategy rs_strategy
Definition: relscan.h:64
Relation rs_rd
Definition: relscan.h:48
Buffer rs_cbuf
Definition: relscan.h:71
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
ScanKey rs_key
Definition: relscan.h:51
void heap_execute_freeze_tuple ( HeapTupleHeader  tuple,
xl_heap_freeze_tuple frz 
)

Definition at line 6866 of file heapam.c.

References FrozenTransactionId, xl_heap_freeze_tuple::frzflags, HeapTupleHeaderSetXmax, HeapTupleHeaderSetXvac, InvalidTransactionId, HeapTupleHeaderData::t_infomask, xl_heap_freeze_tuple::t_infomask, HeapTupleHeaderData::t_infomask2, xl_heap_freeze_tuple::t_infomask2, XLH_FREEZE_XVAC, XLH_INVALID_XVAC, and xl_heap_freeze_tuple::xmax.

Referenced by heap_freeze_tuple(), heap_xlog_freeze_page(), and lazy_scan_heap().

/*
 * heap_execute_freeze_tuple - apply a previously prepared freeze plan
 * (frz) to a tuple header: install the new xmax, fix xvac as directed by
 * frzflags, and overwrite both infomask words with the prepared values.
 *
 * NOTE(review): Doxygen extraction dropped two hyperlinked lines; gaps
 * flagged below, inferred from the References list (HeapTupleHeaderSetXvac,
 * FrozenTransactionId, InvalidTransactionId) -- confirm against heapam.c.
 */
6867 {
6868  HeapTupleHeaderSetXmax(tuple, frz->xmax);
6869 
6870  if (frz->frzflags & XLH_FREEZE_XVAC)
/* NOTE(review): dropped line 6871; presumably
 * HeapTupleHeaderSetXvac(tuple, FrozenTransactionId); */
6872 
6873  if (frz->frzflags & XLH_INVALID_XVAC)
/* NOTE(review): dropped line 6874; presumably
 * HeapTupleHeaderSetXvac(tuple, InvalidTransactionId); */
6875 
6876  tuple->t_infomask = frz->t_infomask;
6877  tuple->t_infomask2 = frz->t_infomask2;
6878 }
#define HeapTupleHeaderSetXvac(tup, xid)
Definition: htup_details.h:417
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:374
#define InvalidTransactionId
Definition: transam.h:31
#define FrozenTransactionId
Definition: transam.h:33
TransactionId xmax
Definition: heapam_xlog.h:298
#define XLH_INVALID_XVAC
Definition: heapam_xlog.h:294
#define XLH_FREEZE_XVAC
Definition: heapam_xlog.h:293
bool heap_fetch ( Relation  relation,
Snapshot  snapshot,
HeapTuple  tuple,
Buffer userbuf,
bool  keep_buf,
Relation  stats_relation 
)

Definition at line 1876 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, CheckForSerializableConflictOut(), HeapTupleSatisfiesVisibility, InvalidBuffer, ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, pgstat_count_heap_fetch, PredicateLockTuple(), ReadBuffer(), RelationGetRelid, ReleaseBuffer(), HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and TestForOldSnapshot().

Referenced by AfterTriggerExecute(), EvalPlanQualFetch(), EvalPlanQualFetchRowMarks(), ExecCheckTIDVisible(), ExecDelete(), ExecLockRows(), heap_lock_updated_tuple_rec(), and TidNext().

/*
 * heap_fetch - fetch the tuple identified by tuple->t_self and test its
 * visibility under the given snapshot.
 *
 * On success, returns true and fills in *tuple; *userbuf holds a pin on
 * the containing buffer, which the caller must release.  On failure
 * (out-of-range offset, dead line pointer, or tuple not visible), returns
 * false with tuple->t_data set to NULL for the first two cases; the pin is
 * retained in *userbuf only when keep_buf is true, otherwise the buffer is
 * released and *userbuf is set to InvalidBuffer.  A successful fetch is
 * counted against stats_relation when that argument is non-NULL.
 */
1882 {
1883  ItemPointer tid = &(tuple->t_self);
1884  ItemId lp;
1885  Buffer buffer;
1886  Page page;
1887  OffsetNumber offnum;
1888  bool valid;
1889 
1890  /*
1891  * Fetch and pin the appropriate page of the relation.
1892  */
1893  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1894 
1895  /*
1896  * Need share lock on buffer to examine tuple commit status.
1897  */
1898  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1899  page = BufferGetPage(buffer);
1900  TestForOldSnapshot(snapshot, relation, page);
1901 
1902  /*
1903  * We'd better check for out-of-range offnum in case of VACUUM since the
1904  * TID was obtained.
1905  */
1906  offnum = ItemPointerGetOffsetNumber(tid);
1907  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1908  {
1909  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1910  if (keep_buf)
1911  *userbuf = buffer;
1912  else
1913  {
1914  ReleaseBuffer(buffer);
1915  *userbuf = InvalidBuffer;
1916  }
1917  tuple->t_data = NULL;
1918  return false;
1919  }
1920 
1921  /*
1922  * get the item line pointer corresponding to the requested tid
1923  */
1924  lp = PageGetItemId(page, offnum);
1925 
1926  /*
1927  * Must check for deleted tuple.
1928  */
1929  if (!ItemIdIsNormal(lp))
1930  {
1931  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1932  if (keep_buf)
1933  *userbuf = buffer;
1934  else
1935  {
1936  ReleaseBuffer(buffer);
1937  *userbuf = InvalidBuffer;
1938  }
1939  tuple->t_data = NULL;
1940  return false;
1941  }
1942 
1943  /*
1944  * fill in *tuple fields
1945  */
1946  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1947  tuple->t_len = ItemIdGetLength(lp);
1948  tuple->t_tableOid = RelationGetRelid(relation);
1949 
1950  /*
1951  * check time qualification of tuple, then release lock
1952  */
1953  valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1954 
1955  if (valid)
1956  PredicateLockTuple(relation, tuple, snapshot);
1957 
1958  CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1959 
1960  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1961 
1962  if (valid)
1963  {
1964  /*
1965  * All checks passed, so return the tuple as valid. Caller is now
1966  * responsible for releasing the buffer.
1967  */
1968  *userbuf = buffer;
1969 
1970  /* Count the successful fetch against appropriate rel, if any */
1971  if (stats_relation != NULL)
1972  pgstat_count_heap_fetch(stats_relation);
1973 
1974  return true;
1975  }
1976 
1977  /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1978  if (keep_buf)
1979  *userbuf = buffer;
1980  else
1981  {
1982  ReleaseBuffer(buffer);
1983  *userbuf = InvalidBuffer;
1984  }
1985 
1986  return false;
1987 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:265
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:3945
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:67
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
ItemPointerData t_self
Definition: htup.h:65
#define pgstat_count_heap_fetch(rel)
Definition: pgstat.h:1270
uint32 t_len
Definition: htup.h:64
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
void PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
Definition: predicate.c:2543
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:416
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
void heap_finish_speculative ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6058 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, elog, END_CRIT_SECTION, ERROR, HeapTupleHeaderIsSpeculative, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, xl_heap_confirm::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapConfirm, SpecTokenOffsetNumber, START_CRIT_SECTION, StaticAssertStmt, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_CONFIRM, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by ExecInsert().

/*
 * heap_finish_speculative - confirm a speculatively inserted tuple by
 * replacing the speculative token in its t_ctid with a real self-pointing
 * TID, WAL-logging an XLOG_HEAP_CONFIRM record when the relation is
 * WAL-logged.
 *
 * NOTE(review): Doxygen extraction dropped several hyperlinked source
 * lines; each gap is flagged below with its presumed content, inferred
 * from the page's References list -- confirm against the real heapam.c.
 */
6059 {
6060  Buffer buffer;
6061  Page page;
6062  OffsetNumber offnum;
6063  ItemId lp = NULL;
6064  HeapTupleHeader htup;
6065 
6066  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
/* NOTE(review): dropped line 6067; presumably
 * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); */
6068  page = (Page) BufferGetPage(buffer);
6069 
6070  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6071  if (PageGetMaxOffsetNumber(page) >= offnum)
6072  lp = PageGetItemId(page, offnum);
6073 
6074  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6075  elog(ERROR, "invalid lp");
6076 
6077  htup = (HeapTupleHeader) PageGetItem(page, lp);
6078 
6079  /* SpecTokenOffsetNumber should be distinguishable from any real offset */
/* NOTE(review): dropped line 6080; presumably the opening of
 * StaticAssertStmt(MaxOffsetNumber < SpecTokenOffsetNumber, ...) whose
 * second argument is the string on the next line */
6081  "invalid speculative token constant");
6082 
6083  /* NO EREPORT(ERROR) from here till changes are logged */
/* NOTE(review): dropped line 6084; presumably START_CRIT_SECTION(); */
6085 
/* NOTE(review): dropped line 6086; presumably
 * Assert(HeapTupleHeaderIsSpeculative(htup)); */
6087 
6088  MarkBufferDirty(buffer);
6089 
6090  /*
6091  * Replace the speculative insertion token with a real t_ctid, pointing to
6092  * itself like it does on regular tuples.
6093  */
6094  htup->t_ctid = tuple->t_self;
6095 
6096  /* XLOG stuff */
6097  if (RelationNeedsWAL(relation))
6098  {
6099  xl_heap_confirm xlrec;
6100  XLogRecPtr recptr;
6101 
6102  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6103 
6104  XLogBeginInsert();
6105 
6106  /* We want the same filtering on this as on a plain insert */
/* NOTE(review): dropped line 6107; presumably
 * XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN); */
6108 
6109  XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
6110  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6111 
6112  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
6113 
6114  PageSetLSN(page, recptr);
6115  }
6116 
6117  END_CRIT_SECTION();
6118 
6119  UnlockReleaseBuffer(buffer);
6120 }
OffsetNumber offnum
Definition: heapam_xlog.h:274
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define MaxOffsetNumber
Definition: off.h:28
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:423
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:192
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:67
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:763
#define SpecTokenOffsetNumber
Definition: htup_details.h:285
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:150
ItemPointerData t_self
Definition: htup.h:65
#define REGBUF_STANDARD
Definition: xloginsert.h:34
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define SizeOfHeapConfirm
Definition: heapam_xlog.h:277
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:681
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:505
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
#define XLOG_HEAP_CONFIRM
Definition: heapam_xlog.h:37
bool heap_freeze_tuple ( HeapTupleHeader  tuple,
TransactionId  cutoff_xid,
TransactionId  cutoff_multi 
)

Definition at line 6887 of file heapam.c.

References heap_execute_freeze_tuple(), and heap_prepare_freeze_tuple().

Referenced by rewrite_heap_tuple().

/*
 * heap_freeze_tuple - prepare and, if needed, immediately execute a freeze
 * plan for the given tuple header against the supplied XID/multixact
 * cutoffs.  Returns true iff the tuple was actually changed.  Not
 * WAL-logged; the caller (e.g. rewrite_heap_tuple) is responsible for any
 * durability requirements.
 *
 * NOTE(review): Doxygen extraction dropped one hyperlinked line; flagged
 * below -- confirm against the real heapam.c.
 */
6889 {
/* NOTE(review): dropped line 6890; presumably the declaration
 * xl_heap_freeze_tuple frz; used by the calls below */
6891  bool do_freeze;
6892  bool tuple_totally_frozen;
6893 
6894  do_freeze = heap_prepare_freeze_tuple(tuple, cutoff_xid, cutoff_multi,
6895  &frz, &tuple_totally_frozen);
6896 
6897  /*
6898  * Note that because this is not a WAL-logged operation, we don't need to
6899  * fill in the offset in the freeze record.
6900  */
6901 
6902  if (do_freeze)
6903  heap_execute_freeze_tuple(tuple, &frz);
6904  return do_freeze;
6905 }
void heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
Definition: heapam.c:6866
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
Definition: heapam.c:6673
void heap_get_latest_tid ( Relation  relation,
Snapshot  snapshot,
ItemPointer  tid 
)

Definition at line 2183 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BufferGetPage, CheckForSerializableConflictOut(), elog, ERROR, HEAP_XMAX_INVALID, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleSatisfiesVisibility, HeapTupleUpdateXmaxMatchesXmin(), InvalidTransactionId, ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, ReadBuffer(), RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TestForOldSnapshot(), TransactionIdIsValid, and UnlockReleaseBuffer().

Referenced by currtid_byrelname(), currtid_byreloid(), and TidNext().

/*
 * heap_get_latest_tid - follow the t_ctid update chain starting at *tid
 * and set *tid to the latest version of the row that is visible under the
 * given snapshot.  Tolerates bogus user-supplied TIDs: invalid pointers
 * return immediately, out-of-range block numbers raise an error, and a
 * broken chain simply leaves the best candidate found so far in *tid.
 *
 * NOTE(review): Doxygen extraction dropped two hyperlinked lines near the
 * end of the loop; flagged below -- confirm against the real heapam.c.
 */
2186 {
2187  BlockNumber blk;
2188  ItemPointerData ctid;
2189  TransactionId priorXmax;
2190 
2191  /* this is to avoid Assert failures on bad input */
2192  if (!ItemPointerIsValid(tid))
2193  return;
2194 
2195  /*
2196  * Since this can be called with user-supplied TID, don't trust the input
2197  * too much. (RelationGetNumberOfBlocks is an expensive check, so we
2198  * don't check t_ctid links again this way. Note that it would not do to
2199  * call it just once and save the result, either.)
2200  */
2201  blk = ItemPointerGetBlockNumber(tid);
2202  if (blk >= RelationGetNumberOfBlocks(relation))
2203  elog(ERROR, "block number %u is out of range for relation \"%s\"",
2204  blk, RelationGetRelationName(relation));
2205 
2206  /*
2207  * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
2208  * need to examine, and *tid is the TID we will return if ctid turns out
2209  * to be bogus.
2210  *
2211  * Note that we will loop until we reach the end of the t_ctid chain.
2212  * Depending on the snapshot passed, there might be at most one visible
2213  * version of the row, but we don't try to optimize for that.
2214  */
2215  ctid = *tid;
2216  priorXmax = InvalidTransactionId; /* cannot check first XMIN */
2217  for (;;)
2218  {
2219  Buffer buffer;
2220  Page page;
2221  OffsetNumber offnum;
2222  ItemId lp;
2223  HeapTupleData tp;
2224  bool valid;
2225 
2226  /*
2227  * Read, pin, and lock the page.
2228  */
2229  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
2230  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2231  page = BufferGetPage(buffer);
2232  TestForOldSnapshot(snapshot, relation, page);
2233 
2234  /*
2235  * Check for bogus item number. This is not treated as an error
2236  * condition because it can happen while following a t_ctid link. We
2237  * just assume that the prior tid is OK and return it unchanged.
2238  */
2239  offnum = ItemPointerGetOffsetNumber(&ctid);
2240  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
2241  {
2242  UnlockReleaseBuffer(buffer);
2243  break;
2244  }
2245  lp = PageGetItemId(page, offnum);
2246  if (!ItemIdIsNormal(lp))
2247  {
2248  UnlockReleaseBuffer(buffer);
2249  break;
2250  }
2251 
2252  /* OK to access the tuple */
2253  tp.t_self = ctid;
2254  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2255  tp.t_len = ItemIdGetLength(lp);
2256  tp.t_tableOid = RelationGetRelid(relation);
2257 
2258  /*
2259  * After following a t_ctid link, we might arrive at an unrelated
2260  * tuple. Check for XMIN match.
2261  */
2262  if (TransactionIdIsValid(priorXmax) &&
2263  !HeapTupleUpdateXmaxMatchesXmin(priorXmax, tp.t_data))
2264  {
2265  UnlockReleaseBuffer(buffer);
2266  break;
2267  }
2268 
2269  /*
2270  * Check time qualification of tuple; if visible, set it as the new
2271  * result candidate.
2272  */
2273  valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
2274  CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
2275  if (valid)
2276  *tid = ctid;
2277 
2278  /*
2279  * If there's a valid t_ctid link, follow it, else we're done.
2280  */
2281  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
/* NOTE(review): extraction dropped lines 2282-2283; per the References
 * list they are presumably the remaining disjuncts
 * HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
 * ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid)) */
2284  {
2285  UnlockReleaseBuffer(buffer);
2286  break;
2287  }
2288 
2289  ctid = tp.t_data->t_ctid;
2290  priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
2291  UnlockReleaseBuffer(buffer);
2292  } /* end of loop */
2293 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:359
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
Definition: tqual.c:1605
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:265
uint32 TransactionId
Definition: c.h:391
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
uint32 BlockNumber
Definition: block.h:31
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:3945
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:67
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:193
ItemPointerData t_ctid
Definition: htup_details.h:150
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define InvalidTransactionId
Definition: transam.h:31
#define RelationGetRelationName(relation)
Definition: rel.h:436
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
bool HeapTupleUpdateXmaxMatchesXmin(TransactionId xmax, HeapTupleHeader htup)
Definition: heapam.c:2304
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:199
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define TransactionIdIsValid(xid)
Definition: transam.h:41
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:416
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
HeapTuple heap_getnext ( HeapScanDesc  scan,
ScanDirection  direction 
)

Definition at line 1808 of file heapam.c.

References HEAPDEBUG_1, HEAPDEBUG_2, HEAPDEBUG_3, heapgettup(), heapgettup_pagemode(), pgstat_count_heap_getnext, HeapScanDescData::rs_ctup, HeapScanDescData::rs_key, HeapScanDescData::rs_nkeys, HeapScanDescData::rs_pageatatime, HeapScanDescData::rs_rd, and HeapTupleData::t_data.

Referenced by AlterDomainNotNull(), AlterTableMoveAll(), AlterTableSpaceOptions(), ATRewriteTable(), boot_openrel(), check_db_file_conflict(), check_default_allows_bound(), copy_heap_data(), CopyTo(), createdb(), DefineQueryRewrite(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), RelationFindReplTupleSeq(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), SeqNext(), systable_getnext(), ThereIsAtLeastOneRole(), vac_truncate_clog(), validate_index_heapscan(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

/*
 * heap_getnext - advance the scan in the given direction and return the
 * next tuple, or NULL at end of scan.  Dispatches to the page-at-a-time
 * variant when rs_pageatatime is set; the returned pointer addresses
 * scan->rs_ctup, which remains valid until the next call.
 *
 * NOTE(review): Doxygen extraction dropped one hyperlinked line; flagged
 * below, inferred from the References list -- confirm against heapam.c.
 */
1809 {
1810  /* Note: no locking manipulations needed */
1811 
1812  HEAPDEBUG_1; /* heap_getnext( info ) */
1813 
1814  if (scan->rs_pageatatime)
1815  heapgettup_pagemode(scan, direction,
1816  scan->rs_nkeys, scan->rs_key);
1817  else
1818  heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);
1819 
1820  if (scan->rs_ctup.t_data == NULL)
1821  {
1822  HEAPDEBUG_2; /* heap_getnext returning EOS */
1823  return NULL;
1824  }
1825 
1826  /*
1827  * if we get here it means we have a new current scan tuple, so point to
1828  * the proper return buffer and return the tuple.
1829  */
1830  HEAPDEBUG_3; /* heap_getnext returning tuple */
1831 
/* NOTE(review): dropped line 1832; presumably
 * pgstat_count_heap_getnext(scan->rs_rd); */
1833 
1834  return &(scan->rs_ctup);
1835 }
#define HEAPDEBUG_2
Definition: heapam.c:1802
HeapTupleData rs_ctup
Definition: relscan.h:69
HeapTupleHeader t_data
Definition: htup.h:67
bool rs_pageatatime
Definition: relscan.h:54
#define HEAPDEBUG_1
Definition: heapam.c:1801
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:481
Relation rs_rd
Definition: relscan.h:48
#define HEAPDEBUG_3
Definition: heapam.c:1803
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1265
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:785
ScanKey rs_key
Definition: relscan.h:51
bool heap_hot_search ( ItemPointer  tid,
Relation  relation,
Snapshot  snapshot,
bool all_dead 
)

Definition at line 2155 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, heap_hot_search_buffer(), ItemPointerGetBlockNumber, LockBuffer(), ReadBuffer(), and ReleaseBuffer().

Referenced by _bt_check_unique(), and unique_key_recheck().

/*
 * heap_hot_search - convenience wrapper around heap_hot_search_buffer():
 * reads and share-locks the page containing *tid, searches the HOT chain
 * for a tuple visible under the snapshot, then unlocks and unpins the
 * buffer.  Returns the result of the buffer-level search; *all_dead (if
 * non-NULL) is set by the callee.
 */
2157 {
2158  bool result;
2159  Buffer buffer;
2160  HeapTupleData heapTuple;
2161 
2162  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
2163  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2164  result = heap_hot_search_buffer(tid, relation, buffer, snapshot,
2165  &heapTuple, all_dead, true);
2166  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2167  ReleaseBuffer(buffer);
2168  return result;
2169 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
Definition: heapam.c:2011
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
int Buffer
Definition: buf.h:23
bool heap_hot_search_buffer ( ItemPointer  tid,
Relation  relation,
Buffer  buffer,
Snapshot  snapshot,
HeapTuple  heapTuple,
bool all_dead,
bool  first_call 
)

Definition at line 2011 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, CheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleIsSurelyDead(), HeapTupleSatisfiesVisibility, HeapTupleUpdateXmaxMatchesXmin(), InvalidTransactionId, ItemIdGetLength, ItemIdGetRedirect, ItemIdIsNormal, ItemIdIsRedirected, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSet, ItemPointerSetOffsetNumber, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTuple(), RecentGlobalXmin, RelationGetRelid, skip(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and TransactionIdIsValid.

Referenced by bitgetpage(), heap_hot_search(), and index_fetch_heap().

/*
 * heap_hot_search_buffer -- walk the HOT chain starting at *tid within an
 * already pinned-and-locked buffer, looking for a member visible under
 * 'snapshot'.  On success returns true, updates *tid's offset to the
 * matching member, and takes a predicate lock on it.  If 'all_dead' is
 * non-NULL it is set true only when every chain member examined is surely
 * dead to all transactions.  'first_call' distinguishes a fresh search
 * from a continuation that must skip the previously returned tuple.
 *
 * NOTE(review): this listing is a lossy extraction; a few statements are
 * missing (flagged inline below) -- verify against the upstream file.
 */
2014 {
2015  Page dp = (Page) BufferGetPage(buffer);
2016  TransactionId prev_xmax = InvalidTransactionId;
2017  OffsetNumber offnum;
2018  bool at_chain_start;
2019  bool valid;
2020  bool skip;
2021 
2022  /* If this is not the first call, previous call returned a (live!) tuple */
2023  if (all_dead)
2024  *all_dead = first_call;
2025 
/*
 * NOTE(review): the statement on source line 2027 was dropped by the
 * extraction; the References list names Assert and RecentGlobalXmin,
 * suggesting Assert(TransactionIdIsValid(RecentGlobalXmin)) -- confirm.
 */
2027 
2029  offnum = ItemPointerGetOffsetNumber(tid);
2030  at_chain_start = first_call;
2031  skip = !first_call;
2032 
2033  heapTuple->t_self = *tid;
2034 
2035  /* Scan through possible multiple members of HOT-chain */
2036  for (;;)
2037  {
2038  ItemId lp;
2039 
2040  /* check for bogus TID */
2041  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
2042  break;
2043 
2044  lp = PageGetItemId(dp, offnum);
2045 
2046  /* check for unused, dead, or redirected items */
2047  if (!ItemIdIsNormal(lp))
2048  {
2049  /* We should only see a redirect at start of chain */
2050  if (ItemIdIsRedirected(lp) && at_chain_start)
2051  {
2052  /* Follow the redirect */
2053  offnum = ItemIdGetRedirect(lp);
2054  at_chain_start = false;
2055  continue;
2056  }
2057  /* else must be end of chain */
2058  break;
2059  }
2060 
/* Materialize this chain member into the caller-supplied HeapTupleData */
2061  heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
2062  heapTuple->t_len = ItemIdGetLength(lp);
2063  heapTuple->t_tableOid = RelationGetRelid(relation);
2064  ItemPointerSetOffsetNumber(&heapTuple->t_self, offnum);
2065 
2066  /*
2067  * Shouldn't see a HEAP_ONLY tuple at chain start.
2068  */
2069  if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
2070  break;
2071 
2072  /*
2073  * The xmin should match the previous xmax value, else chain is
2074  * broken.
2075  */
2076  if (TransactionIdIsValid(prev_xmax) &&
2077  !HeapTupleUpdateXmaxMatchesXmin(prev_xmax, heapTuple->t_data))
2078  break;
2079 
2080  /*
2081  * When first_call is true (and thus, skip is initially false) we'll
2082  * return the first tuple we find. But on later passes, heapTuple
2083  * will initially be pointing to the tuple we returned last time.
2084  * Returning it again would be incorrect (and would loop forever), so
2085  * we skip it and return the next match we find.
2086  */
2087  if (!skip)
2088  {
2089  /*
2090  * For the benefit of logical decoding, have t_self point at the
2091  * element of the HOT chain we're currently investigating instead
2092  * of the root tuple of the HOT chain. This is important because
2093  * the *Satisfies routine for historical mvcc snapshots needs the
2094  * correct tid to decide about the visibility in some cases.
2095  */
2096  ItemPointerSet(&(heapTuple->t_self), BufferGetBlockNumber(buffer), offnum);
2097 
2098  /* If it's visible per the snapshot, we must return it */
2099  valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
2100  CheckForSerializableConflictOut(valid, relation, heapTuple,
2101  buffer, snapshot);
2102  /* reset to original, non-redirected, tid */
2103  heapTuple->t_self = *tid;
2104 
2105  if (valid)
2106  {
2107  ItemPointerSetOffsetNumber(tid, offnum);
2108  PredicateLockTuple(relation, heapTuple, snapshot);
2109  if (all_dead)
2110  *all_dead = false;
2111  return true;
2112  }
2113  }
2114  skip = false;
2115 
2116  /*
2117  * If we can't see it, maybe no one else can either. At caller
2118  * request, check whether all chain members are dead to all
2119  * transactions.
2120  *
2121  * Note: if you change the criterion here for what is "dead", fix the
2122  * planner's get_actual_variable_range() function to match.
2123  */
2124  if (all_dead && *all_dead &&
/*
 * NOTE(review): source line 2125 (the third conjunct) was dropped by the
 * extraction; the References list names HeapTupleIsSurelyDead and
 * RecentGlobalXmin, suggesting
 * !HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin) -- confirm upstream.
 */
2126  *all_dead = false;
2127 
2128  /*
2129  * Check to see if HOT chain continues past this tuple; if so fetch
2130  * the next offnum and loop around.
2131  */
2132  if (HeapTupleIsHotUpdated(heapTuple))
2133  {
/*
 * NOTE(review): source lines 2134-2135 were dropped by the extraction;
 * they presumably asserted that t_ctid stays on this same block before
 * following it -- verify against the upstream file.
 */
2136  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
2137  at_chain_start = false;
2138  prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
2139  }
2140  else
2141  break; /* end of chain */
2142  }
2143 
2144  return false;
2145 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:359
static void skip(struct vars *v)
Definition: regc_lex.c:1109
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:105
uint32 TransactionId
Definition: c.h:391
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:77
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:3945
bool HeapTupleIsSurelyDead(HeapTuple htup, TransactionId OldestXmin)
Definition: tqual.c:1429
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleIsHotUpdated(tuple)
Definition: htup_details.h:677
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
ItemPointerData t_ctid
Definition: htup_details.h:150
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
TransactionId RecentGlobalXmin
Definition: snapmgr.c:166
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
bool HeapTupleUpdateXmaxMatchesXmin(TransactionId xmax, HeapTupleHeader htup)
Definition: heapam.c:2304
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:686
#define Assert(condition)
Definition: c.h:681
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
void PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
Definition: predicate.c:2543
#define ItemPointerSetOffsetNumber(pointer, offsetNumber)
Definition: itemptr.h:126
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define TransactionIdIsValid(xid)
Definition: transam.h:41
#define RelationGetRelid(relation)
Definition: rel.h:416
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:105
void heap_inplace_update ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6290 of file heapam.c.

References buffer, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, CacheInvalidateHeapTuple(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, IsBootstrapProcessingMode, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), xl_heap_inplace::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapInplace, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleHeaderData::t_hoff, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_INPLACE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by create_toast_table(), index_set_state_flags(), index_update_stats(), vac_update_datfrozenxid(), and vac_update_relstats().

/*
 * heap_inplace_update -- overwrite a tuple's data area in place, without
 * MVCC versioning.  Requires old and new tuple data to be the same length
 * and header offset; errors out otherwise.  WAL-logs the change when the
 * relation needs WAL, and sends a catcache invalidation afterwards.
 *
 * NOTE(review): this listing is a lossy extraction; a few statements are
 * missing (flagged inline below) -- verify against the upstream file.
 */
6291 {
6292  Buffer buffer;
6293  Page page;
6294  OffsetNumber offnum;
6295  ItemId lp = NULL;
6296  HeapTupleHeader htup;
6297  uint32 oldlen;
6298  uint32 newlen;
6299 
6300  /*
6301  * For now, parallel operations are required to be strictly read-only.
6302  * Unlike a regular update, this should never create a combo CID, so it
6303  * might be possible to relax this restriction, but not without more
6304  * thought and testing. It's not clear that it would be useful, anyway.
6305  */
6306  if (IsInParallelMode())
6307  ereport(ERROR,
6308  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
6309  errmsg("cannot update tuples during a parallel operation")));
6310 
6311  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
/*
 * NOTE(review): source line 6312 was dropped by the extraction; the
 * References list includes BUFFER_LOCK_EXCLUSIVE and LockBuffer(),
 * suggesting LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE) -- confirm.
 */
6313  page = (Page) BufferGetPage(buffer);
6314 
6315  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6316  if (PageGetMaxOffsetNumber(page) >= offnum)
6317  lp = PageGetItemId(page, offnum);
6318 
/* Reject a TID that points past the line-pointer array or at a non-normal item */
6319  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6320  elog(ERROR, "invalid lp");
6321 
6322  htup = (HeapTupleHeader) PageGetItem(page, lp);
6323 
/* In-place overwrite only works if data length and header offset match exactly */
6324  oldlen = ItemIdGetLength(lp) - htup->t_hoff;
6325  newlen = tuple->t_len - tuple->t_data->t_hoff;
6326  if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6327  elog(ERROR, "wrong tuple length");
6328 
6329  /* NO EREPORT(ERROR) from here till changes are logged */
/*
 * NOTE(review): source line 6330 was dropped by the extraction;
 * presumably START_CRIT_SECTION(), matching the END_CRIT_SECTION()
 * below and the comment above -- confirm upstream.
 */
6331 
6332  memcpy((char *) htup + htup->t_hoff,
6333  (char *) tuple->t_data + tuple->t_data->t_hoff,
6334  newlen);
6335 
6336  MarkBufferDirty(buffer);
6337 
6338  /* XLOG stuff */
6339  if (RelationNeedsWAL(relation))
6340  {
6341  xl_heap_inplace xlrec;
6342  XLogRecPtr recptr;
6343 
6344  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6345 
6346  XLogBeginInsert();
6347  XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
6348 
6349  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6350  XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
6351 
6352  /* inplace updates aren't decoded atm, don't log the origin */
6353 
6354  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6355 
6356  PageSetLSN(page, recptr);
6357  }
6358 
6359  END_CRIT_SECTION();
6360 
6361  UnlockReleaseBuffer(buffer);
6362 
6363  /*
6364  * Send out shared cache inval if necessary. Note that because we only
6365  * pass the new version of the tuple, this mustn't be used for any
6366  * operations that could change catcache lookup keys. But we aren't
6367  * bothering with index updates either, so that's true a fortiori.
6368  */
/*
 * NOTE(review): source line 6369 was dropped by the extraction; the
 * References list includes IsBootstrapProcessingMode, suggesting
 * if (!IsBootstrapProcessingMode()) guards the call below -- confirm.
 */
6370  CacheInvalidateHeapTuple(relation, tuple, NULL);
6371 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:361
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1094
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define SizeOfHeapInplace
Definition: heapam_xlog.h:286
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
int errcode(int sqlerrcode)
Definition: elog.c:575
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:67
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
bool IsInParallelMode(void)
Definition: xact.c:906
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define ERROR
Definition: elog.h:43
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define REGBUF_STANDARD
Definition: xloginsert.h:34
unsigned int uint32
Definition: c.h:258
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define ereport(elevel, rest)
Definition: elog.h:122
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
OffsetNumber offnum
Definition: heapam_xlog.h:282
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define XLOG_HEAP_INPLACE
Definition: heapam_xlog.h:39
#define RelationNeedsWAL(relation)
Definition: rel.h:505
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:367
int errmsg(const char *fmt,...)
Definition: elog.c:797
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
Oid heap_insert ( Relation  relation,
HeapTuple  tup,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)

Definition at line 2456 of file heapam.c.

References Assert, buffer, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), END_CRIT_SECTION, FirstOffsetNumber, xl_heap_insert::flags, GetCurrentTransactionId(), heap_freetuple(), HEAP_INSERT_SKIP_WAL, HEAP_INSERT_SPECULATIVE, heap_prepare_insert(), HeapTupleGetOid, InvalidBuffer, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, log_heap_new_cid(), MarkBufferDirty(), xl_heap_insert::offnum, PageClearAllVisible, PageGetMaxOffsetNumber, PageIsAllVisible, PageSetLSN, pgstat_count_heap_insert(), REGBUF_KEEP_DATA, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetBufferForTuple(), RelationIsAccessibleInLogicalDecoding, RelationIsLogicallyLogged, RelationNeedsWAL, RelationPutHeapTuple(), ReleaseBuffer(), SizeOfHeapHeader, SizeOfHeapInsert, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), visibilitymap_clear(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_VISIBLE_CLEARED, XLH_INSERT_CONTAINS_NEW_TUPLE, XLH_INSERT_IS_SPECULATIVE, XLOG_HEAP_INIT_PAGE, XLOG_HEAP_INSERT, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by ATRewriteTable(), CopyFrom(), ExecInsert(), intorel_receive(), simple_heap_insert(), toast_save_datum(), and transientrel_receive().

/*
 * heap_insert -- insert a tuple into a heap relation.  Prepares/toasts the
 * tuple, finds a target page (pinning the visibility-map page if needed),
 * places the tuple, clears the all-visible bit when set, WAL-logs the
 * insert unless HEAP_INSERT_SKIP_WAL, and fires cache invalidation and
 * stats counting.  Returns the OID assigned to the caller's tuple.
 *
 * NOTE(review): this listing is a lossy extraction; several statements
 * are missing (flagged inline below) -- verify against the upstream file.
 */
2458 {
/*
 * NOTE(review): source line 2459 was dropped by the extraction; the
 * References list includes GetCurrentTransactionId(), and 'xid' is used
 * below, suggesting TransactionId xid = GetCurrentTransactionId(); -- confirm.
 */
2460  HeapTuple heaptup;
2461  Buffer buffer;
2462  Buffer vmbuffer = InvalidBuffer;
2463  bool all_visible_cleared = false;
2464 
2465  /*
2466  * Fill in tuple header fields, assign an OID, and toast the tuple if
2467  * necessary.
2468  *
2469  * Note: below this point, heaptup is the data we actually intend to store
2470  * into the relation; tup is the caller's original untoasted data.
2471  */
2472  heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2473 
2474  /*
2475  * Find buffer to insert this tuple into. If the page is all visible,
2476  * this will also pin the requisite visibility map page.
2477  */
2478  buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2479  InvalidBuffer, options, bistate,
2480  &vmbuffer, NULL);
2481 
2482  /*
2483  * We're about to do the actual insert -- but check for conflict first, to
2484  * avoid possibly having to roll back work we've just done.
2485  *
2486  * This is safe without a recheck as long as there is no possibility of
2487  * another process scanning the page between this check and the insert
2488  * being visible to the scan (i.e., an exclusive buffer content lock is
2489  * continuously held from this point until the tuple insert is visible).
2490  *
2491  * For a heap insert, we only need to check for table-level SSI locks. Our
2492  * new tuple can't possibly conflict with existing tuple locks, and heap
2493  * page locks are only consolidated versions of tuple locks; they do not
2494  * lock "gaps" as index page locks do. So we don't need to specify a
2495  * buffer when making the call, which makes for a faster check.
2496  */
/*
 * NOTE(review): source line 2497 was dropped by the extraction; the
 * References list includes CheckForSerializableConflictIn(), suggesting
 * CheckForSerializableConflictIn(relation, NULL, InvalidBuffer) -- confirm.
 */
2498 
2499  /* NO EREPORT(ERROR) from here till changes are logged */
/*
 * NOTE(review): source line 2500 was dropped by the extraction;
 * presumably START_CRIT_SECTION(), matching END_CRIT_SECTION() below.
 */
2501 
2502  RelationPutHeapTuple(relation, buffer, heaptup,
2503  (options & HEAP_INSERT_SPECULATIVE) != 0);
2504 
2505  if (PageIsAllVisible(BufferGetPage(buffer)))
2506  {
2507  all_visible_cleared = true;
/*
 * NOTE(review): source line 2508 was dropped by the extraction; the
 * References list includes PageClearAllVisible, suggesting
 * PageClearAllVisible(BufferGetPage(buffer)) -- confirm.
 */
2509  visibilitymap_clear(relation,
2510  ItemPointerGetBlockNumber(&(heaptup->t_self)),
2511  vmbuffer, VISIBILITYMAP_VALID_BITS);
2512  }
2513 
2514  /*
2515  * XXX Should we set PageSetPrunable on this page ?
2516  *
2517  * The inserting transaction may eventually abort thus making this tuple
2518  * DEAD and hence available for pruning. Though we don't want to optimize
2519  * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2520  * aborted tuple will never be pruned until next vacuum is triggered.
2521  *
2522  * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2523  */
2524 
2525  MarkBufferDirty(buffer);
2526 
2527  /* XLOG stuff */
2528  if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
2529  {
2530  xl_heap_insert xlrec;
2531  xl_heap_header xlhdr;
2532  XLogRecPtr recptr;
2533  Page page = BufferGetPage(buffer);
2534  uint8 info = XLOG_HEAP_INSERT;
2535  int bufflags = 0;
2536 
2537  /*
2538  * If this is a catalog, we need to transmit combocids to properly
2539  * decode, so log that as well.
2540  */
/*
 * NOTE(review): source line 2541 was dropped by the extraction; the
 * References list includes RelationIsAccessibleInLogicalDecoding,
 * suggesting if (RelationIsAccessibleInLogicalDecoding(relation))
 * guards the call below -- confirm.
 */
2542  log_heap_new_cid(relation, heaptup);
2543 
2544  /*
2545  * If this is the single and first tuple on page, we can reinit the
2546  * page instead of restoring the whole thing. Set flag, and hide
2547  * buffer references from XLogInsert.
2548  */
2549  if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
/*
 * NOTE(review): source line 2550 (second conjunct) was dropped by the
 * extraction; presumably PageGetMaxOffsetNumber(page) == FirstOffsetNumber
 * -- confirm upstream.
 */
2551  {
2552  info |= XLOG_HEAP_INIT_PAGE;
2553  bufflags |= REGBUF_WILL_INIT;
2554  }
2555 
2556  xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2557  xlrec.flags = 0;
2558  if (all_visible_cleared)
/*
 * NOTE(review): source lines 2559-2562 were dropped by the extraction;
 * the References list includes XLH_INSERT_ALL_VISIBLE_CLEARED and
 * XLH_INSERT_IS_SPECULATIVE, suggesting flag assignments such as
 * xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED here -- confirm.
 */
2563 
2564  /*
2565  * For logical decoding, we need the tuple even if we're doing a full
2566  * page write, so make sure it's included even if we take a full-page
2567  * image. (XXX We could alternatively store a pointer into the FPW).
2568  */
2569  if (RelationIsLogicallyLogged(relation))
2570  {
/*
 * NOTE(review): source line 2571 was dropped by the extraction; the
 * References list includes XLH_INSERT_CONTAINS_NEW_TUPLE, suggesting
 * xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE here -- confirm.
 */
2572  bufflags |= REGBUF_KEEP_DATA;
2573  }
2574 
2575  XLogBeginInsert();
2576  XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
2577 
2578  xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2579  xlhdr.t_infomask = heaptup->t_data->t_infomask;
2580  xlhdr.t_hoff = heaptup->t_data->t_hoff;
2581 
2582  /*
2583  * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2584  * write the whole page to the xlog, we don't need to store
2585  * xl_heap_header in the xlog.
2586  */
2587  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2588  XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
2589  /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
/*
 * NOTE(review): source line 2590 was dropped by the extraction; the
 * continuation arguments below imply the call head
 * XLogRegisterBufData(0, ... -- confirm upstream.
 */
2591  (char *) heaptup->t_data + SizeofHeapTupleHeader,
2592  heaptup->t_len - SizeofHeapTupleHeader);
2593 
2594  /* filtering by origin on a row level is much more efficient */
/*
 * NOTE(review): source line 2595 was dropped by the extraction; the
 * References list includes XLogSetRecordFlags() and XLOG_INCLUDE_ORIGIN,
 * suggesting XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN) here -- confirm.
 */
2596 
2597  recptr = XLogInsert(RM_HEAP_ID, info);
2598 
2599  PageSetLSN(page, recptr);
2600  }
2601 
2602  END_CRIT_SECTION();
2603 
2604  UnlockReleaseBuffer(buffer);
2605  if (vmbuffer != InvalidBuffer)
2606  ReleaseBuffer(vmbuffer);
2607 
2608  /*
2609  * If tuple is cachable, mark it for invalidation from the caches in case
2610  * we abort. Note it is OK to do this after releasing the buffer, because
2611  * the heaptup data structure is all in local memory, not in the shared
2612  * buffer.
2613  */
2614  CacheInvalidateHeapTuple(relation, heaptup, NULL);
2615 
2616  /* Note: speculative insertions are counted too, even if aborted later */
2617  pgstat_count_heap_insert(relation, 1);
2618 
2619  /*
2620  * If heaptup is a private copy, release it. Don't forget to copy t_self
2621  * back to the caller's image, too.
2622  */
2623  if (heaptup != tup)
2624  {
2625  tup->t_self = heaptup->t_self;
2626  heap_freetuple(heaptup);
2627  }
2628 
2629  return HeapTupleGetOid(tup);
2630 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:361
#define SizeofHeapTupleHeader
Definition: htup_details.h:170
#define XLOG_HEAP_INSERT
Definition: heapam_xlog.h:32
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7812
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1094
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
Definition: heapam.c:2640
#define PageIsAllVisible(page)
Definition: bufpage.h:381
uint32 TransactionId
Definition: c.h:391
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
unsigned char uint8
Definition: c.h:256
#define XLH_INSERT_IS_SPECULATIVE
Definition: heapam_xlog.h:68
#define InvalidBuffer
Definition: buf.h:25
#define REGBUF_WILL_INIT
Definition: xloginsert.h:32
uint16 t_infomask2
Definition: heapam_xlog.h:122
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:192
#define HEAP_INSERT_SKIP_WAL
Definition: heapam.h:28
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:575
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1373
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
void RelationPutHeapTuple(Relation relation, Buffer buffer, HeapTuple tuple, bool token)
Definition: hio.c:36
void CheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
Definition: predicate.c:4326
#define XLOG_HEAP_INIT_PAGE
Definition: heapam_xlog.h:46
#define HEAP_INSERT_SPECULATIVE
Definition: heapam.h:31
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:67
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define XLH_INSERT_CONTAINS_NEW_TUPLE
Definition: heapam_xlog.h:69
ItemPointerData t_self
Definition: htup.h:65
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:418
uint32 t_len
Definition: htup.h:64
#define FirstOffsetNumber
Definition: off.h:27
#define REGBUF_STANDARD
Definition: xloginsert.h:34
Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer otherBuffer, int options, BulkInsertState bistate, Buffer *vmbuffer, Buffer *vmbuffer_other)
Definition: hio.c:297
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:559
#define REGBUF_KEEP_DATA
Definition: xloginsert.h:37
#define PageClearAllVisible(page)
Definition: bufpage.h:385
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:681
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
uint16 t_infomask
Definition: heapam_xlog.h:123
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:505
#define SizeOfHeapInsert
Definition: heapam_xlog.h:138
#define XLH_INSERT_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:66
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
void pgstat_count_heap_insert(Relation rel, PgStat_Counter n)
Definition: pgstat.c:1907
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:695
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
OffsetNumber offnum
Definition: heapam_xlog.h:132
#define SizeOfHeapHeader
Definition: heapam_xlog.h:127
Pointer Page
Definition: bufpage.h:74
HTSU_Result heap_lock_tuple ( Relation  relation,
HeapTuple  tuple,
CommandId  cid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool  follow_updates,
Buffer buffer,
HeapUpdateFailureData hufd 
)

Definition at line 4603 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, HeapUpdateFailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), ConditionalMultiXactIdWait(), ConditionalXactLockTableWait(), HeapUpdateFailureData::ctid, DoesMultiXactIdConflict(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, xl_heap_lock::flags, get_mxact_status_for_lock(), GetCurrentTransactionId(), GetMultiXactIdMembers(), heap_acquire_tuplock(), HEAP_KEYS_UPDATED, heap_lock_updated_tuple(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HeapTupleBeingUpdated, HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetXmax, HeapTupleInvisible, HeapTupleMayBeUpdated, HeapTupleSatisfiesUpdate(), HeapTupleSelfUpdated, HeapTupleUpdated, HeapTupleWouldBlock, i, xl_heap_lock::infobits_set, InvalidBuffer, InvalidCommandId, ItemIdGetLength, ItemIdIsNormal, ItemPointerCopy, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), xl_heap_lock::locking_xid, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, LockWaitBlock, LockWaitError, LockWaitSkip, MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusNoKeyUpdate, xl_heap_lock::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, pfree(), ReadBuffer(), REGBUF_STANDARD, RelationGetRelationName, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapLock, START_CRIT_SECTION, status(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), TUPLOCK_from_mxstatus, UnlockTupleTuplock, UpdateXmaxHintBits(), 
VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XactLockTableWait(), XLH_LOCK_ALL_FROZEN_CLEARED, XLOG_HEAP_LOCK, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLTW_Lock, HeapUpdateFailureData::xmax, and xmax_infomask_changed().

Referenced by EvalPlanQualFetch(), ExecLockRows(), ExecOnConflictUpdate(), GetTupleForTrigger(), RelationFindReplTupleByIndex(), and RelationFindReplTupleSeq().

4607 {
4608  HTSU_Result result;
4609  ItemPointer tid = &(tuple->t_self);
4610  ItemId lp;
4611  Page page;
4612  Buffer vmbuffer = InvalidBuffer;
4613  BlockNumber block;
4614  TransactionId xid,
4615  xmax;
4616  uint16 old_infomask,
4617  new_infomask,
4618  new_infomask2;
4619  bool first_time = true;
4620  bool have_tuple_lock = false;
4621  bool cleared_all_frozen = false;
4622 
4623  *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4624  block = ItemPointerGetBlockNumber(tid);
4625 
4626  /*
4627  * Before locking the buffer, pin the visibility map page if it appears to
4628  * be necessary. Since we haven't got the lock yet, someone else might be
4629  * in the middle of changing this, so we'll need to recheck after we have
4630  * the lock.
4631  */
4633  visibilitymap_pin(relation, block, &vmbuffer);
4634 
4636 
4637  page = BufferGetPage(*buffer);
4638  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4639  Assert(ItemIdIsNormal(lp));
4640 
4641  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4642  tuple->t_len = ItemIdGetLength(lp);
4643  tuple->t_tableOid = RelationGetRelid(relation);
4644 
4645 l3:
4646  result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4647 
4648  if (result == HeapTupleInvisible)
4649  {
4650  /*
4651  * This is possible, but only when locking a tuple for ON CONFLICT
4652  * UPDATE. We return this value here rather than throwing an error in
4653  * order to give that case the opportunity to throw a more specific
4654  * error.
4655  */
4656  result = HeapTupleInvisible;
4657  goto out_locked;
4658  }
4659  else if (result == HeapTupleBeingUpdated || result == HeapTupleUpdated)
4660  {
4661  TransactionId xwait;
4662  uint16 infomask;
4663  uint16 infomask2;
4664  bool require_sleep;
4665  ItemPointerData t_ctid;
4666 
4667  /* must copy state data before unlocking buffer */
4668  xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4669  infomask = tuple->t_data->t_infomask;
4670  infomask2 = tuple->t_data->t_infomask2;
4671  ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4672 
4674 
4675  /*
4676  * If any subtransaction of the current top transaction already holds
4677  * a lock as strong as or stronger than what we're requesting, we
4678  * effectively hold the desired lock already. We *must* succeed
4679  * without trying to take the tuple lock, else we will deadlock
4680  * against anyone wanting to acquire a stronger lock.
4681  *
4682  * Note we only do this the first time we loop on the HTSU result;
4683  * there is no point in testing in subsequent passes, because
4684  * evidently our own transaction cannot have acquired a new lock after
4685  * the first time we checked.
4686  */
4687  if (first_time)
4688  {
4689  first_time = false;
4690 
4691  if (infomask & HEAP_XMAX_IS_MULTI)
4692  {
4693  int i;
4694  int nmembers;
4695  MultiXactMember *members;
4696 
4697  /*
4698  * We don't need to allow old multixacts here; if that had
4699  * been the case, HeapTupleSatisfiesUpdate would have returned
4700  * MayBeUpdated and we wouldn't be here.
4701  */
4702  nmembers =
4703  GetMultiXactIdMembers(xwait, &members, false,
4704  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4705 
4706  for (i = 0; i < nmembers; i++)
4707  {
4708  /* only consider members of our own transaction */
4709  if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4710  continue;
4711 
4712  if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4713  {
4714  pfree(members);
4715  result = HeapTupleMayBeUpdated;
4716  goto out_unlocked;
4717  }
4718  }
4719 
4720  if (members)
4721  pfree(members);
4722  }
4723  else if (TransactionIdIsCurrentTransactionId(xwait))
4724  {
4725  switch (mode)
4726  {
4727  case LockTupleKeyShare:
4728  Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4729  HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4730  HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4731  result = HeapTupleMayBeUpdated;
4732  goto out_unlocked;
4733  case LockTupleShare:
4734  if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4735  HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4736  {
4737  result = HeapTupleMayBeUpdated;
4738  goto out_unlocked;
4739  }
4740  break;
4742  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4743  {
4744  result = HeapTupleMayBeUpdated;
4745  goto out_unlocked;
4746  }
4747  break;
4748  case LockTupleExclusive:
4749  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4750  infomask2 & HEAP_KEYS_UPDATED)
4751  {
4752  result = HeapTupleMayBeUpdated;
4753  goto out_unlocked;
4754  }
4755  break;
4756  }
4757  }
4758  }
4759 
4760  /*
4761  * Initially assume that we will have to wait for the locking
4762  * transaction(s) to finish. We check various cases below in which
4763  * this can be turned off.
4764  */
4765  require_sleep = true;
4766  if (mode == LockTupleKeyShare)
4767  {
4768  /*
4769  * If we're requesting KeyShare, and there's no update present, we
4770  * don't need to wait. Even if there is an update, we can still
4771  * continue if the key hasn't been modified.
4772  *
4773  * However, if there are updates, we need to walk the update chain
4774  * to mark future versions of the row as locked, too. That way,
4775  * if somebody deletes that future version, we're protected
4776  * against the key going away. This locking of future versions
4777  * could block momentarily, if a concurrent transaction is
4778  * deleting a key; or it could return a value to the effect that
4779  * the transaction deleting the key has already committed. So we
4780  * do this before re-locking the buffer; otherwise this would be
4781  * prone to deadlocks.
4782  *
4783  * Note that the TID we're locking was grabbed before we unlocked
4784  * the buffer. For it to change while we're not looking, the
4785  * other properties we're testing for below after re-locking the
4786  * buffer would also change, in which case we would restart this
4787  * loop above.
4788  */
4789  if (!(infomask2 & HEAP_KEYS_UPDATED))
4790  {
4791  bool updated;
4792 
4793  updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4794 
4795  /*
4796  * If there are updates, follow the update chain; bail out if
4797  * that cannot be done.
4798  */
4799  if (follow_updates && updated)
4800  {
4801  HTSU_Result res;
4802 
4803  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4805  mode);
4806  if (res != HeapTupleMayBeUpdated)
4807  {
4808  result = res;
4809  /* recovery code expects to have buffer lock held */
4811  goto failed;
4812  }
4813  }
4814 
4816 
4817  /*
4818  * Make sure it's still an appropriate lock, else start over.
4819  * Also, if it wasn't updated before we released the lock, but
4820  * is updated now, we start over too; the reason is that we
4821  * now need to follow the update chain to lock the new
4822  * versions.
4823  */
4824  if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4825  ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4826  !updated))
4827  goto l3;
4828 
4829  /* Things look okay, so we can skip sleeping */
4830  require_sleep = false;
4831 
4832  /*
4833  * Note we allow Xmax to change here; other updaters/lockers
4834  * could have modified it before we grabbed the buffer lock.
4835  * However, this is not a problem, because with the recheck we
4836  * just did we ensure that they still don't conflict with the
4837  * lock we want.
4838  */
4839  }
4840  }
4841  else if (mode == LockTupleShare)
4842  {
4843  /*
4844  * If we're requesting Share, we can similarly avoid sleeping if
4845  * there's no update and no exclusive lock present.
4846  */
4847  if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4848  !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4849  {
4851 
4852  /*
4853  * Make sure it's still an appropriate lock, else start over.
4854  * See above about allowing xmax to change.
4855  */
4856  if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4858  goto l3;
4859  require_sleep = false;
4860  }
4861  }
4862  else if (mode == LockTupleNoKeyExclusive)
4863  {
4864  /*
4865  * If we're requesting NoKeyExclusive, we might also be able to
4866  * avoid sleeping; just ensure that there no conflicting lock
4867  * already acquired.
4868  */
4869  if (infomask & HEAP_XMAX_IS_MULTI)
4870  {
4871  if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4872  mode))
4873  {
4874  /*
4875  * No conflict, but if the xmax changed under us in the
4876  * meantime, start over.
4877  */
4879  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4881  xwait))
4882  goto l3;
4883 
4884  /* otherwise, we're good */
4885  require_sleep = false;
4886  }
4887  }
4888  else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4889  {
4891 
4892  /* if the xmax changed in the meantime, start over */
4893  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4896  xwait))
4897  goto l3;
4898  /* otherwise, we're good */
4899  require_sleep = false;
4900  }
4901  }
4902 
4903  /*
4904  * As a check independent from those above, we can also avoid sleeping
4905  * if the current transaction is the sole locker of the tuple. Note
4906  * that the strength of the lock already held is irrelevant; this is
4907  * not about recording the lock in Xmax (which will be done regardless
4908  * of this optimization, below). Also, note that the cases where we
4909  * hold a lock stronger than we are requesting are already handled
4910  * above by not doing anything.
4911  *
4912  * Note we only deal with the non-multixact case here; MultiXactIdWait
4913  * is well equipped to deal with this situation on its own.
4914  */
4915  if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4917  {
4918  /* ... but if the xmax changed in the meantime, start over */
4920  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4922  xwait))
4923  goto l3;
4925  require_sleep = false;
4926  }
4927 
4928  /*
4929  * Time to sleep on the other transaction/multixact, if necessary.
4930  *
4931  * If the other transaction is an update that's already committed,
4932  * then sleeping cannot possibly do any good: if we're required to
4933  * sleep, get out to raise an error instead.
4934  *
4935  * By here, we either have already acquired the buffer exclusive lock,
4936  * or we must wait for the locking transaction or multixact; so below
4937  * we ensure that we grab buffer lock after the sleep.
4938  */
4939  if (require_sleep && result == HeapTupleUpdated)
4940  {
4942  goto failed;
4943  }
4944  else if (require_sleep)
4945  {
4946  /*
4947  * Acquire tuple lock to establish our priority for the tuple, or
4948  * die trying. LockTuple will release us when we are next-in-line
4949  * for the tuple. We must do this even if we are share-locking.
4950  *
4951  * If we are forced to "start over" below, we keep the tuple lock;
4952  * this arranges that we stay at the head of the line while
4953  * rechecking tuple state.
4954  */
4955  if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
4956  &have_tuple_lock))
4957  {
4958  /*
4959  * This can only happen if wait_policy is Skip and the lock
4960  * couldn't be obtained.
4961  */
4962  result = HeapTupleWouldBlock;
4963  /* recovery code expects to have buffer lock held */
4965  goto failed;
4966  }
4967 
4968  if (infomask & HEAP_XMAX_IS_MULTI)
4969  {
4971 
4972  /* We only ever lock tuples, never update them */
4973  if (status >= MultiXactStatusNoKeyUpdate)
4974  elog(ERROR, "invalid lock mode in heap_lock_tuple");
4975 
4976  /* wait for multixact to end, or die trying */
4977  switch (wait_policy)
4978  {
4979  case LockWaitBlock:
4980  MultiXactIdWait((MultiXactId) xwait, status, infomask,
4981  relation, &tuple->t_self, XLTW_Lock, NULL);
4982  break;
4983  case LockWaitSkip:
4985  status, infomask, relation,
4986  NULL))
4987  {
4988  result = HeapTupleWouldBlock;
4989  /* recovery code expects to have buffer lock held */
4991  goto failed;
4992  }
4993  break;
4994  case LockWaitError:
4996  status, infomask, relation,
4997  NULL))
4998  ereport(ERROR,
4999  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5000  errmsg("could not obtain lock on row in relation \"%s\"",
5001  RelationGetRelationName(relation))));
5002 
5003  break;
5004  }
5005 
5006  /*
5007  * Of course, the multixact might not be done here: if we're
5008  * requesting a light lock mode, other transactions with light
5009  * locks could still be alive, as well as locks owned by our
5010  * own xact or other subxacts of this backend. We need to
5011  * preserve the surviving MultiXact members. Note that it
5012  * isn't absolutely necessary in the latter case, but doing so
5013  * is simpler.
5014  */
5015  }
5016  else
5017  {
5018  /* wait for regular transaction to end, or die trying */
5019  switch (wait_policy)
5020  {
5021  case LockWaitBlock:
5022  XactLockTableWait(xwait, relation, &tuple->t_self,
5023  XLTW_Lock);
5024  break;
5025  case LockWaitSkip:
5026  if (!ConditionalXactLockTableWait(xwait))
5027  {
5028  result = HeapTupleWouldBlock;
5029  /* recovery code expects to have buffer lock held */
5031  goto failed;
5032  }
5033  break;
5034  case LockWaitError:
5035  if (!ConditionalXactLockTableWait(xwait))
5036  ereport(ERROR,
5037  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5038  errmsg("could not obtain lock on row in relation \"%s\"",
5039  RelationGetRelationName(relation))));
5040  break;
5041  }
5042  }
5043 
5044  /* if there are updates, follow the update chain */
5045  if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
5046  {
5047  HTSU_Result res;
5048 
5049  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
5051  mode);
5052  if (res != HeapTupleMayBeUpdated)
5053  {
5054  result = res;
5055  /* recovery code expects to have buffer lock held */
5057  goto failed;
5058  }
5059  }
5060 
5062 
5063  /*
5064  * xwait is done, but if xwait had just locked the tuple then some
5065  * other xact could update this tuple before we get to this point.
5066  * Check for xmax change, and start over if so.
5067  */
5068  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
5070  xwait))
5071  goto l3;
5072 
5073  if (!(infomask & HEAP_XMAX_IS_MULTI))
5074  {
5075  /*
5076  * Otherwise check if it committed or aborted. Note we cannot
5077  * be here if the tuple was only locked by somebody who didn't
5078  * conflict with us; that would have been handled above. So
5079  * that transaction must necessarily be gone by now. But
5080  * don't check for this in the multixact case, because some
5081  * locker transactions might still be running.
5082  */
5083  UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
5084  }
5085  }
5086 
5087  /* By here, we're certain that we hold buffer exclusive lock again */
5088 
5089  /*
5090  * We may lock if previous xmax aborted, or if it committed but only
5091  * locked the tuple without updating it; or if we didn't have to wait
5092  * at all for whatever reason.
5093  */
5094  if (!require_sleep ||
5095  (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
5098  result = HeapTupleMayBeUpdated;
5099  else
5100  result = HeapTupleUpdated;
5101  }
5102 
5103 failed:
5104  if (result != HeapTupleMayBeUpdated)
5105  {
5106  Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated ||
5107  result == HeapTupleWouldBlock);
5108  Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
5109  hufd->ctid = tuple->t_data->t_ctid;
5110  hufd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
5111  if (result == HeapTupleSelfUpdated)
5112  hufd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
5113  else
5114  hufd->cmax = InvalidCommandId;
5115  goto out_locked;
5116  }
5117 
5118  /*
5119  * If we didn't pin the visibility map page and the page has become all
5120  * visible while we were busy locking the buffer, or during some
5121  * subsequent window during which we had it unlocked, we'll have to unlock
5122  * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
5123  * unfortunate, especially since we'll now have to recheck whether the
5124  * tuple has been locked or updated under us, but hopefully it won't
5125  * happen very often.
5126  */
5127  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
5128  {
5130  visibilitymap_pin(relation, block, &vmbuffer);
5132  goto l3;
5133  }
5134 
5135  xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
5136  old_infomask = tuple->t_data->t_infomask;
5137 
5138  /*
5139  * If this is the first possibly-multixact-able operation in the current
5140  * transaction, set my per-backend OldestMemberMXactId setting. We can be
5141  * certain that the transaction will never become a member of any older
5142  * MultiXactIds than that. (We have to do this even if we end up just
5143  * using our own TransactionId below, since some other backend could
5144  * incorporate our XID into a MultiXact immediately afterwards.)
5145  */
5147 
5148  /*
5149  * Compute the new xmax and infomask to store into the tuple. Note we do
5150  * not modify the tuple just yet, because that would leave it in the wrong
5151  * state if multixact.c elogs.
5152  */
5153  compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
5154  GetCurrentTransactionId(), mode, false,
5155  &xid, &new_infomask, &new_infomask2);
5156 
5158 
5159  /*
5160  * Store transaction information of xact locking the tuple.
5161  *
5162  * Note: Cmax is meaningless in this context, so don't set it; this avoids
5163  * possibly generating a useless combo CID. Moreover, if we're locking a
5164  * previously updated tuple, it's important to preserve the Cmax.
5165  *
5166  * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
5167  * we would break the HOT chain.
5168  */
5169  tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
5170  tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5171  tuple->t_data->t_infomask |= new_infomask;
5172  tuple->t_data->t_infomask2 |= new_infomask2;
5173  if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5175  HeapTupleHeaderSetXmax(tuple->t_data, xid);
5176 
5177  /*
5178  * Make sure there is no forward chain link in t_ctid. Note that in the
5179  * cases where the tuple has been updated, we must not overwrite t_ctid,
5180  * because it was set by the updater. Moreover, if the tuple has been
5181  * updated, we need to follow the update chain to lock the new versions of
5182  * the tuple as well.
5183  */
5184  if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5185  tuple->t_data->t_ctid = *tid;
5186 
5187  /* Clear only the all-frozen bit on visibility map if needed */
5188  if (PageIsAllVisible(page) &&
5189  visibilitymap_clear(relation, block, vmbuffer,
5191  cleared_all_frozen = true;
5192 
5193 
5195 
5196  /*
5197  * XLOG stuff. You might think that we don't need an XLOG record because
5198  * there is no state change worth restoring after a crash. You would be
5199  * wrong however: we have just written either a TransactionId or a
5200  * MultiXactId that may never have been seen on disk before, and we need
5201  * to make sure that there are XLOG entries covering those ID numbers.
5202  * Else the same IDs might be re-used after a crash, which would be
5203  * disastrous if this page made it to disk before the crash. Essentially
5204  * we have to enforce the WAL log-before-data rule even in this case.
5205  * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
5206  * entries for everything anyway.)
5207  */
5208  if (RelationNeedsWAL(relation))
5209  {
5210  xl_heap_lock xlrec;
5211  XLogRecPtr recptr;
5212 
5213  XLogBeginInsert();