PostgreSQL Source Code  git master
heapam_handler.c File Reference
#include "postgres.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heaptoast.h"
#include "access/multixact.h"
#include "access/rewriteheap.h"
#include "access/syncscan.h"
#include "access/tableam.h"
#include "access/tsmapi.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/index.h"
#include "catalog/storage.h"
#include "catalog/storage_xlog.h"
#include "commands/progress.h"
#include "executor/executor.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "utils/builtins.h"
#include "utils/rel.h"
Include dependency graph for heapam_handler.c:

Go to the source code of this file.

Macros

#define HEAP_OVERHEAD_BYTES_PER_TUPLE   (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))
 
#define HEAP_USABLE_BYTES_PER_PAGE   (BLCKSZ - SizeOfPageHeaderData)
 

Functions

static void reform_and_rewrite_tuple (HeapTuple tuple, Relation OldHeap, Relation NewHeap, Datum *values, bool *isnull, RewriteState rwstate)
 
static bool SampleHeapTupleVisible (TableScanDesc scan, Buffer buffer, HeapTuple tuple, OffsetNumber tupoffset)
 
static BlockNumber heapam_scan_get_blocks_done (HeapScanDesc hscan)
 
static const TupleTableSlotOps * heapam_slot_callbacks (Relation relation)
 
static IndexFetchTableData * heapam_index_fetch_begin (Relation rel)
 
static void heapam_index_fetch_reset (IndexFetchTableData *scan)
 
static void heapam_index_fetch_end (IndexFetchTableData *scan)
 
static bool heapam_index_fetch_tuple (struct IndexFetchTableData *scan, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, bool *call_again, bool *all_dead)
 
static bool heapam_fetch_row_version (Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot)
 
static bool heapam_tuple_tid_valid (TableScanDesc scan, ItemPointer tid)
 
static bool heapam_tuple_satisfies_snapshot (Relation rel, TupleTableSlot *slot, Snapshot snapshot)
 
static void heapam_tuple_insert (Relation relation, TupleTableSlot *slot, CommandId cid, int options, BulkInsertState bistate)
 
static void heapam_tuple_insert_speculative (Relation relation, TupleTableSlot *slot, CommandId cid, int options, BulkInsertState bistate, uint32 specToken)
 
static void heapam_tuple_complete_speculative (Relation relation, TupleTableSlot *slot, uint32 specToken, bool succeeded)
 
static TM_Result heapam_tuple_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
 
static TM_Result heapam_tuple_update (Relation relation, ItemPointer otid, TupleTableSlot *slot, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, bool *update_indexes)
 
static TM_Result heapam_tuple_lock (Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, uint8 flags, TM_FailureData *tmfd)
 
static void heapam_relation_set_new_filenode (Relation rel, const RelFileNode *newrnode, char persistence, TransactionId *freezeXid, MultiXactId *minmulti)
 
static void heapam_relation_nontransactional_truncate (Relation rel)
 
static void heapam_relation_copy_data (Relation rel, const RelFileNode *newrnode)
 
static void heapam_relation_copy_for_cluster (Relation OldHeap, Relation NewHeap, Relation OldIndex, bool use_sort, TransactionId OldestXmin, TransactionId *xid_cutoff, MultiXactId *multi_cutoff, double *num_tuples, double *tups_vacuumed, double *tups_recently_dead)
 
static bool heapam_scan_analyze_next_block (TableScanDesc scan, BlockNumber blockno, BufferAccessStrategy bstrategy)
 
static bool heapam_scan_analyze_next_tuple (TableScanDesc scan, TransactionId OldestXmin, double *liverows, double *deadrows, TupleTableSlot *slot)
 
static double heapam_index_build_range_scan (Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo, bool allow_sync, bool anyvisible, bool progress, BlockNumber start_blockno, BlockNumber numblocks, IndexBuildCallback callback, void *callback_state, TableScanDesc scan)
 
static void heapam_index_validate_scan (Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo, Snapshot snapshot, ValidateIndexState *state)
 
static bool heapam_relation_needs_toast_table (Relation rel)
 
static Oid heapam_relation_toast_am (Relation rel)
 
static void heapam_estimate_rel_size (Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
 
static bool heapam_scan_bitmap_next_block (TableScanDesc scan, TBMIterateResult *tbmres)
 
static bool heapam_scan_bitmap_next_tuple (TableScanDesc scan, TBMIterateResult *tbmres, TupleTableSlot *slot)
 
static bool heapam_scan_sample_next_block (TableScanDesc scan, SampleScanState *scanstate)
 
static bool heapam_scan_sample_next_tuple (TableScanDesc scan, SampleScanState *scanstate, TupleTableSlot *slot)
 
const TableAmRoutine * GetHeapamTableAmRoutine (void)
 
Datum heap_tableam_handler (PG_FUNCTION_ARGS)
 

Variables

static const TableAmRoutine heapam_methods
 

Macro Definition Documentation

◆ HEAP_OVERHEAD_BYTES_PER_TUPLE

#define HEAP_OVERHEAD_BYTES_PER_TUPLE   (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))

Definition at line 2088 of file heapam_handler.c.

Referenced by heapam_estimate_rel_size().

◆ HEAP_USABLE_BYTES_PER_PAGE

#define HEAP_USABLE_BYTES_PER_PAGE   (BLCKSZ - SizeOfPageHeaderData)

Definition at line 2090 of file heapam_handler.c.

Referenced by heapam_estimate_rel_size().

Function Documentation

◆ GetHeapamTableAmRoutine()

const TableAmRoutine* GetHeapamTableAmRoutine ( void  )

Definition at line 2596 of file heapam_handler.c.

References heapam_methods.

Referenced by formrdesc(), heap_getnext(), and table_scan_sample_next_tuple().

2597 {
2598  return &heapam_methods;
2599 }
static const TableAmRoutine heapam_methods

◆ heap_tableam_handler()

Datum heap_tableam_handler ( PG_FUNCTION_ARGS  )

Definition at line 2602 of file heapam_handler.c.

References PG_RETURN_POINTER.

2603 {
2604  PG_RETURN_POINTER(&heapam_methods);
2605 }
#define PG_RETURN_POINTER(x)
Definition: fmgr.h:361
static const TableAmRoutine heapam_methods

◆ heapam_estimate_rel_size()

static void heapam_estimate_rel_size ( Relation  rel,
int32 *  attr_widths,
BlockNumber *  pages,
double *  tuples,
double *  allvisfrac 
)
static

Definition at line 2094 of file heapam_handler.c.

References HEAP_OVERHEAD_BYTES_PER_TUPLE, HEAP_USABLE_BYTES_PER_PAGE, and table_block_relation_estimate_size().

Referenced by SampleHeapTupleVisible().

2097 {
2098  table_block_relation_estimate_size(rel, attr_widths, pages,
2099  tuples, allvisfrac,
2100  HEAP_OVERHEAD_BYTES_PER_TUPLE,
2101  HEAP_USABLE_BYTES_PER_PAGE);
2102 }
#define HEAP_OVERHEAD_BYTES_PER_TUPLE
#define HEAP_USABLE_BYTES_PER_PAGE
void table_block_relation_estimate_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac, Size overhead_bytes_per_tuple, Size usable_bytes_per_page)
Definition: tableam.c:668

◆ heapam_fetch_row_version()

static bool heapam_fetch_row_version ( Relation  relation,
ItemPointer  tid,
Snapshot  snapshot,
TupleTableSlot *  slot 
)
static

Definition at line 180 of file heapam_handler.c.

References Assert, BufferHeapTupleTableSlot::base, ExecStorePinnedBufferHeapTuple(), heap_fetch(), RelationGetRelid, HeapTupleData::t_self, TTS_IS_BUFFERTUPLE, TupleTableSlot::tts_tableOid, and HeapTupleTableSlot::tupdata.

Referenced by SampleHeapTupleVisible().

184 {
185  BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
186  Buffer buffer;
187 
188  Assert(TTS_IS_BUFFERTUPLE(slot));
189 
190  bslot->base.tupdata.t_self = *tid;
191  if (heap_fetch(relation, snapshot, &bslot->base.tupdata, &buffer))
192  {
193  /* store in slot, transferring existing pin */
194  ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata, slot, buffer);
195  slot->tts_tableOid = RelationGetRelid(relation);
196 
197  return true;
198  }
199 
200  return false;
201 }
Oid tts_tableOid
Definition: tuptable.h:131
bool heap_fetch(Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf)
Definition: heapam.c:1571
ItemPointerData t_self
Definition: htup.h:65
#define TTS_IS_BUFFERTUPLE(slot)
Definition: tuptable.h:231
#define Assert(condition)
Definition: c.h:804
TupleTableSlot * ExecStorePinnedBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1388
HeapTupleTableSlot base
Definition: tuptable.h:259
HeapTupleData tupdata
Definition: tuptable.h:253
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:457

◆ heapam_index_build_range_scan()

static double heapam_index_build_range_scan ( Relation  heapRelation,
Relation  indexRelation,
IndexInfo *  indexInfo,
bool  allow_sync,
bool  anyvisible,
bool  progress,
BlockNumber  start_blockno,
BlockNumber  numblocks,
IndexBuildCallback  callback,
void *  callback_state,
TableScanDesc  scan 
)
static

Definition at line 1158 of file heapam_handler.c.

References Assert, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, callback(), CHECK_FOR_INTERRUPTS, CreateExecutorState(), ExprContext::ecxt_per_tuple_memory, ExprContext::ecxt_scantuple, elog, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg_internal(), ERROR, ExecDropSingleTupleTableSlot(), ExecPrepareQual(), ExecQual(), ExecStoreBufferHeapTuple(), FormIndexDatum(), ForwardScanDirection, FreeExecutorState(), GetOldestNonRemovableTransactionId(), GetPerTupleExprContext, GetTransactionSnapshot(), heap_get_root_tuples(), heap_getnext(), heap_setscanlimits(), heapam_scan_get_blocks_done(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleSatisfiesVacuum(), IndexInfo::ii_BrokenHotChain, IndexInfo::ii_Concurrent, IndexInfo::ii_ExclusionOps, IndexInfo::ii_ExpressionsState, IndexInfo::ii_Predicate, IndexInfo::ii_PredicateState, IndexInfo::ii_Unique, INDEX_MAX_KEYS, InvalidBlockNumber, InvalidOffsetNumber, InvalidTransactionId, IsBootstrapProcessingMode, IsMVCCSnapshot, IsSystemRelation(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSet, LockBuffer(), MaxHeapTuplesPerPage, MemoryContextReset(), NIL, OffsetNumberIsValid, OidIsValid, OldestXmin, pgstat_progress_update_param(), ParallelBlockTableScanDescData::phs_nblocks, PROGRESS_SCAN_BLOCKS_DONE, PROGRESS_SCAN_BLOCKS_TOTAL, RelationData::rd_rel, RegisterSnapshot(), RelationGetRelationName, HeapScanDescData::rs_base, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_nblocks, TableScanDescData::rs_parallel, TableScanDescData::rs_snapshot, SnapshotAny, HeapTupleData::t_data, HeapTupleData::t_self, table_beginscan_strat(), table_endscan(), table_slot_create(), TransactionIdIsCurrentTransactionId(), TransactionIdIsValid, UnregisterSnapshot(), values, WARNING, XactLockTableWait(), and 
XLTW_InsertIndexUnique.

Referenced by SampleHeapTupleVisible().

1169 {
1170  HeapScanDesc hscan;
1171  bool is_system_catalog;
1172  bool checking_uniqueness;
1173  HeapTuple heapTuple;
1175  bool isnull[INDEX_MAX_KEYS];
1176  double reltuples;
1177  ExprState *predicate;
1178  TupleTableSlot *slot;
1179  EState *estate;
1180  ExprContext *econtext;
1181  Snapshot snapshot;
1182  bool need_unregister_snapshot = false;
1184  BlockNumber previous_blkno = InvalidBlockNumber;
1185  BlockNumber root_blkno = InvalidBlockNumber;
1186  OffsetNumber root_offsets[MaxHeapTuplesPerPage];
1187 
1188  /*
1189  * sanity checks
1190  */
1191  Assert(OidIsValid(indexRelation->rd_rel->relam));
1192 
1193  /* Remember if it's a system catalog */
1194  is_system_catalog = IsSystemRelation(heapRelation);
1195 
1196  /* See whether we're verifying uniqueness/exclusion properties */
1197  checking_uniqueness = (indexInfo->ii_Unique ||
1198  indexInfo->ii_ExclusionOps != NULL);
1199 
1200  /*
1201  * "Any visible" mode is not compatible with uniqueness checks; make sure
1202  * only one of those is requested.
1203  */
1204  Assert(!(anyvisible && checking_uniqueness));
1205 
1206  /*
1207  * Need an EState for evaluation of index expressions and partial-index
1208  * predicates. Also a slot to hold the current tuple.
1209  */
1210  estate = CreateExecutorState();
1211  econtext = GetPerTupleExprContext(estate);
1212  slot = table_slot_create(heapRelation, NULL);
1213 
1214  /* Arrange for econtext's scan tuple to be the tuple under test */
1215  econtext->ecxt_scantuple = slot;
1216 
1217  /* Set up execution state for predicate, if any. */
1218  predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
1219 
1220  /*
1221  * Prepare for scan of the base relation. In a normal index build, we use
1222  * SnapshotAny because we must retrieve all tuples and do our own time
1223  * qual checks (because we have to index RECENTLY_DEAD tuples). In a
1224  * concurrent build, or during bootstrap, we take a regular MVCC snapshot
1225  * and index whatever's live according to that.
1226  */
1227  OldestXmin = InvalidTransactionId;
1228 
1229  /* okay to ignore lazy VACUUMs here */
1230  if (!IsBootstrapProcessingMode() && !indexInfo->ii_Concurrent)
1231  OldestXmin = GetOldestNonRemovableTransactionId(heapRelation);
1232 
1233  if (!scan)
1234  {
1235  /*
1236  * Serial index build.
1237  *
1238  * Must begin our own heap scan in this case. We may also need to
1239  * register a snapshot whose lifetime is under our direct control.
1240  */
1241  if (!TransactionIdIsValid(OldestXmin))
1242  {
1244  need_unregister_snapshot = true;
1245  }
1246  else
1247  snapshot = SnapshotAny;
1248 
1249  scan = table_beginscan_strat(heapRelation, /* relation */
1250  snapshot, /* snapshot */
1251  0, /* number of keys */
1252  NULL, /* scan key */
1253  true, /* buffer access strategy OK */
1254  allow_sync); /* syncscan OK? */
1255  }
1256  else
1257  {
1258  /*
1259  * Parallel index build.
1260  *
1261  * Parallel case never registers/unregisters own snapshot. Snapshot
1262  * is taken from parallel heap scan, and is SnapshotAny or an MVCC
1263  * snapshot, based on same criteria as serial case.
1264  */
1266  Assert(allow_sync);
1267  snapshot = scan->rs_snapshot;
1268  }
1269 
1270  hscan = (HeapScanDesc) scan;
1271 
1272  /*
1273  * Must have called GetOldestNonRemovableTransactionId() if using
1274  * SnapshotAny. Shouldn't have for an MVCC snapshot. (It's especially
1275  * worth checking this for parallel builds, since ambuild routines that
1276  * support parallel builds must work these details out for themselves.)
1277  */
1278  Assert(snapshot == SnapshotAny || IsMVCCSnapshot(snapshot));
1279  Assert(snapshot == SnapshotAny ? TransactionIdIsValid(OldestXmin) :
1280  !TransactionIdIsValid(OldestXmin));
1281  Assert(snapshot == SnapshotAny || !anyvisible);
1282 
1283  /* Publish number of blocks to scan */
1284  if (progress)
1285  {
1286  BlockNumber nblocks;
1287 
1288  if (hscan->rs_base.rs_parallel != NULL)
1289  {
1291 
1293  nblocks = pbscan->phs_nblocks;
1294  }
1295  else
1296  nblocks = hscan->rs_nblocks;
1297 
1299  nblocks);
1300  }
1301 
1302  /* set our scan endpoints */
1303  if (!allow_sync)
1304  heap_setscanlimits(scan, start_blockno, numblocks);
1305  else
1306  {
1307  /* syncscan can only be requested on whole relation */
1308  Assert(start_blockno == 0);
1309  Assert(numblocks == InvalidBlockNumber);
1310  }
1311 
1312  reltuples = 0;
1313 
1314  /*
1315  * Scan all tuples in the base relation.
1316  */
1317  while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
1318  {
1319  bool tupleIsAlive;
1320 
1322 
1323  /* Report scan progress, if asked to. */
1324  if (progress)
1325  {
1326  BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
1327 
1328  if (blocks_done != previous_blkno)
1329  {
1331  blocks_done);
1332  previous_blkno = blocks_done;
1333  }
1334  }
1335 
1336  /*
1337  * When dealing with a HOT-chain of updated tuples, we want to index
1338  * the values of the live tuple (if any), but index it under the TID
1339  * of the chain's root tuple. This approach is necessary to preserve
1340  * the HOT-chain structure in the heap. So we need to be able to find
1341  * the root item offset for every tuple that's in a HOT-chain. When
1342  * first reaching a new page of the relation, call
1343  * heap_get_root_tuples() to build a map of root item offsets on the
1344  * page.
1345  *
1346  * It might look unsafe to use this information across buffer
1347  * lock/unlock. However, we hold ShareLock on the table so no
1348  * ordinary insert/update/delete should occur; and we hold pin on the
1349  * buffer continuously while visiting the page, so no pruning
1350  * operation can occur either.
1351  *
1352  * In cases with only ShareUpdateExclusiveLock on the table, it's
1353  * possible for some HOT tuples to appear that we didn't know about
1354  * when we first read the page. To handle that case, we re-obtain the
1355  * list of root offsets when a HOT tuple points to a root item that we
1356  * don't know about.
1357  *
1358  * Also, although our opinions about tuple liveness could change while
1359  * we scan the page (due to concurrent transaction commits/aborts),
1360  * the chain root locations won't, so this info doesn't need to be
1361  * rebuilt after waiting for another transaction.
1362  *
1363  * Note the implied assumption that there is no more than one live
1364  * tuple per HOT-chain --- else we could create more than one index
1365  * entry pointing to the same root tuple.
1366  */
1367  if (hscan->rs_cblock != root_blkno)
1368  {
1369  Page page = BufferGetPage(hscan->rs_cbuf);
1370 
1372  heap_get_root_tuples(page, root_offsets);
1374 
1375  root_blkno = hscan->rs_cblock;
1376  }
1377 
1378  if (snapshot == SnapshotAny)
1379  {
1380  /* do our own time qual check */
1381  bool indexIt;
1382  TransactionId xwait;
1383 
1384  recheck:
1385 
1386  /*
1387  * We could possibly get away with not locking the buffer here,
1388  * since caller should hold ShareLock on the relation, but let's
1389  * be conservative about it. (This remark is still correct even
1390  * with HOT-pruning: our pin on the buffer prevents pruning.)
1391  */
1393 
1394  /*
1395  * The criteria for counting a tuple as live in this block need to
1396  * match what analyze.c's heapam_scan_analyze_next_tuple() does,
1397  * otherwise CREATE INDEX and ANALYZE may produce wildly different
1398  * reltuples values, e.g. when there are many recently-dead
1399  * tuples.
1400  */
1401  switch (HeapTupleSatisfiesVacuum(heapTuple, OldestXmin,
1402  hscan->rs_cbuf))
1403  {
1404  case HEAPTUPLE_DEAD:
1405  /* Definitely dead, we can ignore it */
1406  indexIt = false;
1407  tupleIsAlive = false;
1408  break;
1409  case HEAPTUPLE_LIVE:
1410  /* Normal case, index and unique-check it */
1411  indexIt = true;
1412  tupleIsAlive = true;
1413  /* Count it as live, too */
1414  reltuples += 1;
1415  break;
1417 
1418  /*
1419  * If tuple is recently deleted then we must index it
1420  * anyway to preserve MVCC semantics. (Pre-existing
1421  * transactions could try to use the index after we finish
1422  * building it, and may need to see such tuples.)
1423  *
1424  * However, if it was HOT-updated then we must only index
1425  * the live tuple at the end of the HOT-chain. Since this
1426  * breaks semantics for pre-existing snapshots, mark the
1427  * index as unusable for them.
1428  *
1429  * We don't count recently-dead tuples in reltuples, even
1430  * if we index them; see heapam_scan_analyze_next_tuple().
1431  */
1432  if (HeapTupleIsHotUpdated(heapTuple))
1433  {
1434  indexIt = false;
1435  /* mark the index as unsafe for old snapshots */
1436  indexInfo->ii_BrokenHotChain = true;
1437  }
1438  else
1439  indexIt = true;
1440  /* In any case, exclude the tuple from unique-checking */
1441  tupleIsAlive = false;
1442  break;
1444 
1445  /*
1446  * In "anyvisible" mode, this tuple is visible and we
1447  * don't need any further checks.
1448  */
1449  if (anyvisible)
1450  {
1451  indexIt = true;
1452  tupleIsAlive = true;
1453  reltuples += 1;
1454  break;
1455  }
1456 
1457  /*
1458  * Since caller should hold ShareLock or better, normally
1459  * the only way to see this is if it was inserted earlier
1460  * in our own transaction. However, it can happen in
1461  * system catalogs, since we tend to release write lock
1462  * before commit there. Give a warning if neither case
1463  * applies.
1464  */
1465  xwait = HeapTupleHeaderGetXmin(heapTuple->t_data);
1467  {
1468  if (!is_system_catalog)
1469  elog(WARNING, "concurrent insert in progress within table \"%s\"",
1470  RelationGetRelationName(heapRelation));
1471 
1472  /*
1473  * If we are performing uniqueness checks, indexing
1474  * such a tuple could lead to a bogus uniqueness
1475  * failure. In that case we wait for the inserting
1476  * transaction to finish and check again.
1477  */
1478  if (checking_uniqueness)
1479  {
1480  /*
1481  * Must drop the lock on the buffer before we wait
1482  */
1484  XactLockTableWait(xwait, heapRelation,
1485  &heapTuple->t_self,
1488  goto recheck;
1489  }
1490  }
1491  else
1492  {
1493  /*
1494  * For consistency with
1495  * heapam_scan_analyze_next_tuple(), count
1496  * HEAPTUPLE_INSERT_IN_PROGRESS tuples as live only
1497  * when inserted by our own transaction.
1498  */
1499  reltuples += 1;
1500  }
1501 
1502  /*
1503  * We must index such tuples, since if the index build
1504  * commits then they're good.
1505  */
1506  indexIt = true;
1507  tupleIsAlive = true;
1508  break;
1510 
1511  /*
1512  * As with INSERT_IN_PROGRESS case, this is unexpected
1513  * unless it's our own deletion or a system catalog; but
1514  * in anyvisible mode, this tuple is visible.
1515  */
1516  if (anyvisible)
1517  {
1518  indexIt = true;
1519  tupleIsAlive = false;
1520  reltuples += 1;
1521  break;
1522  }
1523 
1524  xwait = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1526  {
1527  if (!is_system_catalog)
1528  elog(WARNING, "concurrent delete in progress within table \"%s\"",
1529  RelationGetRelationName(heapRelation));
1530 
1531  /*
1532  * If we are performing uniqueness checks, assuming
1533  * the tuple is dead could lead to missing a
1534  * uniqueness violation. In that case we wait for the
1535  * deleting transaction to finish and check again.
1536  *
1537  * Also, if it's a HOT-updated tuple, we should not
1538  * index it but rather the live tuple at the end of
1539  * the HOT-chain. However, the deleting transaction
1540  * could abort, possibly leaving this tuple as live
1541  * after all, in which case it has to be indexed. The
1542  * only way to know what to do is to wait for the
1543  * deleting transaction to finish and check again.
1544  */
1545  if (checking_uniqueness ||
1546  HeapTupleIsHotUpdated(heapTuple))
1547  {
1548  /*
1549  * Must drop the lock on the buffer before we wait
1550  */
1552  XactLockTableWait(xwait, heapRelation,
1553  &heapTuple->t_self,
1556  goto recheck;
1557  }
1558 
1559  /*
1560  * Otherwise index it but don't check for uniqueness,
1561  * the same as a RECENTLY_DEAD tuple.
1562  */
1563  indexIt = true;
1564 
1565  /*
1566  * Count HEAPTUPLE_DELETE_IN_PROGRESS tuples as live,
1567  * if they were not deleted by the current
1568  * transaction. That's what
1569  * heapam_scan_analyze_next_tuple() does, and we want
1570  * the behavior to be consistent.
1571  */
1572  reltuples += 1;
1573  }
1574  else if (HeapTupleIsHotUpdated(heapTuple))
1575  {
1576  /*
1577  * It's a HOT-updated tuple deleted by our own xact.
1578  * We can assume the deletion will commit (else the
1579  * index contents don't matter), so treat the same as
1580  * RECENTLY_DEAD HOT-updated tuples.
1581  */
1582  indexIt = false;
1583  /* mark the index as unsafe for old snapshots */
1584  indexInfo->ii_BrokenHotChain = true;
1585  }
1586  else
1587  {
1588  /*
1589  * It's a regular tuple deleted by our own xact. Index
1590  * it, but don't check for uniqueness nor count in
1591  * reltuples, the same as a RECENTLY_DEAD tuple.
1592  */
1593  indexIt = true;
1594  }
1595  /* In any case, exclude the tuple from unique-checking */
1596  tupleIsAlive = false;
1597  break;
1598  default:
1599  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1600  indexIt = tupleIsAlive = false; /* keep compiler quiet */
1601  break;
1602  }
1603 
1605 
1606  if (!indexIt)
1607  continue;
1608  }
1609  else
1610  {
1611  /* heap_getnext did the time qual check */
1612  tupleIsAlive = true;
1613  reltuples += 1;
1614  }
1615 
1617 
1618  /* Set up for predicate or expression evaluation */
1619  ExecStoreBufferHeapTuple(heapTuple, slot, hscan->rs_cbuf);
1620 
1621  /*
1622  * In a partial index, discard tuples that don't satisfy the
1623  * predicate.
1624  */
1625  if (predicate != NULL)
1626  {
1627  if (!ExecQual(predicate, econtext))
1628  continue;
1629  }
1630 
1631  /*
1632  * For the current heap tuple, extract all the attributes we use in
1633  * this index, and note which are null. This also performs evaluation
1634  * of any expressions needed.
1635  */
1636  FormIndexDatum(indexInfo,
1637  slot,
1638  estate,
1639  values,
1640  isnull);
1641 
1642  /*
1643  * You'd think we should go ahead and build the index tuple here, but
1644  * some index AMs want to do further processing on the data first. So
1645  * pass the values[] and isnull[] arrays, instead.
1646  */
1647 
1648  if (HeapTupleIsHeapOnly(heapTuple))
1649  {
1650  /*
1651  * For a heap-only tuple, pretend its TID is that of the root. See
1652  * src/backend/access/heap/README.HOT for discussion.
1653  */
1654  ItemPointerData tid;
1655  OffsetNumber offnum;
1656 
1657  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_self);
1658 
1659  /*
1660  * If a HOT tuple points to a root that we don't know
1661  * about, obtain root items afresh. If that still fails,
1662  * report it as corruption.
1663  */
1664  if (root_offsets[offnum - 1] == InvalidOffsetNumber)
1665  {
1666  Page page = BufferGetPage(hscan->rs_cbuf);
1667 
1669  heap_get_root_tuples(page, root_offsets);
1671  }
1672 
1673  if (!OffsetNumberIsValid(root_offsets[offnum - 1]))
1674  ereport(ERROR,
1676  errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"",
1677  ItemPointerGetBlockNumber(&heapTuple->t_self),
1678  offnum,
1679  RelationGetRelationName(heapRelation))));
1680 
1681  ItemPointerSet(&tid, ItemPointerGetBlockNumber(&heapTuple->t_self),
1682  root_offsets[offnum - 1]);
1683 
1684  /* Call the AM's callback routine to process the tuple */
1685  callback(indexRelation, &tid, values, isnull, tupleIsAlive,
1686  callback_state);
1687  }
1688  else
1689  {
1690  /* Call the AM's callback routine to process the tuple */
1691  callback(indexRelation, &heapTuple->t_self, values, isnull,
1692  tupleIsAlive, callback_state);
1693  }
1694  }
1695 
1696  /* Report scan progress one last time. */
1697  if (progress)
1698  {
1699  BlockNumber blks_done;
1700 
1701  if (hscan->rs_base.rs_parallel != NULL)
1702  {
1704 
1706  blks_done = pbscan->phs_nblocks;
1707  }
1708  else
1709  blks_done = hscan->rs_nblocks;
1710 
1712  blks_done);
1713  }
1714 
1715  table_endscan(scan);
1716 
1717  /* we can now forget our snapshot, if set and registered by us */
1718  if (need_unregister_snapshot)
1719  UnregisterSnapshot(snapshot);
1720 
1722 
1723  FreeExecutorState(estate);
1724 
1725  /* These may have been pointing to the now-gone estate */
1726  indexInfo->ii_ExpressionsState = NIL;
1727  indexInfo->ii_PredicateState = NULL;
1728 
1729  return reltuples;
1730 }
TupleTableSlot * table_slot_create(Relation relation, List **reglist)
Definition: tableam.c:91
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
void FormIndexDatum(IndexInfo *indexInfo, TupleTableSlot *slot, EState *estate, Datum *values, bool *isnull)
Definition: index.c:2816
#define NIL
Definition: pg_list.h:65
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
BlockNumber rs_cblock
Definition: heapam.h:59
List * ii_Predicate
Definition: execnodes.h:162
bool IsSystemRelation(Relation relation)
Definition: catalog.c:66
uint32 TransactionId
Definition: c.h:587
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:810
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
struct ParallelBlockTableScanDescData * ParallelBlockTableScanDesc
Definition: relscan.h:86
TableScanDescData rs_base
Definition: heapam.h:49
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3478
ExprState * ii_PredicateState
Definition: execnodes.h:163
MemoryContext ecxt_per_tuple_memory
Definition: execnodes.h:233
#define MaxHeapTuplesPerPage
Definition: htup_details.h:574
int errcode(int sqlerrcode)
Definition: elog.c:694
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:137
uint32 BlockNumber
Definition: block.h:31
Form_pg_class rd_rel
Definition: rel.h:110
static bool ExecQual(ExprState *state, ExprContext *econtext)
Definition: executor.h:372
Snapshot GetTransactionSnapshot(void)
Definition: snapmgr.c:250
#define OidIsValid(objectId)
Definition: c.h:710
static TableScanDesc table_beginscan_strat(Relation rel, Snapshot snapshot, int nkeys, struct ScanKeyData *key, bool allow_strat, bool allow_sync)
Definition: tableam.h:907
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
void FreeExecutorState(EState *estate)
Definition: execUtils.c:186
#define HeapTupleIsHotUpdated(tuple)
Definition: htup_details.h:676
#define GetPerTupleExprContext(estate)
Definition: executor.h:509
List * ii_ExpressionsState
Definition: execnodes.h:161
TransactionId GetOldestNonRemovableTransactionId(Relation rel)
Definition: procarray.c:1939
#define ERROR
Definition: elog.h:45
ItemPointerData t_self
Definition: htup.h:65
static void callback(struct sockaddr *addr, struct sockaddr *mask, void *unused)
Definition: test_ifaddrs.c:48
HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction)
Definition: heapam.c:1316
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1224
void heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
Definition: heapam.c:348
#define InvalidTransactionId
Definition: transam.h:31
#define RelationGetRelationName(relation)
Definition: rel.h:491
ExprState * ExecPrepareQual(List *qual, EState *estate)
Definition: execExpr.c:520
static TransactionId OldestXmin
Definition: vacuumlazy.c:335
bool ii_BrokenHotChain
Definition: execnodes.h:174
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
void heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
Definition: pruneheap.c:884
EState * CreateExecutorState(void)
Definition: execUtils.c:90
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:852
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:45
#define WARNING
Definition: elog.h:40
int progress
Definition: pgbench.c:235
#define PROGRESS_SCAN_BLOCKS_DONE
Definition: progress.h:120
uintptr_t Datum
Definition: postgres.h:367
BlockNumber rs_nblocks
Definition: heapam.h:52
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3939
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1362
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:97
#define InvalidOffsetNumber
Definition: off.h:26
#define ereport(elevel,...)
Definition: elog.h:155
Buffer rs_cbuf
Definition: heapam.h:60
int errmsg_internal(const char *fmt,...)
Definition: elog.c:992
bool ii_Unique
Definition: execnodes.h:171
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:639
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:685
#define Assert(condition)
Definition: c.h:804
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
#define INDEX_MAX_KEYS
#define InvalidBlockNumber
Definition: block.h:33
TupleTableSlot * ecxt_scantuple
Definition: execnodes.h:225
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define PROGRESS_SCAN_BLOCKS_TOTAL
Definition: progress.h:119
bool ii_Concurrent
Definition: execnodes.h:173
#define SnapshotAny
Definition: snapmgr.h:68
static void table_endscan(TableScanDesc scan)
Definition: tableam.h:991
static Datum values[MAXATTR]
Definition: bootstrap.c:165
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:394
Oid * ii_ExclusionOps
Definition: execnodes.h:164
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
#define elog(elevel,...)
Definition: elog.h:227
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
struct ParallelTableScanDescData * rs_parallel
Definition: relscan.h:50
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:100
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static BlockNumber heapam_scan_get_blocks_done(HeapScanDesc hscan)
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127

◆ heapam_index_fetch_begin()

static IndexFetchTableData* heapam_index_fetch_begin ( Relation  rel)
static

Definition at line 79 of file heapam_handler.c.

References InvalidBuffer, palloc0(), IndexFetchTableData::rel, IndexFetchHeapData::xs_base, and IndexFetchHeapData::xs_cbuf.

Referenced by SampleHeapTupleVisible().

80 {
82 
83  hscan->xs_base.rel = rel;
84  hscan->xs_cbuf = InvalidBuffer;
85 
86  return &hscan->xs_base;
87 }
#define InvalidBuffer
Definition: buf.h:25
Buffer xs_cbuf
Definition: heapam.h:82
void * palloc0(Size size)
Definition: mcxt.c:981
IndexFetchTableData xs_base
Definition: heapam.h:80

◆ heapam_index_fetch_end()

static void heapam_index_fetch_end ( IndexFetchTableData scan)
static

Definition at line 102 of file heapam_handler.c.

References heapam_index_fetch_reset(), and pfree().

Referenced by SampleHeapTupleVisible().

103 {
104  IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
105 
107 
108  pfree(hscan);
109 }
static void heapam_index_fetch_reset(IndexFetchTableData *scan)
void pfree(void *pointer)
Definition: mcxt.c:1057

◆ heapam_index_fetch_reset()

static void heapam_index_fetch_reset ( IndexFetchTableData scan)
static

Definition at line 90 of file heapam_handler.c.

References BufferIsValid, InvalidBuffer, ReleaseBuffer(), and IndexFetchHeapData::xs_cbuf.

Referenced by heapam_index_fetch_end(), and SampleHeapTupleVisible().

91 {
92  IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
93 
94  if (BufferIsValid(hscan->xs_cbuf))
95  {
96  ReleaseBuffer(hscan->xs_cbuf);
97  hscan->xs_cbuf = InvalidBuffer;
98  }
99 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3700
Buffer xs_cbuf
Definition: heapam.h:82
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123

◆ heapam_index_fetch_tuple()

static bool heapam_index_fetch_tuple ( struct IndexFetchTableData scan,
ItemPointer  tid,
Snapshot  snapshot,
TupleTableSlot slot,
bool call_again,
bool all_dead 
)
static

Definition at line 112 of file heapam_handler.c.

References Assert, BufferHeapTupleTableSlot::base, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, ExecStoreBufferHeapTuple(), heap_hot_search_buffer(), heap_page_prune_opt(), IsMVCCSnapshot, ItemPointerGetBlockNumber, LockBuffer(), IndexFetchTableData::rel, RelationGetRelid, ReleaseAndReadBuffer(), HeapTupleData::t_self, TTS_IS_BUFFERTUPLE, TupleTableSlot::tts_tableOid, HeapTupleTableSlot::tupdata, IndexFetchHeapData::xs_base, and IndexFetchHeapData::xs_cbuf.

Referenced by SampleHeapTupleVisible().

117 {
118  IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
120  bool got_heap_tuple;
121 
122  Assert(TTS_IS_BUFFERTUPLE(slot));
123 
124  /* We can skip the buffer-switching logic if we're in mid-HOT chain. */
125  if (!*call_again)
126  {
127  /* Switch to correct buffer if we don't have it already */
128  Buffer prev_buf = hscan->xs_cbuf;
129 
130  hscan->xs_cbuf = ReleaseAndReadBuffer(hscan->xs_cbuf,
131  hscan->xs_base.rel,
133 
134  /*
135  * Prune page, but only if we weren't already on this page
136  */
137  if (prev_buf != hscan->xs_cbuf)
138  heap_page_prune_opt(hscan->xs_base.rel, hscan->xs_cbuf);
139  }
140 
141  /* Obtain share-lock on the buffer so we can examine visibility */
143  got_heap_tuple = heap_hot_search_buffer(tid,
144  hscan->xs_base.rel,
145  hscan->xs_cbuf,
146  snapshot,
147  &bslot->base.tupdata,
148  all_dead,
149  !*call_again);
150  bslot->base.tupdata.t_self = *tid;
152 
153  if (got_heap_tuple)
154  {
155  /*
156  * Only in a non-MVCC snapshot can more than one member of the HOT
157  * chain be visible.
158  */
159  *call_again = !IsMVCCSnapshot(snapshot);
160 
161  slot->tts_tableOid = RelationGetRelid(scan->rel);
162  ExecStoreBufferHeapTuple(&bslot->base.tupdata, slot, hscan->xs_cbuf);
163  }
164  else
165  {
166  /* We've reached the end of the HOT chain. */
167  *call_again = false;
168  }
169 
170  return got_heap_tuple;
171 }
Oid tts_tableOid
Definition: tuptable.h:131
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
Definition: heapam.c:1686
ItemPointerData t_self
Definition: htup.h:65
Buffer xs_cbuf
Definition: heapam.h:82
#define TTS_IS_BUFFERTUPLE(slot)
Definition: tuptable.h:231
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3939
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1362
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:97
#define Assert(condition)
Definition: c.h:804
Buffer ReleaseAndReadBuffer(Buffer buffer, Relation relation, BlockNumber blockNum)
Definition: bufmgr.c:1546
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
HeapTupleTableSlot base
Definition: tuptable.h:259
void heap_page_prune_opt(Relation relation, Buffer buffer)
Definition: pruneheap.c:87
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
HeapTupleData tupdata
Definition: tuptable.h:253
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:457
IndexFetchTableData xs_base
Definition: heapam.h:80

◆ heapam_index_validate_scan()

static void heapam_index_validate_scan ( Relation  heapRelation,
Relation  indexRelation,
IndexInfo indexInfo,
Snapshot  snapshot,
ValidateIndexState state 
)
static

Definition at line 1733 of file heapam_handler.c.

References Assert, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, CHECK_FOR_INTERRUPTS, CreateExecutorState(), DatumGetInt64, DatumGetPointer, ExprContext::ecxt_per_tuple_memory, ExprContext::ecxt_scantuple, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg_internal(), ERROR, ExecDropSingleTupleTableSlot(), ExecPrepareQual(), ExecQual(), ExecStoreHeapTuple(), FormIndexDatum(), ForwardScanDirection, FreeExecutorState(), GetPerTupleExprContext, heap_get_root_tuples(), heap_getnext(), HeapTupleIsHeapOnly, ValidateIndexState::htups, IndexInfo::ii_ExpressionsState, IndexInfo::ii_Predicate, IndexInfo::ii_PredicateState, IndexInfo::ii_Unique, index_insert(), INDEX_MAX_KEYS, InvalidBlockNumber, ItemPointerCompare(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSetOffsetNumber, itemptr_decode(), LockBuffer(), MakeSingleTupleTableSlot(), MaxHeapTuplesPerPage, MemoryContextReset(), NIL, OffsetNumberIsValid, OidIsValid, pfree(), pgstat_progress_update_param(), PROGRESS_SCAN_BLOCKS_DONE, PROGRESS_SCAN_BLOCKS_TOTAL, RelationData::rd_rel, RelationGetDescr, RelationGetRelationName, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_nblocks, HeapTupleData::t_self, table_beginscan_strat(), table_endscan(), TTSOpsHeapTuple, ValidateIndexState::tuplesort, tuplesort_getdatum(), ValidateIndexState::tups_inserted, UNIQUE_CHECK_NO, UNIQUE_CHECK_YES, and values.

Referenced by SampleHeapTupleVisible().

1738 {
1739  TableScanDesc scan;
1740  HeapScanDesc hscan;
1741  HeapTuple heapTuple;
1743  bool isnull[INDEX_MAX_KEYS];
1744  ExprState *predicate;
1745  TupleTableSlot *slot;
1746  EState *estate;
1747  ExprContext *econtext;
1748  BlockNumber root_blkno = InvalidBlockNumber;
1749  OffsetNumber root_offsets[MaxHeapTuplesPerPage];
1750  bool in_index[MaxHeapTuplesPerPage];
1751  BlockNumber previous_blkno = InvalidBlockNumber;
1752 
1753  /* state variables for the merge */
1754  ItemPointer indexcursor = NULL;
1755  ItemPointerData decoded;
1756  bool tuplesort_empty = false;
1757 
1758  /*
1759  * sanity checks
1760  */
1761  Assert(OidIsValid(indexRelation->rd_rel->relam));
1762 
1763  /*
1764  * Need an EState for evaluation of index expressions and partial-index
1765  * predicates. Also a slot to hold the current tuple.
1766  */
1767  estate = CreateExecutorState();
1768  econtext = GetPerTupleExprContext(estate);
1769  slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation),
1770  &TTSOpsHeapTuple);
1771 
1772  /* Arrange for econtext's scan tuple to be the tuple under test */
1773  econtext->ecxt_scantuple = slot;
1774 
1775  /* Set up execution state for predicate, if any. */
1776  predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
1777 
1778  /*
1779  * Prepare for scan of the base relation. We need just those tuples
1780  * satisfying the passed-in reference snapshot. We must disable syncscan
1781  * here, because it's critical that we read from block zero forward to
1782  * match the sorted TIDs.
1783  */
1784  scan = table_beginscan_strat(heapRelation, /* relation */
1785  snapshot, /* snapshot */
1786  0, /* number of keys */
1787  NULL, /* scan key */
1788  true, /* buffer access strategy OK */
1789  false); /* syncscan not OK */
1790  hscan = (HeapScanDesc) scan;
1791 
1793  hscan->rs_nblocks);
1794 
1795  /*
1796  * Scan all tuples matching the snapshot.
1797  */
1798  while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
1799  {
1800  ItemPointer heapcursor = &heapTuple->t_self;
1801  ItemPointerData rootTuple;
1802  OffsetNumber root_offnum;
1803 
1805 
1806  state->htups += 1;
1807 
1808  if ((previous_blkno == InvalidBlockNumber) ||
1809  (hscan->rs_cblock != previous_blkno))
1810  {
1812  hscan->rs_cblock);
1813  previous_blkno = hscan->rs_cblock;
1814  }
1815 
1816  /*
1817  * As commented in table_index_build_scan, we should index heap-only
1818  * tuples under the TIDs of their root tuples; so when we advance onto
1819  * a new heap page, build a map of root item offsets on the page.
1820  *
1821  * This complicates merging against the tuplesort output: we will
1822  * visit the live tuples in order by their offsets, but the root
1823  * offsets that we need to compare against the index contents might be
1824  * ordered differently. So we might have to "look back" within the
1825  * tuplesort output, but only within the current page. We handle that
1826  * by keeping a bool array in_index[] showing all the
1827  * already-passed-over tuplesort output TIDs of the current page. We
1828  * clear that array here, when advancing onto a new heap page.
1829  */
1830  if (hscan->rs_cblock != root_blkno)
1831  {
1832  Page page = BufferGetPage(hscan->rs_cbuf);
1833 
1835  heap_get_root_tuples(page, root_offsets);
1837 
1838  memset(in_index, 0, sizeof(in_index));
1839 
1840  root_blkno = hscan->rs_cblock;
1841  }
1842 
1843  /* Convert actual tuple TID to root TID */
1844  rootTuple = *heapcursor;
1845  root_offnum = ItemPointerGetOffsetNumber(heapcursor);
1846 
1847  if (HeapTupleIsHeapOnly(heapTuple))
1848  {
1849  root_offnum = root_offsets[root_offnum - 1];
1850  if (!OffsetNumberIsValid(root_offnum))
1851  ereport(ERROR,
1853  errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"",
1854  ItemPointerGetBlockNumber(heapcursor),
1855  ItemPointerGetOffsetNumber(heapcursor),
1856  RelationGetRelationName(heapRelation))));
1857  ItemPointerSetOffsetNumber(&rootTuple, root_offnum);
1858  }
1859 
1860  /*
1861  * "merge" by skipping through the index tuples until we find or pass
1862  * the current root tuple.
1863  */
1864  while (!tuplesort_empty &&
1865  (!indexcursor ||
1866  ItemPointerCompare(indexcursor, &rootTuple) < 0))
1867  {
1868  Datum ts_val;
1869  bool ts_isnull;
1870 
1871  if (indexcursor)
1872  {
1873  /*
1874  * Remember index items seen earlier on the current heap page
1875  */
1876  if (ItemPointerGetBlockNumber(indexcursor) == root_blkno)
1877  in_index[ItemPointerGetOffsetNumber(indexcursor) - 1] = true;
1878  }
1879 
1880  tuplesort_empty = !tuplesort_getdatum(state->tuplesort, true,
1881  &ts_val, &ts_isnull, NULL);
1882  Assert(tuplesort_empty || !ts_isnull);
1883  if (!tuplesort_empty)
1884  {
1885  itemptr_decode(&decoded, DatumGetInt64(ts_val));
1886  indexcursor = &decoded;
1887 
1888  /* If int8 is pass-by-ref, free (encoded) TID Datum memory */
1889 #ifndef USE_FLOAT8_BYVAL
1890  pfree(DatumGetPointer(ts_val));
1891 #endif
1892  }
1893  else
1894  {
1895  /* Be tidy */
1896  indexcursor = NULL;
1897  }
1898  }
1899 
1900  /*
1901  * If the tuplesort has overshot *and* we didn't see a match earlier,
1902  * then this tuple is missing from the index, so insert it.
1903  */
1904  if ((tuplesort_empty ||
1905  ItemPointerCompare(indexcursor, &rootTuple) > 0) &&
1906  !in_index[root_offnum - 1])
1907  {
1909 
1910  /* Set up for predicate or expression evaluation */
1911  ExecStoreHeapTuple(heapTuple, slot, false);
1912 
1913  /*
1914  * In a partial index, discard tuples that don't satisfy the
1915  * predicate.
1916  */
1917  if (predicate != NULL)
1918  {
1919  if (!ExecQual(predicate, econtext))
1920  continue;
1921  }
1922 
1923  /*
1924  * For the current heap tuple, extract all the attributes we use
1925  * in this index, and note which are null. This also performs
1926  * evaluation of any expressions needed.
1927  */
1928  FormIndexDatum(indexInfo,
1929  slot,
1930  estate,
1931  values,
1932  isnull);
1933 
1934  /*
1935  * You'd think we should go ahead and build the index tuple here,
1936  * but some index AMs want to do further processing on the data
1937  * first. So pass the values[] and isnull[] arrays, instead.
1938  */
1939 
1940  /*
1941  * If the tuple is already committed dead, you might think we
1942  * could suppress uniqueness checking, but this is no longer true
1943  * in the presence of HOT, because the insert is actually a proxy
1944  * for a uniqueness check on the whole HOT-chain. That is, the
1945  * tuple we have here could be dead because it was already
1946  * HOT-updated, and if so the updating transaction will not have
1947  * thought it should insert index entries. The index AM will
1948  * check the whole HOT-chain and correctly detect a conflict if
1949  * there is one.
1950  */
1951 
1952  index_insert(indexRelation,
1953  values,
1954  isnull,
1955  &rootTuple,
1956  heapRelation,
1957  indexInfo->ii_Unique ?
1959  false,
1960  indexInfo);
1961 
1962  state->tups_inserted += 1;
1963  }
1964  }
1965 
1966  table_endscan(scan);
1967 
1969 
1970  FreeExecutorState(estate);
1971 
1972  /* These may have been pointing to the now-gone estate */
1973  indexInfo->ii_ExpressionsState = NIL;
1974  indexInfo->ii_PredicateState = NULL;
1975 }
int32 ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
Definition: itemptr.c:52
void FormIndexDatum(IndexInfo *indexInfo, TupleTableSlot *slot, EState *estate, Datum *values, bool *isnull)
Definition: index.c:2816
#define NIL
Definition: pg_list.h:65
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
bool tuplesort_getdatum(Tuplesortstate *state, bool forward, Datum *val, bool *isNull, Datum *abbrev)
Definition: tuplesort.c:2494
BlockNumber rs_cblock
Definition: heapam.h:59
List * ii_Predicate
Definition: execnodes.h:162
#define RelationGetDescr(relation)
Definition: rel.h:483
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1208
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3478
ExprState * ii_PredicateState
Definition: execnodes.h:163
MemoryContext ecxt_per_tuple_memory
Definition: execnodes.h:233
#define MaxHeapTuplesPerPage
Definition: htup_details.h:574
int errcode(int sqlerrcode)
Definition: elog.c:694
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:137
uint32 BlockNumber
Definition: block.h:31
Form_pg_class rd_rel
Definition: rel.h:110
static bool ExecQual(ExprState *state, ExprContext *econtext)
Definition: executor.h:372
#define OidIsValid(objectId)
Definition: c.h:710
static TableScanDesc table_beginscan_strat(Relation rel, Snapshot snapshot, int nkeys, struct ScanKeyData *key, bool allow_strat, bool allow_sync)
Definition: tableam.h:907
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
uint16 OffsetNumber
Definition: off.h:24
void FreeExecutorState(EState *estate)
Definition: execUtils.c:186
#define GetPerTupleExprContext(estate)
Definition: executor.h:509
List * ii_ExpressionsState
Definition: execnodes.h:161
void pfree(void *pointer)
Definition: mcxt.c:1057
#define ERROR
Definition: elog.h:45
ItemPointerData t_self
Definition: htup.h:65
#define DatumGetInt64(X)
Definition: postgres.h:607
Tuplesortstate * tuplesort
Definition: index.h:49
HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction)
Definition: heapam.c:1316
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1224
#define RelationGetRelationName(relation)
Definition: rel.h:491
ExprState * ExecPrepareQual(List *qual, EState *estate)
Definition: execExpr.c:520
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
void heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
Definition: pruneheap.c:884
EState * CreateExecutorState(void)
Definition: execUtils.c:90
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:45
#define PROGRESS_SCAN_BLOCKS_DONE
Definition: progress.h:120
uintptr_t Datum
Definition: postgres.h:367
BlockNumber rs_nblocks
Definition: heapam.h:52
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3939
static void itemptr_decode(ItemPointer itemptr, int64 encoded)
Definition: index.h:209
#define ereport(elevel,...)
Definition: elog.h:155
Buffer rs_cbuf
Definition: heapam.h:60
int errmsg_internal(const char *fmt,...)
Definition: elog.c:992
bool ii_Unique
Definition: execnodes.h:171
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:685
#define Assert(condition)
Definition: c.h:804
double tups_inserted
Definition: index.h:51
#define INDEX_MAX_KEYS
#define InvalidBlockNumber
Definition: block.h:33
TupleTableSlot * ecxt_scantuple
Definition: execnodes.h:225
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define PROGRESS_SCAN_BLOCKS_TOTAL
Definition: progress.h:119
#define DatumGetPointer(X)
Definition: postgres.h:549
double htups
Definition: index.h:51
#define ItemPointerSetOffsetNumber(pointer, offsetNumber)
Definition: itemptr.h:148
static void table_endscan(TableScanDesc scan)
Definition: tableam.h:991
static Datum values[MAXATTR]
Definition: bootstrap.c:165
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
const TupleTableSlotOps TTSOpsHeapTuple
Definition: execTuples.c:84
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:100
bool index_insert(Relation indexRelation, Datum *values, bool *isnull, ItemPointer heap_t_ctid, Relation heapRelation, IndexUniqueCheck checkUnique, bool indexUnchanged, IndexInfo *indexInfo)
Definition: indexam.c:176
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
TupleTableSlot * ExecStoreHeapTuple(HeapTuple tuple, TupleTableSlot *slot, bool shouldFree)
Definition: execTuples.c:1322
Pointer Page
Definition: bufpage.h:78

◆ heapam_relation_copy_data()

static void heapam_relation_copy_data ( Relation  rel,
const RelFileNode newrnode 
)
static

Definition at line 623 of file heapam_handler.c.

References FlushRelationBuffers(), INIT_FORKNUM, log_smgrcreate(), MAIN_FORKNUM, MAX_FORKNUM, RelationData::rd_backend, RelationData::rd_rel, RelationData::rd_smgr, RelationCopyStorage(), RelationCreateStorage(), RelationDropStorage(), RelationOpenSmgr, smgrclose(), smgrcreate(), smgrexists(), and smgropen().

Referenced by SampleHeapTupleVisible().

624 {
625  SMgrRelation dstrel;
626 
627  dstrel = smgropen(*newrnode, rel->rd_backend);
628  RelationOpenSmgr(rel);
629 
630  /*
631  * Since we copy the file directly without looking at the shared buffers,
632  * we'd better first flush out any pages of the source relation that are
633  * in shared buffers. We assume no new changes will be made while we are
634  * holding exclusive lock on the rel.
635  */
637 
638  /*
639  * Create and copy all forks of the relation, and schedule unlinking of
640  * old physical files.
641  *
642  * NOTE: any conflict in relfilenode value will be caught in
643  * RelationCreateStorage().
644  */
645  RelationCreateStorage(*newrnode, rel->rd_rel->relpersistence);
646 
647  /* copy main fork */
649  rel->rd_rel->relpersistence);
650 
651  /* copy those extra forks that exist */
652  for (ForkNumber forkNum = MAIN_FORKNUM + 1;
653  forkNum <= MAX_FORKNUM; forkNum++)
654  {
655  if (smgrexists(rel->rd_smgr, forkNum))
656  {
657  smgrcreate(dstrel, forkNum, false);
658 
659  /*
660  * WAL log creation if the relation is persistent, or this is the
661  * init fork of an unlogged relation.
662  */
663  if (rel->rd_rel->relpersistence == RELPERSISTENCE_PERMANENT ||
664  (rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
665  forkNum == INIT_FORKNUM))
666  log_smgrcreate(newrnode, forkNum);
667  RelationCopyStorage(rel->rd_smgr, dstrel, forkNum,
668  rel->rd_rel->relpersistence);
669  }
670  }
671 
672 
673  /* drop old relation, and close new one */
674  RelationDropStorage(rel);
675  smgrclose(dstrel);
676 }
void smgrclose(SMgrRelation reln)
Definition: smgr.c:256
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:333
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
bool smgrexists(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:247
Form_pg_class rd_rel
Definition: rel.h:110
#define RelationOpenSmgr(relation)
Definition: rel.h:514
SMgrRelation RelationCreateStorage(RelFileNode rnode, char relpersistence)
Definition: storage.c:118
SMgrRelation smgropen(RelFileNode rnode, BackendId backend)
Definition: smgr.c:146
ForkNumber
Definition: relpath.h:40
void RelationDropStorage(Relation rel)
Definition: storage.c:195
void RelationCopyStorage(SMgrRelation src, SMgrRelation dst, ForkNumber forkNum, char relpersistence)
Definition: storage.c:408
BackendId rd_backend
Definition: rel.h:59
void FlushRelationBuffers(Relation rel)
Definition: bufmgr.c:3441
#define MAX_FORKNUM
Definition: relpath.h:55
void log_smgrcreate(const RelFileNode *rnode, ForkNumber forkNum)
Definition: storage.c:175

◆ heapam_relation_copy_for_cluster()

static void heapam_relation_copy_for_cluster ( Relation  OldHeap,
Relation  NewHeap,
Relation  OldIndex,
bool  use_sort,
TransactionId  OldestXmin,
TransactionId xid_cutoff,
MultiXactId multi_cutoff,
double *  num_tuples,
double *  tups_vacuumed,
double *  tups_recently_dead 
)
static

Definition at line 679 of file heapam_handler.c.

References Assert, begin_heap_rewrite(), buf, BufferHeapTupleTableSlot::buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, CHECK_FOR_INTERRUPTS, elog, end_heap_rewrite(), ERROR, ExecDropSingleTupleTableSlot(), ExecFetchSlotHeapTuple(), ForwardScanDirection, HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleSatisfiesVacuum(), index_beginscan(), index_endscan(), index_getnext_slot(), index_rescan(), InvalidBlockNumber, IsSystemRelation(), LockBuffer(), maintenance_work_mem, TupleDescData::natts, palloc(), pfree(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_CLUSTER_HEAP_BLKS_SCANNED, PROGRESS_CLUSTER_HEAP_TUPLES_SCANNED, PROGRESS_CLUSTER_HEAP_TUPLES_WRITTEN, PROGRESS_CLUSTER_INDEX_RELID, PROGRESS_CLUSTER_PHASE, PROGRESS_CLUSTER_PHASE_INDEX_SCAN_HEAP, PROGRESS_CLUSTER_PHASE_SEQ_SCAN_HEAP, PROGRESS_CLUSTER_PHASE_SORT_TUPLES, PROGRESS_CLUSTER_PHASE_WRITE_NEW_HEAP, PROGRESS_CLUSTER_TOTAL_HEAP_BLKS, reform_and_rewrite_tuple(), RelationGetDescr, RelationGetRelationName, RelationGetRelid, RelationGetTargetBlock, rewrite_heap_dead_tuple(), HeapScanDescData::rs_cblock, HeapScanDescData::rs_nblocks, HeapScanDescData::rs_startblock, SnapshotAny, HeapTupleData::t_data, table_beginscan(), table_endscan(), table_scan_getnextslot(), table_slot_create(), TransactionIdIsCurrentTransactionId(), tuplesort_begin_cluster(), tuplesort_end(), tuplesort_getheaptuple(), tuplesort_performsort(), tuplesort_putheaptuple(), values, WARNING, and IndexScanDescData::xs_recheck.

Referenced by SampleHeapTupleVisible().

687 {
688  RewriteState rwstate;
689  IndexScanDesc indexScan;
690  TableScanDesc tableScan;
691  HeapScanDesc heapScan;
692  bool is_system_catalog;
693  Tuplesortstate *tuplesort;
694  TupleDesc oldTupDesc = RelationGetDescr(OldHeap);
695  TupleDesc newTupDesc = RelationGetDescr(NewHeap);
696  TupleTableSlot *slot;
697  int natts;
698  Datum *values;
699  bool *isnull;
701  BlockNumber prev_cblock = InvalidBlockNumber;
702 
703  /* Remember if it's a system catalog */
704  is_system_catalog = IsSystemRelation(OldHeap);
705 
706  /*
707  * Valid smgr_targblock implies something already wrote to the relation.
708  * This may be harmless, but this function hasn't planned for it.
709  */
711 
712  /* Preallocate values/isnull arrays */
713  natts = newTupDesc->natts;
714  values = (Datum *) palloc(natts * sizeof(Datum));
715  isnull = (bool *) palloc(natts * sizeof(bool));
716 
717  /* Initialize the rewrite operation */
718  rwstate = begin_heap_rewrite(OldHeap, NewHeap, OldestXmin, *xid_cutoff,
719  *multi_cutoff);
720 
721 
722  /* Set up sorting if wanted */
723  if (use_sort)
724  tuplesort = tuplesort_begin_cluster(oldTupDesc, OldIndex,
726  NULL, false);
727  else
728  tuplesort = NULL;
729 
730  /*
731  * Prepare to scan the OldHeap. To ensure we see recently-dead tuples
732  * that still need to be copied, we scan with SnapshotAny and use
733  * HeapTupleSatisfiesVacuum for the visibility test.
734  */
735  if (OldIndex != NULL && !use_sort)
736  {
737  const int ci_index[] = {
740  };
741  int64 ci_val[2];
742 
743  /* Set phase and OIDOldIndex to columns */
745  ci_val[1] = RelationGetRelid(OldIndex);
746  pgstat_progress_update_multi_param(2, ci_index, ci_val);
747 
748  tableScan = NULL;
749  heapScan = NULL;
750  indexScan = index_beginscan(OldHeap, OldIndex, SnapshotAny, 0, 0);
751  index_rescan(indexScan, NULL, 0, NULL, 0);
752  }
753  else
754  {
755  /* In scan-and-sort mode and also VACUUM FULL, set phase */
758 
759  tableScan = table_beginscan(OldHeap, SnapshotAny, 0, (ScanKey) NULL);
760  heapScan = (HeapScanDesc) tableScan;
761  indexScan = NULL;
762 
763  /* Set total heap blocks */
765  heapScan->rs_nblocks);
766  }
767 
768  slot = table_slot_create(OldHeap, NULL);
769  hslot = (BufferHeapTupleTableSlot *) slot;
770 
771  /*
772  * Scan through the OldHeap, either in OldIndex order or sequentially;
773  * copy each tuple into the NewHeap, or transiently to the tuplesort
774  * module. Note that we don't bother sorting dead tuples (they won't get
775  * to the new table anyway).
776  */
777  for (;;)
778  {
779  HeapTuple tuple;
780  Buffer buf;
781  bool isdead;
782 
784 
785  if (indexScan != NULL)
786  {
787  if (!index_getnext_slot(indexScan, ForwardScanDirection, slot))
788  break;
789 
790  /* Since we used no scan keys, should never need to recheck */
791  if (indexScan->xs_recheck)
792  elog(ERROR, "CLUSTER does not support lossy index conditions");
793  }
794  else
795  {
796  if (!table_scan_getnextslot(tableScan, ForwardScanDirection, slot))
797  {
798  /*
799  * If the last pages of the scan were empty, we would go to
800  * the next phase while heap_blks_scanned != heap_blks_total.
801  * Instead, to ensure that heap_blks_scanned is equivalent to
802  * total_heap_blks after the table scan phase, this parameter
803  * is manually updated to the correct value when the table
804  * scan finishes.
805  */
807  heapScan->rs_nblocks);
808  break;
809  }
810 
811  /*
812  * In scan-and-sort mode and also VACUUM FULL, set heap blocks
813  * scanned
814  *
815  * Note that heapScan may start at an offset and wrap around, i.e.
816  * rs_startblock may be >0, and rs_cblock may end with a number
817  * below rs_startblock. To prevent showing this wraparound to the
818  * user, we offset rs_cblock by rs_startblock (modulo rs_nblocks).
819  */
820  if (prev_cblock != heapScan->rs_cblock)
821  {
823  (heapScan->rs_cblock +
824  heapScan->rs_nblocks -
825  heapScan->rs_startblock
826  ) % heapScan->rs_nblocks + 1);
827  prev_cblock = heapScan->rs_cblock;
828  }
829  }
830 
831  tuple = ExecFetchSlotHeapTuple(slot, false, NULL);
832  buf = hslot->buffer;
833 
835 
836  switch (HeapTupleSatisfiesVacuum(tuple, OldestXmin, buf))
837  {
838  case HEAPTUPLE_DEAD:
839  /* Definitely dead */
840  isdead = true;
841  break;
843  *tups_recently_dead += 1;
844  /* fall through */
845  case HEAPTUPLE_LIVE:
846  /* Live or recently dead, must copy it */
847  isdead = false;
848  break;
850 
851  /*
852  * Since we hold exclusive lock on the relation, normally the
853  * only way to see this is if it was inserted earlier in our
854  * own transaction. However, it can happen in system
855  * catalogs, since we tend to release write lock before commit
856  * there. Give a warning if neither case applies; but in any
857  * case we had better copy it.
858  */
859  if (!is_system_catalog &&
861  elog(WARNING, "concurrent insert in progress within table \"%s\"",
862  RelationGetRelationName(OldHeap));
863  /* treat as live */
864  isdead = false;
865  break;
867 
868  /*
869  * Similar situation to INSERT_IN_PROGRESS case.
870  */
871  if (!is_system_catalog &&
873  elog(WARNING, "concurrent delete in progress within table \"%s\"",
874  RelationGetRelationName(OldHeap));
875  /* treat as recently dead */
876  *tups_recently_dead += 1;
877  isdead = false;
878  break;
879  default:
880  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
881  isdead = false; /* keep compiler quiet */
882  break;
883  }
884 
886 
887  if (isdead)
888  {
889  *tups_vacuumed += 1;
890  /* heap rewrite module still needs to see it... */
891  if (rewrite_heap_dead_tuple(rwstate, tuple))
892  {
893  /* A previous recently-dead tuple is now known dead */
894  *tups_vacuumed += 1;
895  *tups_recently_dead -= 1;
896  }
897  continue;
898  }
899 
900  *num_tuples += 1;
901  if (tuplesort != NULL)
902  {
903  tuplesort_putheaptuple(tuplesort, tuple);
904 
905  /*
906  * In scan-and-sort mode, report increase in number of tuples
907  * scanned
908  */
910  *num_tuples);
911  }
912  else
913  {
914  const int ct_index[] = {
917  };
918  int64 ct_val[2];
919 
920  reform_and_rewrite_tuple(tuple, OldHeap, NewHeap,
921  values, isnull, rwstate);
922 
923  /*
924  * In indexscan mode and also VACUUM FULL, report increase in
925  * number of tuples scanned and written
926  */
927  ct_val[0] = *num_tuples;
928  ct_val[1] = *num_tuples;
929  pgstat_progress_update_multi_param(2, ct_index, ct_val);
930  }
931  }
932 
933  if (indexScan != NULL)
934  index_endscan(indexScan);
935  if (tableScan != NULL)
936  table_endscan(tableScan);
937  if (slot)
939 
940  /*
941  * In scan-and-sort mode, complete the sort, then read out all live tuples
942  * from the tuplestore and write them to the new relation.
943  */
944  if (tuplesort != NULL)
945  {
946  double n_tuples = 0;
947 
948  /* Report that we are now sorting tuples */
951 
952  tuplesort_performsort(tuplesort);
953 
954  /* Report that we are now writing new heap */
957 
958  for (;;)
959  {
960  HeapTuple tuple;
961 
963 
964  tuple = tuplesort_getheaptuple(tuplesort, true);
965  if (tuple == NULL)
966  break;
967 
968  n_tuples += 1;
970  OldHeap, NewHeap,
971  values, isnull,
972  rwstate);
973  /* Report n_tuples */
975  n_tuples);
976  }
977 
978  tuplesort_end(tuplesort);
979  }
980 
981  /* Write out any remaining tuples, and fsync if needed */
982  end_heap_rewrite(rwstate);
983 
984  /* Clean up */
985  pfree(values);
986  pfree(isnull);
987 }
TupleTableSlot * table_slot_create(Relation relation, List **reglist)
Definition: tableam.c:91
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define PROGRESS_CLUSTER_PHASE_SEQ_SCAN_HEAP
Definition: progress.h:65
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
BlockNumber rs_cblock
Definition: heapam.h:59
void tuplesort_performsort(Tuplesortstate *state)
Definition: tuplesort.c:2040
void end_heap_rewrite(RewriteState state)
Definition: rewriteheap.c:300
HeapTuple tuplesort_getheaptuple(Tuplesortstate *state, bool forward)
Definition: tuplesort.c:2445
bool IsSystemRelation(Relation relation)
Definition: catalog.c:66
#define PROGRESS_CLUSTER_HEAP_TUPLES_WRITTEN
Definition: progress.h:59
#define RelationGetDescr(relation)
Definition: rel.h:483
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
static void reform_and_rewrite_tuple(HeapTuple tuple, Relation OldHeap, Relation NewHeap, Datum *values, bool *isnull, RewriteState rwstate)
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3478
Tuplesortstate * tuplesort_begin_cluster(TupleDesc tupDesc, Relation indexRel, int workMem, SortCoordinate coordinate, bool randomAccess)
Definition: tuplesort.c:971
void index_rescan(IndexScanDesc scan, ScanKey keys, int nkeys, ScanKey orderbys, int norderbys)
Definition: indexam.c:297
#define PROGRESS_CLUSTER_INDEX_RELID
Definition: progress.h:57
#define PROGRESS_CLUSTER_PHASE_WRITE_NEW_HEAP
Definition: progress.h:68
uint32 BlockNumber
Definition: block.h:31
static bool table_scan_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
Definition: tableam.h:1032
#define RelationGetTargetBlock(relation)
Definition: rel.h:542
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
HeapTupleHeader t_data
Definition: htup.h:68
void pfree(void *pointer)
Definition: mcxt.c:1057
#define ERROR
Definition: elog.h:45
static TableScanDesc table_beginscan(Relation rel, Snapshot snapshot, int nkeys, struct ScanKeyData *key)
Definition: tableam.h:883
#define PROGRESS_CLUSTER_PHASE_SORT_TUPLES
Definition: progress.h:67
static char * buf
Definition: pg_test_fsync.c:68
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1224
#define PROGRESS_CLUSTER_HEAP_TUPLES_SCANNED
Definition: progress.h:58
#define RelationGetRelationName(relation)
Definition: rel.h:491
static TransactionId OldestXmin
Definition: vacuumlazy.c:335
void index_endscan(IndexScanDesc scan)
Definition: indexam.c:323
BlockNumber rs_startblock
Definition: heapam.h:53
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
Definition: execTuples.c:1614
#define WARNING
Definition: elog.h:40
uintptr_t Datum
Definition: postgres.h:367
BlockNumber rs_nblocks
Definition: heapam.h:52
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3939
int maintenance_work_mem
Definition: globals.c:124
#define Assert(condition)
Definition: c.h:804
#define PROGRESS_CLUSTER_PHASE_INDEX_SCAN_HEAP
Definition: progress.h:66
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
void pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val)
Definition: pgstat.c:3500
#define InvalidBlockNumber
Definition: block.h:33
bool index_getnext_slot(IndexScanDesc scan, ScanDirection direction, TupleTableSlot *slot)
Definition: indexam.c:616
#define SnapshotAny
Definition: snapmgr.h:68
static void table_endscan(TableScanDesc scan)
Definition: tableam.h:991
static Datum values[MAXATTR]
Definition: bootstrap.c:165
bool rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
Definition: rewriteheap.c:564
void * palloc(Size size)
Definition: mcxt.c:950
#define elog(elevel,...)
Definition: elog.h:227
void tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
Definition: tuplesort.c:1706
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define PROGRESS_CLUSTER_HEAP_BLKS_SCANNED
Definition: progress.h:61
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:100
#define PROGRESS_CLUSTER_TOTAL_HEAP_BLKS
Definition: progress.h:60
RewriteState begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xmin, TransactionId freeze_xid, MultiXactId cutoff_multi)
Definition: rewriteheap.c:237
void tuplesort_end(Tuplesortstate *state)
Definition: tuplesort.c:1464
#define PROGRESS_CLUSTER_PHASE
Definition: progress.h:56
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:457
IndexScanDesc index_beginscan(Relation heapRelation, Relation indexRelation, Snapshot snapshot, int nkeys, int norderbys)
Definition: indexam.c:205

◆ heapam_relation_needs_toast_table()

static bool heapam_relation_needs_toast_table ( Relation  rel)
static

Definition at line 2029 of file heapam_handler.c.

References att_align_nominal, BITMAPLEN, i, MAXALIGN, TupleDescData::natts, RelationData::rd_att, SizeofHeapTupleHeader, TOAST_TUPLE_THRESHOLD, TupleDescAttr, and type_maximum_size().

Referenced by SampleHeapTupleVisible().

/*
 * heapam_relation_needs_toast_table
 *		Decide whether the given heap relation needs a TOAST table.
 *
 * Returns true only if the relation has at least one attribute with
 * non-PLAIN storage AND the worst-case tuple size is either unbounded
 * or exceeds TOAST_TUPLE_THRESHOLD.
 */
2030 {
2031  int32 data_length = 0;
2032  bool maxlength_unknown = false;
2033  bool has_toastable_attrs = false;
2034  TupleDesc tupdesc = rel->rd_att;
2035  int32 tuple_length;
2036  int i;
2037 
/* Accumulate the worst-case data size over all non-dropped attributes. */
2038  for (i = 0; i < tupdesc->natts; i++)
2039  {
2040  Form_pg_attribute att = TupleDescAttr(tupdesc, i);
2041 
2042  if (att->attisdropped)
2043  continue;
/* Account for alignment padding before this attribute. */
2044  data_length = att_align_nominal(data_length, att->attalign);
2045  if (att->attlen > 0)
2046  {
2047  /* Fixed-length types are never toastable */
2048  data_length += att->attlen;
2049  }
2050  else
2051  {
/* Variable-length type: ask the type for its maximum size, if bounded. */
2052  int32 maxlen = type_maximum_size(att->atttypid,
2053  att->atttypmod);
2054 
2055  if (maxlen < 0)
2056  maxlength_unknown = true;
2057  else
2058  data_length += maxlen;
2059  if (att->attstorage != TYPSTORAGE_PLAIN)
2060  has_toastable_attrs = true;
2061  }
2062  }
2063  if (!has_toastable_attrs)
2064  return false; /* nothing to toast? */
2065  if (maxlength_unknown)
2066  return true; /* any unlimited-length attrs? */
/* Worst-case whole-tuple size: header + null bitmap + aligned data. */
2067  tuple_length = MAXALIGN(SizeofHeapTupleHeader +
2068  BITMAPLEN(tupdesc->natts)) +
2069  MAXALIGN(data_length);
2070  return (tuple_length > TOAST_TUPLE_THRESHOLD);
2071 }
#define SizeofHeapTupleHeader
Definition: htup_details.h:184
#define att_align_nominal(cur_offset, attalign)
Definition: tupmacs.h:148
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
#define BITMAPLEN(NATTS)
Definition: htup_details.h:547
signed int int32
Definition: c.h:429
int32 type_maximum_size(Oid type_oid, int32 typemod)
Definition: format_type.c:408
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:197
#define TOAST_TUPLE_THRESHOLD
Definition: heaptoast.h:48
TupleDesc rd_att
Definition: rel.h:111
#define MAXALIGN(LEN)
Definition: c.h:757
int i

◆ heapam_relation_nontransactional_truncate()

static void heapam_relation_nontransactional_truncate ( Relation  rel)
static

Definition at line 617 of file heapam_handler.c.

References RelationTruncate().

Referenced by SampleHeapTupleVisible().

/*
 * heapam_relation_nontransactional_truncate
 *		Discard all storage of the relation by truncating it to zero
 *		blocks.  As the name says, this is not rolled back on abort.
 */
618 {
619  RelationTruncate(rel, 0);
620 }
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:277

◆ heapam_relation_set_new_filenode()

static void heapam_relation_set_new_filenode ( Relation  rel,
const RelFileNode newrnode,
char  persistence,
TransactionId freezeXid,
MultiXactId minmulti 
)
static

Definition at line 567 of file heapam_handler.c.

References Assert, GetOldestMultiXactId(), INIT_FORKNUM, log_smgrcreate(), RelationData::rd_rel, RecentXmin, RelationCreateStorage(), smgrclose(), smgrcreate(), and smgrimmedsync().

Referenced by SampleHeapTupleVisible().

/*
 * heapam_relation_set_new_filenode
 *		Create new relation storage for rel and report, via *freezeXid and
 *		*minmulti, the oldest XID / MultiXactId that could appear in the
 *		new storage.  For unlogged relations, also sets up the init fork.
 */
572 {
573  SMgrRelation srel;
574 
575  /*
576  * Initialize to the minimum XID that could put tuples in the table. We
577  * know that no xacts older than RecentXmin are still running, so that
578  * will do.
579  */
580  *freezeXid = RecentXmin;
581 
582  /*
583  * Similarly, initialize the minimum Multixact to the first value that
584  * could possibly be stored in tuples in the table. Running transactions
585  * could reuse values from their local cache, so we are careful to
586  * consider all currently running multis.
587  *
588  * XXX this could be refined further, but is it worth the hassle?
589  */
590  *minmulti = GetOldestMultiXactId();
591 
592  srel = RelationCreateStorage(*newrnode, persistence);
593 
594  /*
595  * If required, set up an init fork for an unlogged table so that it can
596  * be correctly reinitialized on restart. An immediate sync is required
597  * even if the page has been logged, because the write did not go through
598  * shared_buffers and therefore a concurrent checkpoint may have moved the
599  * redo pointer past our xlog record. Recovery may as well remove it
600  * while replaying, for example, XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE
601  * record. Therefore, logging is necessary even if wal_level=minimal.
602  */
603  if (persistence == RELPERSISTENCE_UNLOGGED)
604  {
605  Assert(rel->rd_rel->relkind == RELKIND_RELATION ||
606  rel->rd_rel->relkind == RELKIND_MATVIEW ||
607  rel->rd_rel->relkind == RELKIND_TOASTVALUE);
608  smgrcreate(srel, INIT_FORKNUM, false);
609  log_smgrcreate(newrnode, INIT_FORKNUM);
/*
 * NOTE(review): source line 610 is missing from this extraction; per the
 * References list and the comment above it is presumably the
 * smgrimmedsync(srel, INIT_FORKNUM) call -- verify against the original
 * heapam_handler.c.
 */
611  }
612 
613  smgrclose(srel);
614 }
void smgrclose(SMgrRelation reln)
Definition: smgr.c:256
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:333
TransactionId RecentXmin
Definition: snapmgr.c:113
Form_pg_class rd_rel
Definition: rel.h:110
SMgrRelation RelationCreateStorage(RelFileNode rnode, char relpersistence)
Definition: storage.c:118
MultiXactId GetOldestMultiXactId(void)
Definition: multixact.c:2503
#define Assert(condition)
Definition: c.h:804
void smgrimmedsync(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:660
void log_smgrcreate(const RelFileNode *rnode, ForkNumber forkNum)
Definition: storage.c:175

◆ heapam_relation_toast_am()

static Oid heapam_relation_toast_am ( Relation  rel)
static

Definition at line 2077 of file heapam_handler.c.

References RelationData::rd_rel.

Referenced by SampleHeapTupleVisible().

/*
 * heapam_relation_toast_am
 *		Return the OID of the table access method (pg_class.relam) that
 *		should be used for this relation's TOAST table: the relation's
 *		own AM.
 */
2078 {
2079  return rel->rd_rel->relam;
2080 }
Form_pg_class rd_rel
Definition: rel.h:110

◆ heapam_scan_analyze_next_block()

static bool heapam_scan_analyze_next_block ( TableScanDesc  scan,
BlockNumber  blockno,
BufferAccessStrategy  bstrategy 
)
static

Definition at line 990 of file heapam_handler.c.

References BUFFER_LOCK_SHARE, FirstOffsetNumber, LockBuffer(), MAIN_FORKNUM, RBM_NORMAL, ReadBufferExtended(), HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_cindex, and TableScanDescData::rs_rd.

Referenced by SampleHeapTupleVisible().

/*
 * heapam_scan_analyze_next_block
 *		Prepare the given block for ANALYZE sampling: read and pin it,
 *		and position rs_cindex at the first line pointer.  Always returns
 *		true, since every heap block can contain tuples.
 */
992 {
993  HeapScanDesc hscan = (HeapScanDesc) scan;
994 
995  /*
996  * We must maintain a pin on the target page's buffer to ensure that
997  * concurrent activity - e.g. HOT pruning - doesn't delete tuples out from
998  * under us. Hence, pin the page until we are done looking at it. We
999  * also choose to hold sharelock on the buffer throughout --- we could
1000  * release and re-acquire sharelock for each tuple, but since we aren't
1001  * doing much work per tuple, the extra lock traffic is probably better
1002  * avoided.
1003  */
1004  hscan->rs_cblock = blockno;
1005  hscan->rs_cindex = FirstOffsetNumber;
1006  hscan->rs_cbuf = ReadBufferExtended(scan->rs_rd, MAIN_FORKNUM,
1007  blockno, RBM_NORMAL, bstrategy);
/*
 * NOTE(review): source line 1008 is missing from this extraction; given the
 * comment above and the References list (LockBuffer, BUFFER_LOCK_SHARE) it
 * is presumably LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE) -- verify
 * against the original heapam_handler.c.
 */
1009 
1010  /* in heap all blocks can contain tuples, so always return true */
1011  return true;
1012 }
BlockNumber rs_cblock
Definition: heapam.h:59
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:666
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
#define FirstOffsetNumber
Definition: off.h:27
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3939
Buffer rs_cbuf
Definition: heapam.h:60
Relation rs_rd
Definition: relscan.h:34
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97

◆ heapam_scan_analyze_next_tuple()

static bool heapam_scan_analyze_next_tuple ( TableScanDesc  scan,
TransactionId  OldestXmin,
double *  liverows,
double *  deadrows,
TupleTableSlot slot 
)
static

Definition at line 1015 of file heapam_handler.c.

References Assert, BufferHeapTupleTableSlot::base, BufferGetPage, elog, ERROR, ExecClearTuple(), ExecStoreBufferHeapTuple(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleSatisfiesVacuum(), InvalidBuffer, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemPointerSet, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, RelationGetRelid, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_cindex, TableScanDescData::rs_rd, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdIsCurrentTransactionId(), TTS_IS_BUFFERTUPLE, HeapTupleTableSlot::tupdata, and UnlockReleaseBuffer().

Referenced by SampleHeapTupleVisible().

/*
 * heapam_scan_analyze_next_tuple
 *		Return the next sampled tuple from the page pinned by
 *		heapam_scan_analyze_next_block, updating *liverows / *deadrows
 *		per tuple visibility.  Stores a sampled tuple into slot and
 *		returns true; returns false (releasing the buffer) when the page
 *		is exhausted.
 *
 * NOTE(review): several hyperlink-only source lines (switch-case labels and
 * if-conditions) were dropped by this extraction; each gap is marked below.
 * Verify against the original heapam_handler.c before relying on this text.
 */
1018 {
1019  HeapScanDesc hscan = (HeapScanDesc) scan;
1020  Page targpage;
1021  OffsetNumber maxoffset;
1022  BufferHeapTupleTableSlot *hslot;
1023 
1024  Assert(TTS_IS_BUFFERTUPLE(slot));
1025 
1026  hslot = (BufferHeapTupleTableSlot *) slot;
1027  targpage = BufferGetPage(hscan->rs_cbuf);
1028  maxoffset = PageGetMaxOffsetNumber(targpage);
1029 
1030  /* Inner loop over all tuples on the selected page */
1031  for (; hscan->rs_cindex <= maxoffset; hscan->rs_cindex++)
1032  {
1033  ItemId itemid;
1034  HeapTuple targtuple = &hslot->base.tupdata;
1035  bool sample_it = false;
1036 
1037  itemid = PageGetItemId(targpage, hscan->rs_cindex);
1038 
1039  /*
1040  * We ignore unused and redirect line pointers. DEAD line pointers
1041  * should be counted as dead, because we need vacuum to run to get rid
1042  * of them. Note that this rule agrees with the way that
1043  * heap_page_prune() counts things.
1044  */
1045  if (!ItemIdIsNormal(itemid))
1046  {
1047  if (ItemIdIsDead(itemid))
1048  *deadrows += 1;
1049  continue;
1050  }
1051 
1052  ItemPointerSet(&targtuple->t_self, hscan->rs_cblock, hscan->rs_cindex);
1053 
1054  targtuple->t_tableOid = RelationGetRelid(scan->rs_rd);
1055  targtuple->t_data = (HeapTupleHeader) PageGetItem(targpage, itemid);
1056  targtuple->t_len = ItemIdGetLength(itemid);
1057 
1058  switch (HeapTupleSatisfiesVacuum(targtuple, OldestXmin,
1059  hscan->rs_cbuf))
1060  {
1061  case HEAPTUPLE_LIVE:
1062  sample_it = true;
1063  *liverows += 1;
1064  break;
1065 
1066  case HEAPTUPLE_DEAD:
/* (missing source line 1067: presumably "case HEAPTUPLE_RECENTLY_DEAD:") */
1068  /* Count dead and recently-dead rows */
1069  *deadrows += 1;
1070  break;
1071 
/* (missing source line 1072: presumably "case HEAPTUPLE_INSERT_IN_PROGRESS:") */
1073 
1074  /*
1075  * Insert-in-progress rows are not counted. We assume that
1076  * when the inserting transaction commits or aborts, it will
1077  * send a stats message to increment the proper count. This
1078  * works right only if that transaction ends after we finish
1079  * analyzing the table; if things happen in the other order,
1080  * its stats update will be overwritten by ours. However, the
1081  * error will be large only if the other transaction runs long
1082  * enough to insert many tuples, so assuming it will finish
1083  * after us is the safer option.
1084  *
1085  * A special case is that the inserting transaction might be
1086  * our own. In this case we should count and sample the row,
1087  * to accommodate users who load a table and analyze it in one
1088  * transaction. (pgstat_report_analyze has to adjust the
1089  * numbers we send to the stats collector to make this come
1090  * out right.)
1091  */
/* (missing source line 1092: presumably an "is it our own xact?" check on the tuple's xmin -- verify) */
1093  {
1094  sample_it = true;
1095  *liverows += 1;
1096  }
1097  break;
1098 
/* (missing source line 1099: presumably "case HEAPTUPLE_DELETE_IN_PROGRESS:") */
1100 
1101  /*
1102  * We count and sample delete-in-progress rows the same as
1103  * live ones, so that the stats counters come out right if the
1104  * deleting transaction commits after us, per the same
1105  * reasoning given above.
1106  *
1107  * If the delete was done by our own transaction, however, we
1108  * must count the row as dead to make pgstat_report_analyze's
1109  * stats adjustments come out right. (Note: this works out
1110  * properly when the row was both inserted and deleted in our
1111  * xact.)
1112  *
1113  * The net effect of these choices is that we act as though an
1114  * IN_PROGRESS transaction hasn't happened yet, except if it
1115  * is our own transaction, which we assume has happened.
1116  *
1117  * This approach ensures that we behave sanely if we see both
1118  * the pre-image and post-image rows for a row being updated
1119  * by a concurrent transaction: we will sample the pre-image
1120  * but not the post-image. We also get sane results if the
1121  * concurrent transaction never commits.
1122  */
/* (missing source line 1123: presumably an "is the deleter our own xact?" check on the update xid -- verify) */
1124  *deadrows += 1;
1125  else
1126  {
1127  sample_it = true;
1128  *liverows += 1;
1129  }
1130  break;
1131 
1132  default:
1133  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1134  break;
1135  }
1136 
1137  if (sample_it)
1138  {
1139  ExecStoreBufferHeapTuple(targtuple, slot, hscan->rs_cbuf);
1140  hscan->rs_cindex++;
1141 
1142  /* note that we leave the buffer locked here! */
1143  return true;
1144  }
1145  }
1146 
1147  /* Now release the lock and pin on the page */
1148  UnlockReleaseBuffer(hscan->rs_cbuf);
1149  hscan->rs_cbuf = InvalidBuffer;
1150 
1151  /* also prevent old slot contents from having pin on page */
1152  ExecClearTuple(slot);
1153 
1154  return false;
1155 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
BlockNumber rs_cblock
Definition: heapam.h:59
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:425
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define InvalidBuffer
Definition: buf.h:25
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3723
#define ERROR
Definition: elog.h:45
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
static TransactionId OldestXmin
Definition: vacuumlazy.c:335
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
#define TTS_IS_BUFFERTUPLE(slot)
Definition: tuptable.h:231
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1362
Buffer rs_cbuf
Definition: heapam.h:60
#define Assert(condition)
Definition: c.h:804
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
Relation rs_rd
Definition: relscan.h:34
#define elog(elevel,...)
Definition: elog.h:227
HeapTupleTableSlot base
Definition: tuptable.h:259
HeapTupleData tupdata
Definition: tuptable.h:253
#define RelationGetRelid(relation)
Definition: rel.h:457
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127

◆ heapam_scan_bitmap_next_block()

static bool heapam_scan_bitmap_next_block ( TableScanDesc  scan,
TBMIterateResult tbmres 
)
static

Definition at line 2111 of file heapam_handler.c.

References Assert, TBMIterateResult::blockno, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, FirstOffsetNumber, heap_hot_search_buffer(), heap_page_prune_opt(), HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetXmin, HeapTupleSatisfiesVisibility(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetOffsetNumber, ItemPointerSet, LockBuffer(), MaxHeapTuplesPerPage, TBMIterateResult::ntuples, OffsetNumberNext, TBMIterateResult::offsets, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTID(), RelationData::rd_id, ReleaseAndReadBuffer(), HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_cindex, HeapScanDescData::rs_nblocks, HeapScanDescData::rs_ntuples, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapScanDescData::rs_vistuples, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by SampleHeapTupleVisible().

/*
 * heapam_scan_bitmap_next_block
 *		Position a bitmap heap scan on the page named by tbmres: pin it,
 *		collect the offsets of all visible tuples into rs_vistuples, and
 *		reset rs_cindex.  Returns true iff at least one visible tuple was
 *		found (or the page is past the scan's notion of relation end).
 */
2113 {
2114  HeapScanDesc hscan = (HeapScanDesc) scan;
2115  BlockNumber page = tbmres->blockno;
2116  Buffer buffer;
2117  Snapshot snapshot;
2118  int ntup;
2119 
2120  hscan->rs_cindex = 0;
2121  hscan->rs_ntuples = 0;
2122 
2123  /*
2124  * Ignore any claimed entries past what we think is the end of the
2125  * relation. It may have been extended after the start of our scan (we
2126  * only hold an AccessShareLock, and it could be inserts from this
2127  * backend).
2128  */
2129  if (page >= hscan->rs_nblocks)
2130  return false;
2131 
2132  /*
2133  * Acquire pin on the target heap page, trading in any pin we held before.
2134  */
2135  hscan->rs_cbuf = ReleaseAndReadBuffer(hscan->rs_cbuf,
2136  scan->rs_rd,
2137  page);
2138  hscan->rs_cblock = page;
2139  buffer = hscan->rs_cbuf;
2140  snapshot = scan->rs_snapshot;
2141 
2142  ntup = 0;
2143 
2144  /*
2145  * Prune and repair fragmentation for the whole page, if possible.
2146  */
2147  heap_page_prune_opt(scan->rs_rd, buffer);
2148 
2149  /*
2150  * We must hold share lock on the buffer content while examining tuple
2151  * visibility. Afterwards, however, the tuples we have found to be
2152  * visible are guaranteed good as long as we hold the buffer pin.
2153  */
2154  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2155 
2156  /*
2157  * We need two separate strategies for lossy and non-lossy cases.
2158  */
2159  if (tbmres->ntuples >= 0)
2160  {
2161  /*
2162  * Bitmap is non-lossy, so we just look through the offsets listed in
2163  * tbmres; but we have to follow any HOT chain starting at each such
2164  * offset.
2165  */
2166  int curslot;
2167 
2168  for (curslot = 0; curslot < tbmres->ntuples; curslot++)
2169  {
2170  OffsetNumber offnum = tbmres->offsets[curslot];
2171  ItemPointerData tid;
2172  HeapTupleData heapTuple;
2173 
/* heap_hot_search_buffer updates tid to the visible member of the chain. */
2174  ItemPointerSet(&tid, page, offnum);
2175  if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot,
2176  &heapTuple, NULL, true))
2177  hscan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
2178  }
2179  }
2180  else
2181  {
2182  /*
2183  * Bitmap is lossy, so we must examine each line pointer on the page.
2184  * But we can ignore HOT chains, since we'll check each tuple anyway.
2185  */
2186  Page dp = (Page) BufferGetPage(buffer);
2187  OffsetNumber maxoff = PageGetMaxOffsetNumber(dp);
2188  OffsetNumber offnum;
2189 
2190  for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
2191  {
2192  ItemId lp;
2193  HeapTupleData loctup;
2194  bool valid;
2195 
2196  lp = PageGetItemId(dp, offnum);
2197  if (!ItemIdIsNormal(lp))
2198  continue;
2199  loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
2200  loctup.t_len = ItemIdGetLength(lp);
2201  loctup.t_tableOid = scan->rs_rd->rd_id;
2202  ItemPointerSet(&loctup.t_self, page, offnum);
2203  valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
2204  if (valid)
2205  {
2206  hscan->rs_vistuples[ntup++] = offnum;
/* Take a predicate lock on each visible tuple for SSI. */
2207  PredicateLockTID(scan->rs_rd, &loctup.t_self, snapshot,
2208  HeapTupleHeaderGetXmin(loctup.t_data));
2209  }
2210  HeapCheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
2211  buffer, snapshot);
2212  }
2213  }
2214 
2215  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2216 
2217  Assert(ntup <= MaxHeapTuplesPerPage);
2218  hscan->rs_ntuples = ntup;
2219 
2220  return ntup > 0;
2221 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
BlockNumber rs_cblock
Definition: heapam.h:59
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define MaxHeapTuplesPerPage
Definition: htup_details.h:574
uint32 BlockNumber
Definition: block.h:31
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
BlockNumber blockno
Definition: tidbitmap.h:42
bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
Definition: heapam.c:1686
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER]
Definition: tidbitmap.h:46
#define FirstOffsetNumber
Definition: off.h:27
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
Oid rd_id
Definition: rel.h:112
int rs_ntuples
Definition: heapam.h:70
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
BlockNumber rs_nblocks
Definition: heapam.h:52
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3939
void PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot, TransactionId tuple_xid)
Definition: predicate.c:2614
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:9846
Buffer rs_cbuf
Definition: heapam.h:60
#define Assert(condition)
Definition: c.h:804
OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]
Definition: heapam.h:71
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
Buffer ReleaseAndReadBuffer(Buffer buffer, Relation relation, BlockNumber blockNum)
Definition: bufmgr.c:1546
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
void heap_page_prune_opt(Relation relation, Buffer buffer)
Definition: pruneheap.c:87
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heapam_scan_bitmap_next_tuple()

static bool heapam_scan_bitmap_next_tuple ( TableScanDesc  scan,
TBMIterateResult tbmres,
TupleTableSlot slot 
)
static

Definition at line 2224 of file heapam_handler.c.

References Assert, BufferGetPage, ExecStoreBufferHeapTuple(), ItemIdGetLength, ItemIdIsNormal, ItemPointerSet, PageGetItem, PageGetItemId, pgstat_count_heap_fetch, RelationData::rd_id, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_cindex, HeapScanDescData::rs_ctup, HeapScanDescData::rs_ntuples, TableScanDescData::rs_rd, HeapScanDescData::rs_vistuples, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by SampleHeapTupleVisible().

/*
 * heapam_scan_bitmap_next_tuple
 *		Return the next tuple recorded in rs_vistuples by
 *		heapam_scan_bitmap_next_block, storing it into slot.  Returns
 *		false when the page's visible tuples are exhausted.
 */
2227 {
2228  HeapScanDesc hscan = (HeapScanDesc) scan;
2229  OffsetNumber targoffset;
2230  Page dp;
2231  ItemId lp;
2232 
2233  /*
2234  * Out of range? If so, nothing more to look at on this page
2235  */
2236  if (hscan->rs_cindex < 0 || hscan->rs_cindex >= hscan->rs_ntuples)
2237  return false;
2238 
2239  targoffset = hscan->rs_vistuples[hscan->rs_cindex];
2240  dp = (Page) BufferGetPage(hscan->rs_cbuf);
2241  lp = PageGetItemId(dp, targoffset);
2242  Assert(ItemIdIsNormal(lp));
2243 
/* Materialize the current tuple's fields into the scan's rs_ctup. */
2244  hscan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
2245  hscan->rs_ctup.t_len = ItemIdGetLength(lp);
2246  hscan->rs_ctup.t_tableOid = scan->rs_rd->rd_id;
2247  ItemPointerSet(&hscan->rs_ctup.t_self, hscan->rs_cblock, targoffset);
2248 
/*
 * NOTE(review): source line 2249 is missing from this extraction; per the
 * References list it is presumably pgstat_count_heap_fetch(scan->rs_rd) --
 * verify against the original heapam_handler.c.
 */
2250 
2251  /*
2252  * Set up the result slot to point to this tuple. Note that the slot
2253  * acquires a pin on the buffer.
2254  */
/*
 * NOTE(review): source line 2255 is missing from this extraction;
 * presumably "ExecStoreBufferHeapTuple(&hscan->rs_ctup," given the
 * continuation arguments below -- verify against the original source.
 */
2256  slot,
2257  hscan->rs_cbuf);
2258 
2259  hscan->rs_cindex++;
2260 
2261  return true;
2262 }
BlockNumber rs_cblock
Definition: heapam.h:59
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
HeapTupleData rs_ctup
Definition: heapam.h:66
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
ItemPointerData t_self
Definition: htup.h:65
#define pgstat_count_heap_fetch(rel)
Definition: pgstat.h:1538
uint32 t_len
Definition: htup.h:64
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
Oid rd_id
Definition: rel.h:112
int rs_ntuples
Definition: heapam.h:70
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1362
Buffer rs_cbuf
Definition: heapam.h:60
#define Assert(condition)
Definition: c.h:804
OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]
Definition: heapam.h:71
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Relation rs_rd
Definition: relscan.h:34
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127

◆ heapam_scan_get_blocks_done()

static BlockNumber heapam_scan_get_blocks_done ( HeapScanDesc  hscan)
static

Definition at line 1984 of file heapam_handler.c.

References ParallelBlockTableScanDescData::phs_nblocks, ParallelBlockTableScanDescData::phs_startblock, HeapScanDescData::rs_base, HeapScanDescData::rs_cblock, HeapScanDescData::rs_nblocks, TableScanDescData::rs_parallel, and HeapScanDescData::rs_startblock.

Referenced by heapam_index_build_range_scan().

/*
 * heapam_scan_get_blocks_done
 *		Compute how many blocks a (possibly parallel) heap scan has
 *		processed so far, accounting for a non-zero start block and
 *		wraparound past the end of the relation.
 */
1985 {
1986  ParallelBlockTableScanDesc bpscan = NULL;
1987  BlockNumber startblock;
1988  BlockNumber blocks_done;
1989 
1990  if (hscan->rs_base.rs_parallel != NULL)
1991  {
/*
 * NOTE(review): source line 1992 is missing from this extraction;
 * presumably the assignment of hscan->rs_base.rs_parallel (cast to
 * ParallelBlockTableScanDesc) into bpscan, since bpscan is dereferenced
 * on the next line -- verify against the original heapam_handler.c.
 */
1993  startblock = bpscan->phs_startblock;
1994  }
1995  else
1996  startblock = hscan->rs_startblock;
1997 
1998  /*
1999  * Might have wrapped around the end of the relation, if startblock was
2000  * not zero.
2001  */
2002  if (hscan->rs_cblock > startblock)
2003  blocks_done = hscan->rs_cblock - startblock;
2004  else
2005  {
2006  BlockNumber nblocks;
2007 
/* Wrapped: count blocks from startblock to end, plus those from block 0. */
2008  nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks;
2009  blocks_done = nblocks - startblock +
2010  hscan->rs_cblock;
2011  }
2012 
2013  return blocks_done;
2014 }
BlockNumber rs_cblock
Definition: heapam.h:59
struct ParallelBlockTableScanDescData * ParallelBlockTableScanDesc
Definition: relscan.h:86
TableScanDescData rs_base
Definition: heapam.h:49
uint32 BlockNumber
Definition: block.h:31
BlockNumber rs_startblock
Definition: heapam.h:53
BlockNumber rs_nblocks
Definition: heapam.h:52
struct ParallelTableScanDescData * rs_parallel
Definition: relscan.h:50

◆ heapam_scan_sample_next_block()

static bool heapam_scan_sample_next_block ( TableScanDesc  scan,
SampleScanState scanstate 
)
static

Definition at line 2265 of file heapam_handler.c.

References Assert, BlockNumberIsValid, BufferIsValid, heapgetpage(), InvalidBlockNumber, InvalidBuffer, TsmRoutine::NextSampleBlock, ReleaseBuffer(), HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, TableScanDescData::rs_flags, HeapScanDescData::rs_inited, HeapScanDescData::rs_nblocks, TableScanDescData::rs_rd, HeapScanDescData::rs_startblock, SO_ALLOW_SYNC, ss_report_location(), and SampleScanState::tsmroutine.

Referenced by SampleHeapTupleVisible().

2266 {
2267  HeapScanDesc hscan = (HeapScanDesc) scan;
2268  TsmRoutine *tsm = scanstate->tsmroutine;
2269  BlockNumber blockno;
2270 
2271  /* return false immediately if relation is empty */
2272  if (hscan->rs_nblocks == 0)
2273  return false;
2274 
2275  if (tsm->NextSampleBlock)
2276  {
2277  blockno = tsm->NextSampleBlock(scanstate, hscan->rs_nblocks);
2278  hscan->rs_cblock = blockno;
2279  }
2280  else
2281  {
2282  /* scanning table sequentially */
2283 
2284  if (hscan->rs_cblock == InvalidBlockNumber)
2285  {
2286  Assert(!hscan->rs_inited);
2287  blockno = hscan->rs_startblock;
2288  }
2289  else
2290  {
2291  Assert(hscan->rs_inited);
2292 
2293  blockno = hscan->rs_cblock + 1;
2294 
2295  if (blockno >= hscan->rs_nblocks)
2296  {
2297  /* wrap to beginning of rel, might not have started at 0 */
2298  blockno = 0;
2299  }
2300 
2301  /*
2302  * Report our new scan position for synchronization purposes.
2303  *
2304  * Note: we do this before checking for end of scan so that the
2305  * final state of the position hint is back at the start of the
2306  * rel. That's not strictly necessary, but otherwise when you run
2307  * the same query multiple times the starting position would shift
2308  * a little bit backwards on every invocation, which is confusing.
2309  * We don't guarantee any specific ordering in general, though.
2310  */
2311  if (scan->rs_flags & SO_ALLOW_SYNC)
2312  ss_report_location(scan->rs_rd, blockno);
2313 
2314  if (blockno == hscan->rs_startblock)
2315  {
2316  blockno = InvalidBlockNumber;
2317  }
2318  }
2319  }
2320 
2321  if (!BlockNumberIsValid(blockno))
2322  {
2323  if (BufferIsValid(hscan->rs_cbuf))
2324  ReleaseBuffer(hscan->rs_cbuf);
2325  hscan->rs_cbuf = InvalidBuffer;
2326  hscan->rs_cblock = InvalidBlockNumber;
2327  hscan->rs_inited = false;
2328 
2329  return false;
2330  }
2331 
2332  heapgetpage(scan, blockno);
2333  hscan->rs_inited = true;
2334 
2335  return true;
2336 }
BlockNumber rs_cblock
Definition: heapam.h:59
#define InvalidBuffer
Definition: buf.h:25
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3700
uint32 rs_flags
Definition: relscan.h:47
void heapgetpage(TableScanDesc sscan, BlockNumber page)
Definition: heapam.c:371
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
bool rs_inited
Definition: heapam.h:58
NextSampleBlock_function NextSampleBlock
Definition: tsmapi.h:73
BlockNumber rs_startblock
Definition: heapam.h:53
BlockNumber rs_nblocks
Definition: heapam.h:52
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
Buffer rs_cbuf
Definition: heapam.h:60
#define Assert(condition)
Definition: c.h:804
#define InvalidBlockNumber
Definition: block.h:33
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
struct TsmRoutine * tsmroutine
Definition: execnodes.h:1356
void ss_report_location(Relation rel, BlockNumber location)
Definition: syncscan.c:288
Relation rs_rd
Definition: relscan.h:34

◆ heapam_scan_sample_next_tuple()

static bool heapam_scan_sample_next_tuple ( TableScanDesc  scan,
SampleScanState scanstate,
TupleTableSlot slot 
)
static

Definition at line 2339 of file heapam_handler.c.

References Assert, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, CHECK_FOR_INTERRUPTS, ExecClearTuple(), ExecStoreBufferHeapTuple(), HeapCheckForSerializableConflictOut(), ItemIdGetLength, ItemIdIsNormal, ItemPointerSet, LockBuffer(), TsmRoutine::NextSampleTuple, OffsetNumberIsValid, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageIsAllVisible, pgstat_count_heap_getnext, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, SampleHeapTupleVisible(), SO_ALLOW_PAGEMODE, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, SnapshotData::takenDuringRecovery, and SampleScanState::tsmroutine.

Referenced by SampleHeapTupleVisible().

2341 {
2342  HeapScanDesc hscan = (HeapScanDesc) scan;
2343  TsmRoutine *tsm = scanstate->tsmroutine;
2344  BlockNumber blockno = hscan->rs_cblock;
2345  bool pagemode = (scan->rs_flags & SO_ALLOW_PAGEMODE) != 0;
2346 
2347  Page page;
2348  bool all_visible;
2349  OffsetNumber maxoffset;
2350 
2351  /*
2352  * When not using pagemode, we must lock the buffer during tuple
2353  * visibility checks.
2354  */
2355  if (!pagemode)
2356  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
2357 
2358  page = (Page) BufferGetPage(hscan->rs_cbuf);
2359  all_visible = PageIsAllVisible(page) &&
2360  !scan->rs_snapshot->takenDuringRecovery;
2361  maxoffset = PageGetMaxOffsetNumber(page);
2362 
2363  for (;;)
2364  {
2365  OffsetNumber tupoffset;
2366 
2367  CHECK_FOR_INTERRUPTS();
2368 
2369  /* Ask the tablesample method which tuples to check on this page. */
2370  tupoffset = tsm->NextSampleTuple(scanstate,
2371  blockno,
2372  maxoffset);
2373 
2374  if (OffsetNumberIsValid(tupoffset))
2375  {
2376  ItemId itemid;
2377  bool visible;
2378  HeapTuple tuple = &(hscan->rs_ctup);
2379 
2380  /* Skip invalid tuple pointers. */
2381  itemid = PageGetItemId(page, tupoffset);
2382  if (!ItemIdIsNormal(itemid))
2383  continue;
2384 
2385  tuple->t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2386  tuple->t_len = ItemIdGetLength(itemid);
2387  ItemPointerSet(&(tuple->t_self), blockno, tupoffset);
2388 
2389 
2390  if (all_visible)
2391  visible = true;
2392  else
2393  visible = SampleHeapTupleVisible(scan, hscan->rs_cbuf,
2394  tuple, tupoffset);
2395 
2396  /* in pagemode, heapgetpage did this for us */
2397  if (!pagemode)
2398  HeapCheckForSerializableConflictOut(visible, scan->rs_rd, tuple,
2399  hscan->rs_cbuf, scan->rs_snapshot);
2400 
2401  /* Try next tuple from same page. */
2402  if (!visible)
2403  continue;
2404 
2405  /* Found visible tuple, return it. */
2406  if (!pagemode)
2407  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
2408 
2409  ExecStoreBufferHeapTuple(tuple, slot, hscan->rs_cbuf);
2410 
2411  /* Count successfully-fetched tuples as heap fetches */
2412  pgstat_count_heap_getnext(scan->rs_rd);
2413 
2414  return true;
2415  }
2416  else
2417  {
2418  /*
2419  * If we get here, it means we've exhausted the items on this page
2420  * and it's time to move to the next.
2421  */
2422  if (!pagemode)
2423  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
2424 
2425  ExecClearTuple(slot);
2426  return false;
2427  }
2428  }
2429 
2430  Assert(0);
2431 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
BlockNumber rs_cblock
Definition: heapam.h:59
#define PageIsAllVisible(page)
Definition: bufpage.h:385
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:425
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
uint32 BlockNumber
Definition: block.h:31
uint32 rs_flags
Definition: relscan.h:47
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
HeapTupleData rs_ctup
Definition: heapam.h:66
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
NextSampleTuple_function NextSampleTuple
Definition: tsmapi.h:74
ItemPointerData t_self
Definition: htup.h:65
static bool SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer, HeapTuple tuple, OffsetNumber tupoffset)
uint32 t_len
Definition: htup.h:64
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3939
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1362
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:9846
Buffer rs_cbuf
Definition: heapam.h:60
#define Assert(condition)
Definition: c.h:804
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
bool takenDuringRecovery
Definition: snapshot.h:184
struct TsmRoutine * tsmroutine
Definition: execnodes.h:1356
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:100
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1533
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127

◆ heapam_slot_callbacks()

static const TupleTableSlotOps* heapam_slot_callbacks ( Relation  relation)
static

Definition at line 67 of file heapam_handler.c.

References TTSOpsBufferHeapTuple.

Referenced by SampleHeapTupleVisible().

68 {
69  return &TTSOpsBufferHeapTuple;
70 }
const TupleTableSlotOps TTSOpsBufferHeapTuple
Definition: execTuples.c:86

◆ heapam_tuple_complete_speculative()

static void heapam_tuple_complete_speculative ( Relation  relation,
TupleTableSlot slot,
uint32  specToken,
bool  succeeded 
)
static

Definition at line 283 of file heapam_handler.c.

References ExecFetchSlotHeapTuple(), heap_abort_speculative(), heap_finish_speculative(), pfree(), and TupleTableSlot::tts_tid.

Referenced by SampleHeapTupleVisible().

285 {
286  bool shouldFree = true;
287  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
288 
289  /* adjust the tuple's state accordingly */
290  if (succeeded)
291  heap_finish_speculative(relation, &slot->tts_tid);
292  else
293  heap_abort_speculative(relation, &slot->tts_tid);
294 
295  if (shouldFree)
296  pfree(tuple);
297 }
void heap_abort_speculative(Relation relation, ItemPointer tid)
Definition: heapam.c:5860
void pfree(void *pointer)
Definition: mcxt.c:1057
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
Definition: execTuples.c:1614
void heap_finish_speculative(Relation relation, ItemPointer tid)
Definition: heapam.c:5769
ItemPointerData tts_tid
Definition: tuptable.h:130

◆ heapam_tuple_delete()

static TM_Result heapam_tuple_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  snapshot,
Snapshot  crosscheck,
bool  wait,
TM_FailureData tmfd,
bool  changingPart 
)
static

Definition at line 300 of file heapam_handler.c.

References heap_delete().

Referenced by SampleHeapTupleVisible().

303 {
304  /*
305  * Currently Deleting of index tuples are handled at vacuum, in case if
306  * the storage itself is cleaning the dead tuples by itself, it is the
307  * time to call the index tuple deletion also.
308  */
309  return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart);
310 }
TM_Result heap_delete(Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
Definition: heapam.c:2740

◆ heapam_tuple_insert()

static void heapam_tuple_insert ( Relation  relation,
TupleTableSlot slot,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)
static

Definition at line 241 of file heapam_handler.c.

References ExecFetchSlotHeapTuple(), heap_insert(), ItemPointerCopy, pfree(), RelationGetRelid, HeapTupleData::t_self, HeapTupleData::t_tableOid, TupleTableSlot::tts_tableOid, and TupleTableSlot::tts_tid.

Referenced by SampleHeapTupleVisible().

243 {
244  bool shouldFree = true;
245  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
246 
247  /* Update the tuple with table oid */
248  slot->tts_tableOid = RelationGetRelid(relation);
249  tuple->t_tableOid = slot->tts_tableOid;
250 
251  /* Perform the insertion, and copy the resulting ItemPointer */
252  heap_insert(relation, tuple, cid, options, bistate);
253  ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
254 
255  if (shouldFree)
256  pfree(tuple);
257 }
void heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
Definition: heapam.c:2036
Oid tts_tableOid
Definition: tuptable.h:131
void pfree(void *pointer)
Definition: mcxt.c:1057
ItemPointerData t_self
Definition: htup.h:65
Oid t_tableOid
Definition: htup.h:66
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
Definition: execTuples.c:1614
ItemPointerData tts_tid
Definition: tuptable.h:130
#define RelationGetRelid(relation)
Definition: rel.h:457
#define ItemPointerCopy(fromPointer, toPointer)
Definition: itemptr.h:161

◆ heapam_tuple_insert_speculative()

static void heapam_tuple_insert_speculative ( Relation  relation,
TupleTableSlot slot,
CommandId  cid,
int  options,
BulkInsertState  bistate,
uint32  specToken 
)
static

Definition at line 260 of file heapam_handler.c.

References ExecFetchSlotHeapTuple(), heap_insert(), HEAP_INSERT_SPECULATIVE, HeapTupleHeaderSetSpeculativeToken, ItemPointerCopy, pfree(), RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_self, HeapTupleData::t_tableOid, TupleTableSlot::tts_tableOid, and TupleTableSlot::tts_tid.

Referenced by SampleHeapTupleVisible().

263 {
264  bool shouldFree = true;
265  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
266 
267  /* Update the tuple with table oid */
268  slot->tts_tableOid = RelationGetRelid(relation);
269  tuple->t_tableOid = slot->tts_tableOid;
270 
271  HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);
272  options |= HEAP_INSERT_SPECULATIVE;
273 
274  /* Perform the insertion, and copy the resulting ItemPointer */
275  heap_insert(relation, tuple, cid, options, bistate);
276  ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
277 
278  if (shouldFree)
279  pfree(tuple);
280 }
void heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
Definition: heapam.c:2036
Oid tts_tableOid
Definition: tuptable.h:131
#define HeapTupleHeaderSetSpeculativeToken(tup, token)
Definition: htup_details.h:440
#define HEAP_INSERT_SPECULATIVE
Definition: heapam.h:37
HeapTupleHeader t_data
Definition: htup.h:68
void pfree(void *pointer)
Definition: mcxt.c:1057
ItemPointerData t_self
Definition: htup.h:65
Oid t_tableOid
Definition: htup.h:66
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
Definition: execTuples.c:1614
ItemPointerData tts_tid
Definition: tuptable.h:130
#define RelationGetRelid(relation)
Definition: rel.h:457
#define ItemPointerCopy(fromPointer, toPointer)
Definition: itemptr.h:161

◆ heapam_tuple_lock()

static TM_Result heapam_tuple_lock ( Relation  relation,
ItemPointer  tid,
Snapshot  snapshot,
TupleTableSlot slot,
CommandId  cid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
uint8  flags,
TM_FailureData tmfd 
)
static

Definition at line 348 of file heapam_handler.c.

References Assert, BufferHeapTupleTableSlot::base, BufferIsValid, TM_FailureData::cmax, ConditionalXactLockTableWait(), TM_FailureData::ctid, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), errmsg_internal(), ERROR, ExecStorePinnedBufferHeapTuple(), heap_fetch(), heap_lock_tuple(), HeapTupleHeaderGetCmin(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIsSpeculative, InitDirtySnapshot, ItemPointerEquals(), ItemPointerIndicatesMovedPartitions, LockWaitBlock, LockWaitError, LockWaitSkip, RelationGetRelationName, RelationGetRelid, ReleaseBuffer(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_self, HeapTupleData::t_tableOid, TM_Deleted, TM_SelfModified, TM_Updated, TM_WouldBlock, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), TransactionIdIsValid, TM_FailureData::traversed, TTS_IS_BUFFERTUPLE, TupleTableSlot::tts_tableOid, HeapTupleTableSlot::tupdata, TUPLE_LOCK_FLAG_FIND_LAST_VERSION, TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS, XactLockTableWait(), XLTW_FetchUpdated, TM_FailureData::xmax, SnapshotData::xmax, and SnapshotData::xmin.

Referenced by SampleHeapTupleVisible().

352 {
353  BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
354  TM_Result result;
355  Buffer buffer;
356  HeapTuple tuple = &bslot->base.tupdata;
357  bool follow_updates;
358 
359  follow_updates = (flags & TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS) != 0;
360  tmfd->traversed = false;
361 
362  Assert(TTS_IS_BUFFERTUPLE(slot));
363 
364 tuple_lock_retry:
365  tuple->t_self = *tid;
366  result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
367  follow_updates, &buffer, tmfd);
368 
369  if (result == TM_Updated &&
370  (flags & TUPLE_LOCK_FLAG_FIND_LAST_VERSION))
371  {
372  /* Should not encounter speculative tuple on recheck */
373  Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
374 
375  ReleaseBuffer(buffer);
376 
377  if (!ItemPointerEquals(&tmfd->ctid, &tuple->t_self))
378  {
379  SnapshotData SnapshotDirty;
380  TransactionId priorXmax;
381 
382  /* it was updated, so look at the updated version */
383  *tid = tmfd->ctid;
384  /* updated row should have xmin matching this xmax */
385  priorXmax = tmfd->xmax;
386 
387  /* signal that a tuple later in the chain is getting locked */
388  tmfd->traversed = true;
389 
390  /*
391  * fetch target tuple
392  *
393  * Loop here to deal with updated or busy tuples
394  */
395  InitDirtySnapshot(SnapshotDirty);
396  for (;;)
397  {
398  if (ItemPointerIndicatesMovedPartitions(tid))
399  ereport(ERROR,
400  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
401  errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
402 
403  tuple->t_self = *tid;
404  if (heap_fetch(relation, &SnapshotDirty, tuple, &buffer))
405  {
406  /*
407  * If xmin isn't what we're expecting, the slot must have
408  * been recycled and reused for an unrelated tuple. This
409  * implies that the latest version of the row was deleted,
410  * so we need do nothing. (Should be safe to examine xmin
411  * without getting buffer's content lock. We assume
412  * reading a TransactionId to be atomic, and Xmin never
413  * changes in an existing tuple, except to invalid or
414  * frozen, and neither of those can match priorXmax.)
415  */
416  if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple->t_data),
417  priorXmax))
418  {
419  ReleaseBuffer(buffer);
420  return TM_Deleted;
421  }
422 
423  /* otherwise xmin should not be dirty... */
424  if (TransactionIdIsValid(SnapshotDirty.xmin))
425  ereport(ERROR,
426  (errcode(ERRCODE_DATA_CORRUPTED),
427  errmsg_internal("t_xmin is uncommitted in tuple to be updated")));
428 
429  /*
430  * If tuple is being updated by other transaction then we
431  * have to wait for its commit/abort, or die trying.
432  */
433  if (TransactionIdIsValid(SnapshotDirty.xmax))
434  {
435  ReleaseBuffer(buffer);
436  switch (wait_policy)
437  {
438  case LockWaitBlock:
439  XactLockTableWait(SnapshotDirty.xmax,
440  relation, &tuple->t_self,
442  break;
443  case LockWaitSkip:
444  if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
445  /* skip instead of waiting */
446  return TM_WouldBlock;
447  break;
448  case LockWaitError:
449  if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
450  ereport(ERROR,
451  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
452  errmsg("could not obtain lock on row in relation \"%s\"",
453  RelationGetRelationName(relation))));
454  break;
455  }
456  continue; /* loop back to repeat heap_fetch */
457  }
458 
459  /*
460  * If tuple was inserted by our own transaction, we have
461  * to check cmin against cid: cmin >= current CID means
462  * our command cannot see the tuple, so we should ignore
463  * it. Otherwise heap_lock_tuple() will throw an error,
464  * and so would any later attempt to update or delete the
465  * tuple. (We need not check cmax because
466  * HeapTupleSatisfiesDirty will consider a tuple deleted
467  * by our transaction dead, regardless of cmax.) We just
468  * checked that priorXmax == xmin, so we can test that
469  * variable instead of doing HeapTupleHeaderGetXmin again.
470  */
471  if (TransactionIdIsCurrentTransactionId(priorXmax) &&
472  HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
473  {
474  tmfd->xmax = priorXmax;
475 
476  /*
477  * Cmin is the problematic value, so store that. See
478  * above.
479  */
480  tmfd->cmax = HeapTupleHeaderGetCmin(tuple->t_data);
481  ReleaseBuffer(buffer);
482  return TM_SelfModified;
483  }
484 
485  /*
486  * This is a live tuple, so try to lock it again.
487  */
488  ReleaseBuffer(buffer);
489  goto tuple_lock_retry;
490  }
491 
492  /*
493  * If the referenced slot was actually empty, the latest
494  * version of the row must have been deleted, so we need do
495  * nothing.
496  */
497  if (tuple->t_data == NULL)
498  {
499  return TM_Deleted;
500  }
501 
502  /*
503  * As above, if xmin isn't what we're expecting, do nothing.
504  */
505  if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple->t_data),
506  priorXmax))
507  {
508  if (BufferIsValid(buffer))
509  ReleaseBuffer(buffer);
510  return TM_Deleted;
511  }
512 
513  /*
514  * If we get here, the tuple was found but failed
515  * SnapshotDirty. Assuming the xmin is either a committed xact
516  * or our own xact (as it certainly should be if we're trying
517  * to modify the tuple), this must mean that the row was
518  * updated or deleted by either a committed xact or our own
519  * xact. If it was deleted, we can ignore it; if it was
520  * updated then chain up to the next version and repeat the
521  * whole process.
522  *
523  * As above, it should be safe to examine xmax and t_ctid
524  * without the buffer content lock, because they can't be
525  * changing.
526  */
527  if (ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
528  {
529  /* deleted, so forget about it */
530  if (BufferIsValid(buffer))
531  ReleaseBuffer(buffer);
532  return TM_Deleted;
533  }
534 
535  /* updated, so look at the updated row */
536  *tid = tuple->t_data->t_ctid;
537  /* updated row should have xmin matching this xmax */
538  priorXmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
539  if (BufferIsValid(buffer))
540  ReleaseBuffer(buffer);
541  /* loop back to fetch next in chain */
542  }
543  }
544  else
545  {
546  /* tuple was deleted, so give up */
547  return TM_Deleted;
548  }
549  }
550 
551  slot->tts_tableOid = RelationGetRelid(relation);
552  tuple->t_tableOid = slot->tts_tableOid;
553 
554  /* store in slot, transferring existing pin */
555  ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);
556 
557  return result;
558 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
Oid tts_tableOid
Definition: tuptable.h:131
ItemPointerData ctid
Definition: tableam.h:126
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define InitDirtySnapshot(snapshotdata)
Definition: snapmgr.h:75
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:587
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
CommandId HeapTupleHeaderGetCmin(HeapTupleHeader tup)
Definition: combocid.c:104
bool heap_fetch(Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf)
Definition: heapam.c:1571
CommandId cmax
Definition: tableam.h:128
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:429
int errcode(int sqlerrcode)
Definition: elog.c:694
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3700
#define TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS
Definition: tableam.h:240
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:712
HeapTupleHeader t_data
Definition: htup.h:68
TransactionId xmax
Definition: tableam.h:127
#define ERROR
Definition: elog.h:45
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
#define RelationGetRelationName(relation)
Definition: rel.h:491
Oid t_tableOid
Definition: htup.h:66
TransactionId xmax
Definition: snapshot.h:158
TransactionId xmin
Definition: snapshot.h:157
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:45
TM_Result
Definition: tableam.h:71
#define TTS_IS_BUFFERTUPLE(slot)
Definition: tuptable.h:231
#define ItemPointerIndicatesMovedPartitions(pointer)
Definition: itemptr.h:184
#define ereport(elevel,...)
Definition: elog.h:155
int errmsg_internal(const char *fmt,...)
Definition: elog.c:992
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:639
#define Assert(condition)
Definition: c.h:804
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define TUPLE_LOCK_FLAG_FIND_LAST_VERSION
Definition: tableam.h:242
TupleTableSlot * ExecStorePinnedBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1388
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, TM_FailureData *tmfd)
Definition: heapam.c:4269
int errmsg(const char *fmt,...)
Definition: elog.c:905
HeapTupleTableSlot base
Definition: tuptable.h:259
#define TransactionIdIsValid(xid)
Definition: transam.h:41
bool traversed
Definition: tableam.h:129
HeapTupleData tupdata
Definition: tuptable.h:253
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:457

◆ heapam_tuple_satisfies_snapshot()

static bool heapam_tuple_satisfies_snapshot ( Relation  rel,
TupleTableSlot slot,
Snapshot  snapshot 
)
static

Definition at line 213 of file heapam_handler.c.

References Assert, BufferHeapTupleTableSlot::base, BufferHeapTupleTableSlot::buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferIsValid, HeapTupleSatisfiesVisibility(), LockBuffer(), TTS_IS_BUFFERTUPLE, and HeapTupleTableSlot::tuple.

Referenced by SampleHeapTupleVisible().

215 {
216  BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
217  bool res;
218 
219  Assert(TTS_IS_BUFFERTUPLE(slot));
220  Assert(BufferIsValid(bslot->buffer));
221 
222  /*
223  * We need buffer pin and lock to call HeapTupleSatisfiesVisibility.
224  * Caller should be holding pin, but not lock.
225  */
226  LockBuffer(bslot->buffer, BUFFER_LOCK_SHARE);
227  res = HeapTupleSatisfiesVisibility(bslot->base.tuple, snapshot,
228  bslot->buffer);
229  LockBuffer(bslot->buffer, BUFFER_LOCK_UNLOCK);
230 
231  return res;
232 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
HeapTuple tuple
Definition: tuptable.h:250
#define TTS_IS_BUFFERTUPLE(slot)
Definition: tuptable.h:231
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3939
#define Assert(condition)
Definition: c.h:804
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
HeapTupleTableSlot base
Definition: tuptable.h:259
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heapam_tuple_tid_valid()

static bool heapam_tuple_tid_valid ( TableScanDesc  scan,
ItemPointer  tid 
)
static

Definition at line 204 of file heapam_handler.c.

References ItemPointerGetBlockNumber, ItemPointerIsValid, and HeapScanDescData::rs_nblocks.

Referenced by SampleHeapTupleVisible().

205 {
206  HeapScanDesc hscan = (HeapScanDesc) scan;
207 
208  return ItemPointerIsValid(tid) &&
209  ItemPointerGetBlockNumber(tid) < hscan->rs_nblocks;
210 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
BlockNumber rs_nblocks
Definition: heapam.h:52
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98

◆ heapam_tuple_update()

static TM_Result heapam_tuple_update ( Relation  relation,
ItemPointer  otid,
TupleTableSlot slot,
CommandId  cid,
Snapshot  snapshot,
Snapshot  crosscheck,
bool  wait,
TM_FailureData tmfd,
LockTupleMode lockmode,
bool update_indexes 
)
static

Definition at line 314 of file heapam_handler.c.

References ExecFetchSlotHeapTuple(), heap_update(), HeapTupleIsHeapOnly, ItemPointerCopy, pfree(), RelationGetRelid, HeapTupleData::t_self, HeapTupleData::t_tableOid, TM_Ok, TupleTableSlot::tts_tableOid, and TupleTableSlot::tts_tid.

Referenced by SampleHeapTupleVisible().

318 {
319  bool shouldFree = true;
320  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
321  TM_Result result;
322 
323  /* Update the tuple with table oid */
324  slot->tts_tableOid = RelationGetRelid(relation);
325  tuple->t_tableOid = slot->tts_tableOid;
326 
327  result = heap_update(relation, otid, tuple, cid, crosscheck, wait,
328  tmfd, lockmode);
329  ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
330 
331  /*
332  * Decide whether new index entries are needed for the tuple
333  *
334  * Note: heap_update returns the tid (location) of the new tuple in the
335  * t_self field.
336  *
337  * If it's a HOT update, we mustn't insert new index entries.
338  */
339  *update_indexes = result == TM_Ok && !HeapTupleIsHeapOnly(tuple);
340 
341  if (shouldFree)
342  pfree(tuple);
343 
344  return result;
345 }
Oid tts_tableOid
Definition: tuptable.h:131
void pfree(void *pointer)
Definition: mcxt.c:1057
ItemPointerData t_self
Definition: htup.h:65
Oid t_tableOid
Definition: htup.h:66
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
Definition: execTuples.c:1614
TM_Result
Definition: tableam.h:71
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:685
Definition: tableam.h:77
TM_Result heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode)
Definition: heapam.c:3190
ItemPointerData tts_tid
Definition: tuptable.h:130
#define RelationGetRelid(relation)
Definition: rel.h:457
#define ItemPointerCopy(fromPointer, toPointer)
Definition: itemptr.h:161

◆ reform_and_rewrite_tuple()

static void reform_and_rewrite_tuple ( HeapTuple  tuple,
Relation  OldHeap,
Relation  NewHeap,
Datum values,
bool isnull,
RewriteState  rwstate 
)
static

Definition at line 2456 of file heapam_handler.c.

References heap_deform_tuple(), heap_form_tuple(), heap_freetuple(), i, TupleDescData::natts, RelationGetDescr, rewrite_heap_tuple(), and TupleDescAttr.

Referenced by heapam_relation_copy_for_cluster().

2459 {
2460  TupleDesc oldTupDesc = RelationGetDescr(OldHeap);
2461  TupleDesc newTupDesc = RelationGetDescr(NewHeap);
2462  HeapTuple copiedTuple;
2463  int i;
2464 
2465  heap_deform_tuple(tuple, oldTupDesc, values, isnull);
2466 
2467  /* Be sure to null out any dropped columns */
2468  for (i = 0; i < newTupDesc->natts; i++)
2469  {
2470  if (TupleDescAttr(newTupDesc, i)->attisdropped)
2471  isnull[i] = true;
2472  }
2473 
2474  copiedTuple = heap_form_tuple(newTupDesc, values, isnull);
2475 
2476  /* The heap rewrite module does the rest */
2477  rewrite_heap_tuple(rwstate, tuple, copiedTuple);
2478 
2479  heap_freetuple(copiedTuple);
2480 }
#define RelationGetDescr(relation)
Definition: rel.h:483
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:1020
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition: heaptuple.c:1249
static Datum values[MAXATTR]
Definition: bootstrap.c:165
int i
void rewrite_heap_tuple(RewriteState state, HeapTuple old_tuple, HeapTuple new_tuple)
Definition: rewriteheap.c:362

◆ SampleHeapTupleVisible()

static bool SampleHeapTupleVisible ( TableScanDesc  scan,
Buffer  buffer,
HeapTuple  tuple,
OffsetNumber  tupoffset 
)
static

Definition at line 2486 of file heapam_handler.c.

References heap_beginscan(), heap_endscan(), heap_fetch_toast_slice(), heap_get_latest_tid(), heap_getnextslot(), heap_getnextslot_tidrange(), heap_index_delete_tuples(), heap_multi_insert(), heap_rescan(), heap_set_tidrange(), heap_vacuum_rel(), heapam_estimate_rel_size(), heapam_fetch_row_version(), heapam_index_build_range_scan(), heapam_index_fetch_begin(), heapam_index_fetch_end(), heapam_index_fetch_reset(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_data(), heapam_relation_copy_for_cluster(), heapam_relation_needs_toast_table(), heapam_relation_nontransactional_truncate(), heapam_relation_set_new_filenode(), heapam_relation_toast_am(), heapam_scan_analyze_next_block(), heapam_scan_analyze_next_tuple(), heapam_scan_bitmap_next_block(), heapam_scan_bitmap_next_tuple(), heapam_scan_sample_next_block(), heapam_scan_sample_next_tuple(), heapam_slot_callbacks(), heapam_tuple_complete_speculative(), heapam_tuple_delete(), heapam_tuple_insert(), heapam_tuple_insert_speculative(), heapam_tuple_lock(), heapam_tuple_satisfies_snapshot(), heapam_tuple_tid_valid(), heapam_tuple_update(), HeapTupleSatisfiesVisibility(), TableScanDescData::rs_flags, HeapScanDescData::rs_ntuples, TableScanDescData::rs_snapshot, HeapScanDescData::rs_vistuples, SO_ALLOW_PAGEMODE, T_TableAmRoutine, table_block_parallelscan_estimate(), table_block_parallelscan_initialize(), table_block_parallelscan_reinitialize(), table_block_relation_size(), and TableAmRoutine::type.

Referenced by heapam_scan_sample_next_tuple().

/*
 * Check visibility of the tuple at `tupoffset` in `buffer` for a
 * TABLESAMPLE scan.  In page-at-a-time mode the precomputed sorted
 * rs_vistuples[] array is binary-searched; otherwise the tuple is tested
 * directly against the scan's snapshot.
 */
2489 {
2490  HeapScanDesc hscan = (HeapScanDesc) scan;
2491 
2492  if (scan->rs_flags & SO_ALLOW_PAGEMODE)
2493  {
2494  /*
2495  * In page-at-a-time mode, heapgetpage() already did visibility checks,
2496  * so just look at the info it left in rs_vistuples[].
2497  *
2498  * We use a binary search over the known-sorted array. Note: we could
2499  * save some effort if we insisted that NextSampleTuple select tuples
2500  * in increasing order, but it's not clear that there would be enough
2501  * gain to justify the restriction.
2502  */
2503  int start = 0,
2504  end = hscan->rs_ntuples - 1;
2505 
2506  while (start <= end)
2507  {
2508  int mid = (start + end) / 2;
2509  OffsetNumber curoffset = hscan->rs_vistuples[mid];
2510 
2511  if (tupoffset == curoffset)
2512  return true;
2513  else if (tupoffset < curoffset)
2514  end = mid - 1;
2515  else
2516  start = mid + 1;
2517  }
2518 
2519  /* Offset not among the page's visible tuples */
2520  return false;
2521  }
2522  else
2523  {
2524  /* Otherwise, we have to check the tuple individually. */
2525  return HeapTupleSatisfiesVisibility(tuple, scan->rs_snapshot,
2526  buffer);
2527  }
2528 }
uint32 rs_flags
Definition: relscan.h:47
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
uint16 OffsetNumber
Definition: off.h:24
int rs_ntuples
Definition: heapam.h:70
OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]
Definition: heapam.h:71
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

Variable Documentation

◆ heapam_methods

static const TableAmRoutine heapam_methods
static

Definition at line 58 of file heapam_handler.c.

Referenced by GetHeapamTableAmRoutine().