PostgreSQL Source Code  git master
heapam_handler.c File Reference
#include "postgres.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heaptoast.h"
#include "access/multixact.h"
#include "access/rewriteheap.h"
#include "access/tableam.h"
#include "access/tsmapi.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/index.h"
#include "catalog/storage.h"
#include "catalog/storage_xlog.h"
#include "commands/progress.h"
#include "executor/executor.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "utils/builtins.h"
#include "utils/rel.h"
Include dependency graph for heapam_handler.c:

Go to the source code of this file.

Macros

#define HEAP_OVERHEAD_BYTES_PER_TUPLE   (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))
 
#define HEAP_USABLE_BYTES_PER_PAGE   (BLCKSZ - SizeOfPageHeaderData)
 

Functions

static void reform_and_rewrite_tuple (HeapTuple tuple, Relation OldHeap, Relation NewHeap, Datum *values, bool *isnull, RewriteState rwstate)
 
static bool SampleHeapTupleVisible (TableScanDesc scan, Buffer buffer, HeapTuple tuple, OffsetNumber tupoffset)
 
static BlockNumber heapam_scan_get_blocks_done (HeapScanDesc hscan)
 
static const TupleTableSlotOps * heapam_slot_callbacks (Relation relation)
 
static IndexFetchTableData * heapam_index_fetch_begin (Relation rel)
 
static void heapam_index_fetch_reset (IndexFetchTableData *scan)
 
static void heapam_index_fetch_end (IndexFetchTableData *scan)
 
static bool heapam_index_fetch_tuple (struct IndexFetchTableData *scan, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, bool *call_again, bool *all_dead)
 
static bool heapam_fetch_row_version (Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot)
 
static bool heapam_tuple_tid_valid (TableScanDesc scan, ItemPointer tid)
 
static bool heapam_tuple_satisfies_snapshot (Relation rel, TupleTableSlot *slot, Snapshot snapshot)
 
static void heapam_tuple_insert (Relation relation, TupleTableSlot *slot, CommandId cid, int options, BulkInsertState bistate)
 
static void heapam_tuple_insert_speculative (Relation relation, TupleTableSlot *slot, CommandId cid, int options, BulkInsertState bistate, uint32 specToken)
 
static void heapam_tuple_complete_speculative (Relation relation, TupleTableSlot *slot, uint32 specToken, bool succeeded)
 
static TM_Result heapam_tuple_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
 
static TM_Result heapam_tuple_update (Relation relation, ItemPointer otid, TupleTableSlot *slot, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, bool *update_indexes)
 
static TM_Result heapam_tuple_lock (Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, uint8 flags, TM_FailureData *tmfd)
 
static void heapam_relation_set_new_filenode (Relation rel, const RelFileNode *newrnode, char persistence, TransactionId *freezeXid, MultiXactId *minmulti)
 
static void heapam_relation_nontransactional_truncate (Relation rel)
 
static void heapam_relation_copy_data (Relation rel, const RelFileNode *newrnode)
 
static void heapam_relation_copy_for_cluster (Relation OldHeap, Relation NewHeap, Relation OldIndex, bool use_sort, TransactionId OldestXmin, TransactionId *xid_cutoff, MultiXactId *multi_cutoff, double *num_tuples, double *tups_vacuumed, double *tups_recently_dead)
 
static bool heapam_scan_analyze_next_block (TableScanDesc scan, BlockNumber blockno, BufferAccessStrategy bstrategy)
 
static bool heapam_scan_analyze_next_tuple (TableScanDesc scan, TransactionId OldestXmin, double *liverows, double *deadrows, TupleTableSlot *slot)
 
static double heapam_index_build_range_scan (Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo, bool allow_sync, bool anyvisible, bool progress, BlockNumber start_blockno, BlockNumber numblocks, IndexBuildCallback callback, void *callback_state, TableScanDesc scan)
 
static void heapam_index_validate_scan (Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo, Snapshot snapshot, ValidateIndexState *state)
 
static bool heapam_relation_needs_toast_table (Relation rel)
 
static Oid heapam_relation_toast_am (Relation rel)
 
static void heapam_estimate_rel_size (Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
 
static bool heapam_scan_bitmap_next_block (TableScanDesc scan, TBMIterateResult *tbmres)
 
static bool heapam_scan_bitmap_next_tuple (TableScanDesc scan, TBMIterateResult *tbmres, TupleTableSlot *slot)
 
static bool heapam_scan_sample_next_block (TableScanDesc scan, SampleScanState *scanstate)
 
static bool heapam_scan_sample_next_tuple (TableScanDesc scan, SampleScanState *scanstate, TupleTableSlot *slot)
 
const TableAmRoutine * GetHeapamTableAmRoutine (void)
 
Datum heap_tableam_handler (PG_FUNCTION_ARGS)
 

Variables

static const TableAmRoutine heapam_methods
 

Macro Definition Documentation

◆ HEAP_OVERHEAD_BYTES_PER_TUPLE

#define HEAP_OVERHEAD_BYTES_PER_TUPLE   (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))

Definition at line 2040 of file heapam_handler.c.

Referenced by heapam_estimate_rel_size().

◆ HEAP_USABLE_BYTES_PER_PAGE

#define HEAP_USABLE_BYTES_PER_PAGE   (BLCKSZ - SizeOfPageHeaderData)

Definition at line 2042 of file heapam_handler.c.

Referenced by heapam_estimate_rel_size().

Function Documentation

◆ GetHeapamTableAmRoutine()

const TableAmRoutine* GetHeapamTableAmRoutine ( void  )

Definition at line 2545 of file heapam_handler.c.

References heapam_methods.

Referenced by formrdesc(), heap_getnext(), and table_scan_sample_next_tuple().

2546 {
2547  return &heapam_methods;
2548 }
static const TableAmRoutine heapam_methods

◆ heap_tableam_handler()

Datum heap_tableam_handler ( PG_FUNCTION_ARGS  )

Definition at line 2551 of file heapam_handler.c.

References PG_RETURN_POINTER.

2552 {
2553  PG_RETURN_POINTER(&heapam_methods);
2554 }
#define PG_RETURN_POINTER(x)
Definition: fmgr.h:360
static const TableAmRoutine heapam_methods

◆ heapam_estimate_rel_size()

static void heapam_estimate_rel_size ( Relation  rel,
int32 *  attr_widths,
BlockNumber *  pages,
double *  tuples,
double *  allvisfrac 
)
static

Definition at line 2046 of file heapam_handler.c.

References HEAP_OVERHEAD_BYTES_PER_TUPLE, HEAP_USABLE_BYTES_PER_PAGE, and table_block_relation_estimate_size().

Referenced by SampleHeapTupleVisible().

2049 {
2050  table_block_relation_estimate_size(rel, attr_widths, pages,
2051  tuples, allvisfrac,
2052  HEAP_OVERHEAD_BYTES_PER_TUPLE,
2053  HEAP_USABLE_BYTES_PER_PAGE);
2054 }
#define HEAP_OVERHEAD_BYTES_PER_TUPLE
#define HEAP_USABLE_BYTES_PER_PAGE
void table_block_relation_estimate_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac, Size overhead_bytes_per_tuple, Size usable_bytes_per_page)
Definition: tableam.c:548

◆ heapam_fetch_row_version()

static bool heapam_fetch_row_version ( Relation  relation,
ItemPointer  tid,
Snapshot  snapshot,
TupleTableSlot slot 
)
static

Definition at line 179 of file heapam_handler.c.

References Assert, BufferHeapTupleTableSlot::base, ExecStorePinnedBufferHeapTuple(), heap_fetch(), RelationGetRelid, HeapTupleData::t_self, TTS_IS_BUFFERTUPLE, TupleTableSlot::tts_tableOid, and HeapTupleTableSlot::tupdata.

Referenced by SampleHeapTupleVisible().

183 {
184  BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
185  Buffer buffer;
186 
187  Assert(TTS_IS_BUFFERTUPLE(slot));
188 
189  bslot->base.tupdata.t_self = *tid;
190  if (heap_fetch(relation, snapshot, &bslot->base.tupdata, &buffer))
191  {
192  /* store in slot, transferring existing pin */
193  ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata, slot, buffer);
194  slot->tts_tableOid = RelationGetRelid(relation);
195 
196  return true;
197  }
198 
199  return false;
200 }
Oid tts_tableOid
Definition: tuptable.h:131
bool heap_fetch(Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf)
Definition: heapam.c:1373
ItemPointerData t_self
Definition: htup.h:65
#define TTS_IS_BUFFERTUPLE(slot)
Definition: tuptable.h:231
#define Assert(condition)
Definition: c.h:738
TupleTableSlot * ExecStorePinnedBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1388
HeapTupleTableSlot base
Definition: tuptable.h:259
HeapTupleData tupdata
Definition: tuptable.h:253
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:456

◆ heapam_index_build_range_scan()

static double heapam_index_build_range_scan ( Relation  heapRelation,
Relation  indexRelation,
IndexInfo indexInfo,
bool  allow_sync,
bool  anyvisible,
bool  progress,
BlockNumber  start_blockno,
BlockNumber  numblocks,
IndexBuildCallback  callback,
void *  callback_state,
TableScanDesc  scan 
)
static

Definition at line 1131 of file heapam_handler.c.

References Assert, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, callback(), CHECK_FOR_INTERRUPTS, CreateExecutorState(), ExprContext::ecxt_per_tuple_memory, ExprContext::ecxt_scantuple, elog, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg_internal(), ERROR, ExecDropSingleTupleTableSlot(), ExecPrepareQual(), ExecQual(), ExecStoreBufferHeapTuple(), FormIndexDatum(), ForwardScanDirection, FreeExecutorState(), GetOldestXmin(), GetPerTupleExprContext, GetTransactionSnapshot(), heap_get_root_tuples(), heap_getnext(), heap_setscanlimits(), heapam_scan_get_blocks_done(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleSatisfiesVacuum(), IndexInfo::ii_BrokenHotChain, IndexInfo::ii_Concurrent, IndexInfo::ii_ExclusionOps, IndexInfo::ii_ExpressionsState, IndexInfo::ii_Predicate, IndexInfo::ii_PredicateState, IndexInfo::ii_Unique, INDEX_MAX_KEYS, InvalidBlockNumber, InvalidTransactionId, IsBootstrapProcessingMode, IsMVCCSnapshot, IsSystemRelation(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSet, LockBuffer(), MaxHeapTuplesPerPage, MemoryContextReset(), NIL, OffsetNumberIsValid, OidIsValid, OldestXmin, pgstat_progress_update_param(), ParallelBlockTableScanDescData::phs_nblocks, PROCARRAY_FLAGS_VACUUM, PROGRESS_SCAN_BLOCKS_DONE, PROGRESS_SCAN_BLOCKS_TOTAL, RelationData::rd_rel, RegisterSnapshot(), RelationGetRelationName, HeapScanDescData::rs_base, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_nblocks, TableScanDescData::rs_parallel, TableScanDescData::rs_snapshot, SnapshotAny, HeapTupleData::t_data, HeapTupleData::t_self, table_beginscan_strat(), table_endscan(), table_slot_create(), TransactionIdIsCurrentTransactionId(), TransactionIdIsValid, UnregisterSnapshot(), values, WARNING, XactLockTableWait(), and XLTW_InsertIndexUnique.

Referenced by SampleHeapTupleVisible().

1142 {
1143  HeapScanDesc hscan;
1144  bool is_system_catalog;
1145  bool checking_uniqueness;
1146  HeapTuple heapTuple;
1147  Datum values[INDEX_MAX_KEYS];
1148  bool isnull[INDEX_MAX_KEYS];
1149  double reltuples;
1150  ExprState *predicate;
1151  TupleTableSlot *slot;
1152  EState *estate;
1153  ExprContext *econtext;
1154  Snapshot snapshot;
1155  bool need_unregister_snapshot = false;
1156  TransactionId OldestXmin;
1157  BlockNumber previous_blkno = InvalidBlockNumber;
1158  BlockNumber root_blkno = InvalidBlockNumber;
1159  OffsetNumber root_offsets[MaxHeapTuplesPerPage];
1160 
1161  /*
1162  * sanity checks
1163  */
1164  Assert(OidIsValid(indexRelation->rd_rel->relam));
1165 
1166  /* Remember if it's a system catalog */
1167  is_system_catalog = IsSystemRelation(heapRelation);
1168 
1169  /* See whether we're verifying uniqueness/exclusion properties */
1170  checking_uniqueness = (indexInfo->ii_Unique ||
1171  indexInfo->ii_ExclusionOps != NULL);
1172 
1173  /*
1174  * "Any visible" mode is not compatible with uniqueness checks; make sure
1175  * only one of those is requested.
1176  */
1177  Assert(!(anyvisible && checking_uniqueness));
1178 
1179  /*
1180  * Need an EState for evaluation of index expressions and partial-index
1181  * predicates. Also a slot to hold the current tuple.
1182  */
1183  estate = CreateExecutorState();
1184  econtext = GetPerTupleExprContext(estate);
1185  slot = table_slot_create(heapRelation, NULL);
1186 
1187  /* Arrange for econtext's scan tuple to be the tuple under test */
1188  econtext->ecxt_scantuple = slot;
1189 
1190  /* Set up execution state for predicate, if any. */
1191  predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
1192 
1193  /*
1194  * Prepare for scan of the base relation. In a normal index build, we use
1195  * SnapshotAny because we must retrieve all tuples and do our own time
1196  * qual checks (because we have to index RECENTLY_DEAD tuples). In a
1197  * concurrent build, or during bootstrap, we take a regular MVCC snapshot
1198  * and index whatever's live according to that.
1199  */
1200  OldestXmin = InvalidTransactionId;
1201 
1202  /* okay to ignore lazy VACUUMs here */
1203  if (!IsBootstrapProcessingMode() && !indexInfo->ii_Concurrent)
1204  OldestXmin = GetOldestXmin(heapRelation, PROCARRAY_FLAGS_VACUUM);
1205 
1206  if (!scan)
1207  {
1208  /*
1209  * Serial index build.
1210  *
1211  * Must begin our own heap scan in this case. We may also need to
1212  * register a snapshot whose lifetime is under our direct control.
1213  */
1214  if (!TransactionIdIsValid(OldestXmin))
1215  {
1216  snapshot = RegisterSnapshot(GetTransactionSnapshot());
1217  need_unregister_snapshot = true;
1218  }
1219  else
1220  snapshot = SnapshotAny;
1221 
1222  scan = table_beginscan_strat(heapRelation, /* relation */
1223  snapshot, /* snapshot */
1224  0, /* number of keys */
1225  NULL, /* scan key */
1226  true, /* buffer access strategy OK */
1227  allow_sync); /* syncscan OK? */
1228  }
1229  else
1230  {
1231  /*
1232  * Parallel index build.
1233  *
1234  * Parallel case never registers/unregisters own snapshot. Snapshot
1235  * is taken from parallel heap scan, and is SnapshotAny or an MVCC
1236  * snapshot, based on same criteria as serial case.
1237  */
1238  Assert(!IsBootstrapProcessingMode());
1239  Assert(allow_sync);
1240  snapshot = scan->rs_snapshot;
1241  }
1242 
1243  hscan = (HeapScanDesc) scan;
1244 
1245  /* Publish number of blocks to scan */
1246  if (progress)
1247  {
1248  BlockNumber nblocks;
1249 
1250  if (hscan->rs_base.rs_parallel != NULL)
1251  {
1252  ParallelBlockTableScanDesc pbscan;
1253 
1254  pbscan = (ParallelBlockTableScanDesc) hscan->rs_base.rs_parallel;
1255  nblocks = pbscan->phs_nblocks;
1256  }
1257  else
1258  nblocks = hscan->rs_nblocks;
1259 
1260  pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_TOTAL,
1261  nblocks);
1262  }
1263 
1264  /*
1265  * Must call GetOldestXmin() with SnapshotAny. Should never call
1266  * GetOldestXmin() with MVCC snapshot. (It's especially worth checking
1267  * this for parallel builds, since ambuild routines that support parallel
1268  * builds must work these details out for themselves.)
1269  */
1270  Assert(snapshot == SnapshotAny || IsMVCCSnapshot(snapshot));
1271  Assert(snapshot == SnapshotAny ? TransactionIdIsValid(OldestXmin) :
1272  !TransactionIdIsValid(OldestXmin));
1273  Assert(snapshot == SnapshotAny || !anyvisible);
1274 
1275  /* set our scan endpoints */
1276  if (!allow_sync)
1277  heap_setscanlimits(scan, start_blockno, numblocks);
1278  else
1279  {
1280  /* syncscan can only be requested on whole relation */
1281  Assert(start_blockno == 0);
1282  Assert(numblocks == InvalidBlockNumber);
1283  }
1284 
1285  reltuples = 0;
1286 
1287  /*
1288  * Scan all tuples in the base relation.
1289  */
1290  while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
1291  {
1292  bool tupleIsAlive;
1293 
1294  CHECK_FOR_INTERRUPTS();
1295 
1296  /* Report scan progress, if asked to. */
1297  if (progress)
1298  {
1299  BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
1300 
1301  if (blocks_done != previous_blkno)
1302  {
1303  pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE,
1304  blocks_done);
1305  previous_blkno = blocks_done;
1306  }
1307  }
1308 
1309  /*
1310  * When dealing with a HOT-chain of updated tuples, we want to index
1311  * the values of the live tuple (if any), but index it under the TID
1312  * of the chain's root tuple. This approach is necessary to preserve
1313  * the HOT-chain structure in the heap. So we need to be able to find
1314  * the root item offset for every tuple that's in a HOT-chain. When
1315  * first reaching a new page of the relation, call
1316  * heap_get_root_tuples() to build a map of root item offsets on the
1317  * page.
1318  *
1319  * It might look unsafe to use this information across buffer
1320  * lock/unlock. However, we hold ShareLock on the table so no
1321  * ordinary insert/update/delete should occur; and we hold pin on the
1322  * buffer continuously while visiting the page, so no pruning
1323  * operation can occur either.
1324  *
1325  * Also, although our opinions about tuple liveness could change while
1326  * we scan the page (due to concurrent transaction commits/aborts),
1327  * the chain root locations won't, so this info doesn't need to be
1328  * rebuilt after waiting for another transaction.
1329  *
1330  * Note the implied assumption that there is no more than one live
1331  * tuple per HOT-chain --- else we could create more than one index
1332  * entry pointing to the same root tuple.
1333  */
1334  if (hscan->rs_cblock != root_blkno)
1335  {
1336  Page page = BufferGetPage(hscan->rs_cbuf);
1337 
1338  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1339  heap_get_root_tuples(page, root_offsets);
1340  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1341 
1342  root_blkno = hscan->rs_cblock;
1343  }
1344 
1345  if (snapshot == SnapshotAny)
1346  {
1347  /* do our own time qual check */
1348  bool indexIt;
1349  TransactionId xwait;
1350 
1351  recheck:
1352 
1353  /*
1354  * We could possibly get away with not locking the buffer here,
1355  * since caller should hold ShareLock on the relation, but let's
1356  * be conservative about it. (This remark is still correct even
1357  * with HOT-pruning: our pin on the buffer prevents pruning.)
1358  */
1359  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1360 
1361  /*
1362  * The criteria for counting a tuple as live in this block need to
1363  * match what analyze.c's heapam_scan_analyze_next_tuple() does,
1364  * otherwise CREATE INDEX and ANALYZE may produce wildly different
1365  * reltuples values, e.g. when there are many recently-dead
1366  * tuples.
1367  */
1368  switch (HeapTupleSatisfiesVacuum(heapTuple, OldestXmin,
1369  hscan->rs_cbuf))
1370  {
1371  case HEAPTUPLE_DEAD:
1372  /* Definitely dead, we can ignore it */
1373  indexIt = false;
1374  tupleIsAlive = false;
1375  break;
1376  case HEAPTUPLE_LIVE:
1377  /* Normal case, index and unique-check it */
1378  indexIt = true;
1379  tupleIsAlive = true;
1380  /* Count it as live, too */
1381  reltuples += 1;
1382  break;
1383  case HEAPTUPLE_RECENTLY_DEAD:
1384 
1385  /*
1386  * If tuple is recently deleted then we must index it
1387  * anyway to preserve MVCC semantics. (Pre-existing
1388  * transactions could try to use the index after we finish
1389  * building it, and may need to see such tuples.)
1390  *
1391  * However, if it was HOT-updated then we must only index
1392  * the live tuple at the end of the HOT-chain. Since this
1393  * breaks semantics for pre-existing snapshots, mark the
1394  * index as unusable for them.
1395  *
1396  * We don't count recently-dead tuples in reltuples, even
1397  * if we index them; see heapam_scan_analyze_next_tuple().
1398  */
1399  if (HeapTupleIsHotUpdated(heapTuple))
1400  {
1401  indexIt = false;
1402  /* mark the index as unsafe for old snapshots */
1403  indexInfo->ii_BrokenHotChain = true;
1404  }
1405  else
1406  indexIt = true;
1407  /* In any case, exclude the tuple from unique-checking */
1408  tupleIsAlive = false;
1409  break;
1410  case HEAPTUPLE_INSERT_IN_PROGRESS:
1411 
1412  /*
1413  * In "anyvisible" mode, this tuple is visible and we
1414  * don't need any further checks.
1415  */
1416  if (anyvisible)
1417  {
1418  indexIt = true;
1419  tupleIsAlive = true;
1420  reltuples += 1;
1421  break;
1422  }
1423 
1424  /*
1425  * Since caller should hold ShareLock or better, normally
1426  * the only way to see this is if it was inserted earlier
1427  * in our own transaction. However, it can happen in
1428  * system catalogs, since we tend to release write lock
1429  * before commit there. Give a warning if neither case
1430  * applies.
1431  */
1432  xwait = HeapTupleHeaderGetXmin(heapTuple->t_data);
1433  if (!TransactionIdIsCurrentTransactionId(xwait))
1434  {
1435  if (!is_system_catalog)
1436  elog(WARNING, "concurrent insert in progress within table \"%s\"",
1437  RelationGetRelationName(heapRelation));
1438 
1439  /*
1440  * If we are performing uniqueness checks, indexing
1441  * such a tuple could lead to a bogus uniqueness
1442  * failure. In that case we wait for the inserting
1443  * transaction to finish and check again.
1444  */
1445  if (checking_uniqueness)
1446  {
1447  /*
1448  * Must drop the lock on the buffer before we wait
1449  */
1450  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1451  XactLockTableWait(xwait, heapRelation,
1452  &heapTuple->t_self,
1453  XLTW_InsertIndexUnique);
1454  CHECK_FOR_INTERRUPTS();
1455  goto recheck;
1456  }
1457  }
1458  else
1459  {
1460  /*
1461  * For consistency with
1462  * heapam_scan_analyze_next_tuple(), count
1463  * HEAPTUPLE_INSERT_IN_PROGRESS tuples as live only
1464  * when inserted by our own transaction.
1465  */
1466  reltuples += 1;
1467  }
1468 
1469  /*
1470  * We must index such tuples, since if the index build
1471  * commits then they're good.
1472  */
1473  indexIt = true;
1474  tupleIsAlive = true;
1475  break;
1476  case HEAPTUPLE_DELETE_IN_PROGRESS:
1477 
1478  /*
1479  * As with INSERT_IN_PROGRESS case, this is unexpected
1480  * unless it's our own deletion or a system catalog; but
1481  * in anyvisible mode, this tuple is visible.
1482  */
1483  if (anyvisible)
1484  {
1485  indexIt = true;
1486  tupleIsAlive = false;
1487  reltuples += 1;
1488  break;
1489  }
1490 
1491  xwait = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1492  if (!TransactionIdIsCurrentTransactionId(xwait))
1493  {
1494  if (!is_system_catalog)
1495  elog(WARNING, "concurrent delete in progress within table \"%s\"",
1496  RelationGetRelationName(heapRelation));
1497 
1498  /*
1499  * If we are performing uniqueness checks, assuming
1500  * the tuple is dead could lead to missing a
1501  * uniqueness violation. In that case we wait for the
1502  * deleting transaction to finish and check again.
1503  *
1504  * Also, if it's a HOT-updated tuple, we should not
1505  * index it but rather the live tuple at the end of
1506  * the HOT-chain. However, the deleting transaction
1507  * could abort, possibly leaving this tuple as live
1508  * after all, in which case it has to be indexed. The
1509  * only way to know what to do is to wait for the
1510  * deleting transaction to finish and check again.
1511  */
1512  if (checking_uniqueness ||
1513  HeapTupleIsHotUpdated(heapTuple))
1514  {
1515  /*
1516  * Must drop the lock on the buffer before we wait
1517  */
1518  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1519  XactLockTableWait(xwait, heapRelation,
1520  &heapTuple->t_self,
1521  XLTW_InsertIndexUnique);
1522  CHECK_FOR_INTERRUPTS();
1523  goto recheck;
1524  }
1525 
1526  /*
1527  * Otherwise index it but don't check for uniqueness,
1528  * the same as a RECENTLY_DEAD tuple.
1529  */
1530  indexIt = true;
1531 
1532  /*
1533  * Count HEAPTUPLE_DELETE_IN_PROGRESS tuples as live,
1534  * if they were not deleted by the current
1535  * transaction. That's what
1536  * heapam_scan_analyze_next_tuple() does, and we want
1537  * the behavior to be consistent.
1538  */
1539  reltuples += 1;
1540  }
1541  else if (HeapTupleIsHotUpdated(heapTuple))
1542  {
1543  /*
1544  * It's a HOT-updated tuple deleted by our own xact.
1545  * We can assume the deletion will commit (else the
1546  * index contents don't matter), so treat the same as
1547  * RECENTLY_DEAD HOT-updated tuples.
1548  */
1549  indexIt = false;
1550  /* mark the index as unsafe for old snapshots */
1551  indexInfo->ii_BrokenHotChain = true;
1552  }
1553  else
1554  {
1555  /*
1556  * It's a regular tuple deleted by our own xact. Index
1557  * it, but don't check for uniqueness nor count in
1558  * reltuples, the same as a RECENTLY_DEAD tuple.
1559  */
1560  indexIt = true;
1561  }
1562  /* In any case, exclude the tuple from unique-checking */
1563  tupleIsAlive = false;
1564  break;
1565  default:
1566  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1567  indexIt = tupleIsAlive = false; /* keep compiler quiet */
1568  break;
1569  }
1570 
1571  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1572 
1573  if (!indexIt)
1574  continue;
1575  }
1576  else
1577  {
1578  /* heap_getnext did the time qual check */
1579  tupleIsAlive = true;
1580  reltuples += 1;
1581  }
1582 
1583  MemoryContextReset(econtext->ecxt_per_tuple_memory);
1584 
1585  /* Set up for predicate or expression evaluation */
1586  ExecStoreBufferHeapTuple(heapTuple, slot, hscan->rs_cbuf);
1587 
1588  /*
1589  * In a partial index, discard tuples that don't satisfy the
1590  * predicate.
1591  */
1592  if (predicate != NULL)
1593  {
1594  if (!ExecQual(predicate, econtext))
1595  continue;
1596  }
1597 
1598  /*
1599  * For the current heap tuple, extract all the attributes we use in
1600  * this index, and note which are null. This also performs evaluation
1601  * of any expressions needed.
1602  */
1603  FormIndexDatum(indexInfo,
1604  slot,
1605  estate,
1606  values,
1607  isnull);
1608 
1609  /*
1610  * You'd think we should go ahead and build the index tuple here, but
1611  * some index AMs want to do further processing on the data first. So
1612  * pass the values[] and isnull[] arrays, instead.
1613  */
1614 
1615  if (HeapTupleIsHeapOnly(heapTuple))
1616  {
1617  /*
1618  * For a heap-only tuple, pretend its TID is that of the root. See
1619  * src/backend/access/heap/README.HOT for discussion.
1620  */
1621  ItemPointerData tid;
1622  OffsetNumber offnum;
1623 
1624  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_self);
1625 
1626  if (!OffsetNumberIsValid(root_offsets[offnum - 1]))
1627  ereport(ERROR,
1628  (errcode(ERRCODE_DATA_CORRUPTED),
1629  errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"",
1630  ItemPointerGetBlockNumber(&heapTuple->t_self),
1631  offnum,
1632  RelationGetRelationName(heapRelation))));
1633 
1634  ItemPointerSet(&tid, ItemPointerGetBlockNumber(&heapTuple->t_self),
1635  root_offsets[offnum - 1]);
1636 
1637  /* Call the AM's callback routine to process the tuple */
1638  callback(indexRelation, &tid, values, isnull, tupleIsAlive,
1639  callback_state);
1640  }
1641  else
1642  {
1643  /* Call the AM's callback routine to process the tuple */
1644  callback(indexRelation, &heapTuple->t_self, values, isnull,
1645  tupleIsAlive, callback_state);
1646  }
1647  }
1648 
1649  /* Report scan progress one last time. */
1650  if (progress)
1651  {
1652  BlockNumber blks_done;
1653 
1654  if (hscan->rs_base.rs_parallel != NULL)
1655  {
1656  ParallelBlockTableScanDesc pbscan;
1657 
1658  pbscan = (ParallelBlockTableScanDesc) hscan->rs_base.rs_parallel;
1659  blks_done = pbscan->phs_nblocks;
1660  }
1661  else
1662  blks_done = hscan->rs_nblocks;
1663 
1664  pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE,
1665  blks_done);
1666  }
1667 
1668  table_endscan(scan);
1669 
1670  /* we can now forget our snapshot, if set and registered by us */
1671  if (need_unregister_snapshot)
1672  UnregisterSnapshot(snapshot);
1673 
1674  ExecDropSingleTupleTableSlot(slot);
1675 
1676  FreeExecutorState(estate);
1677 
1678  /* These may have been pointing to the now-gone estate */
1679  indexInfo->ii_ExpressionsState = NIL;
1680  indexInfo->ii_PredicateState = NULL;
1681 
1682  return reltuples;
1683 }
TupleTableSlot * table_slot_create(Relation relation, List **reglist)
Definition: tableam.c:77
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
void FormIndexDatum(IndexInfo *indexInfo, TupleTableSlot *slot, EState *estate, Datum *values, bool *isnull)
Definition: index.c:2595
#define NIL
Definition: pg_list.h:65
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
BlockNumber rs_cblock
Definition: heapam.h:59
List * ii_Predicate
Definition: execnodes.h:163
bool IsSystemRelation(Relation relation)
Definition: catalog.c:68
uint32 TransactionId
Definition: c.h:513
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:865
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:854
struct ParallelBlockTableScanDescData * ParallelBlockTableScanDesc
Definition: relscan.h:82
TableScanDescData rs_base
Definition: heapam.h:49
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3235
ExprState * ii_PredicateState
Definition: execnodes.h:164
MemoryContext ecxt_per_tuple_memory
Definition: execnodes.h:234
#define MaxHeapTuplesPerPage
Definition: htup_details.h:574
int errcode(int sqlerrcode)
Definition: elog.c:610
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:136
uint32 BlockNumber
Definition: block.h:31
#define PROCARRAY_FLAGS_VACUUM
Definition: procarray.h:52
Form_pg_class rd_rel
Definition: rel.h:109
static bool ExecQual(ExprState *state, ExprContext *econtext)
Definition: executor.h:370
Snapshot GetTransactionSnapshot(void)
Definition: snapmgr.c:306
#define OidIsValid(objectId)
Definition: c.h:644
static TableScanDesc table_beginscan_strat(Relation rel, Snapshot snapshot, int nkeys, struct ScanKeyData *key, bool allow_strat, bool allow_sync)
Definition: tableam.h:778
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
void FreeExecutorState(EState *estate)
Definition: execUtils.c:191
#define HeapTupleIsHotUpdated(tuple)
Definition: htup_details.h:676
#define GetPerTupleExprContext(estate)
Definition: executor.h:507
List * ii_ExpressionsState
Definition: execnodes.h:162
#define ERROR
Definition: elog.h:43
ItemPointerData t_self
Definition: htup.h:65
static void callback(struct sockaddr *addr, struct sockaddr *mask, void *unused)
Definition: test_ifaddrs.c:48
HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction)
Definition: heapam.c:1275
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1224
void heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
Definition: heapam.c:329
#define InvalidTransactionId
Definition: transam.h:31
#define RelationGetRelationName(relation)
Definition: rel.h:490
ExprState * ExecPrepareQual(List *qual, EState *estate)
Definition: execExpr.c:520
static TransactionId OldestXmin
Definition: vacuumlazy.c:325
bool ii_BrokenHotChain
Definition: execnodes.h:175
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
void heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
Definition: pruneheap.c:745
EState * CreateExecutorState(void)
Definition: execUtils.c:89
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:907
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:45
#define WARNING
Definition: elog.h:40
int progress
Definition: pgbench.c:234
#define PROGRESS_SCAN_BLOCKS_DONE
Definition: progress.h:120
uintptr_t Datum
Definition: postgres.h:367
BlockNumber rs_nblocks
Definition: heapam.h:52
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1362
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:97
#define ereport(elevel,...)
Definition: elog.h:144
TransactionId GetOldestXmin(Relation rel, int flags)
Definition: procarray.c:1305
Buffer rs_cbuf
Definition: heapam.h:60
int errmsg_internal(const char *fmt,...)
Definition: elog.c:911
bool ii_Unique
Definition: execnodes.h:172
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:624
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:685
#define Assert(condition)
Definition: c.h:738
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
#define INDEX_MAX_KEYS
#define InvalidBlockNumber
Definition: block.h:33
TupleTableSlot * ecxt_scantuple
Definition: execnodes.h:226
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define PROGRESS_SCAN_BLOCKS_TOTAL
Definition: progress.h:119
bool ii_Concurrent
Definition: execnodes.h:174
#define SnapshotAny
Definition: snapmgr.h:69
static void table_endscan(TableScanDesc scan)
Definition: tableam.h:862
static Datum values[MAXATTR]
Definition: bootstrap.c:167
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:392
Oid * ii_ExclusionOps
Definition: execnodes.h:165
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
#define elog(elevel,...)
Definition: elog.h:214
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
struct ParallelTableScanDescData * rs_parallel
Definition: relscan.h:45
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static BlockNumber heapam_scan_get_blocks_done(HeapScanDesc hscan)
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127

◆ heapam_index_fetch_begin()

static IndexFetchTableData* heapam_index_fetch_begin ( Relation  rel)
static

Definition at line 78 of file heapam_handler.c.

References InvalidBuffer, palloc0(), IndexFetchTableData::rel, IndexFetchHeapData::xs_base, and IndexFetchHeapData::xs_cbuf.

Referenced by SampleHeapTupleVisible().

79 {
81 
82  hscan->xs_base.rel = rel;
83  hscan->xs_cbuf = InvalidBuffer;
84 
85  return &hscan->xs_base;
86 }
#define InvalidBuffer
Definition: buf.h:25
Buffer xs_cbuf
Definition: heapam.h:82
void * palloc0(Size size)
Definition: mcxt.c:980
Relation rel
Definition: relscan.h:91
IndexFetchTableData xs_base
Definition: heapam.h:80

◆ heapam_index_fetch_end()

static void heapam_index_fetch_end ( IndexFetchTableData scan)
static

Definition at line 101 of file heapam_handler.c.

References heapam_index_fetch_reset(), and pfree().

Referenced by SampleHeapTupleVisible().

102 {
103  IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
104 
106 
107  pfree(hscan);
108 }
static void heapam_index_fetch_reset(IndexFetchTableData *scan)
void pfree(void *pointer)
Definition: mcxt.c:1056

◆ heapam_index_fetch_reset()

static void heapam_index_fetch_reset ( IndexFetchTableData scan)
static

Definition at line 89 of file heapam_handler.c.

References BufferIsValid, InvalidBuffer, ReleaseBuffer(), and IndexFetchHeapData::xs_cbuf.

Referenced by heapam_index_fetch_end(), and SampleHeapTupleVisible().

90 {
91  IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
92 
93  if (BufferIsValid(hscan->xs_cbuf))
94  {
95  ReleaseBuffer(hscan->xs_cbuf);
96  hscan->xs_cbuf = InvalidBuffer;
97  }
98 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3483
Buffer xs_cbuf
Definition: heapam.h:82
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123

◆ heapam_index_fetch_tuple()

static bool heapam_index_fetch_tuple ( struct IndexFetchTableData scan,
ItemPointer  tid,
Snapshot  snapshot,
TupleTableSlot slot,
bool call_again,
bool all_dead 
)
static

Definition at line 111 of file heapam_handler.c.

References Assert, BufferHeapTupleTableSlot::base, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, ExecStoreBufferHeapTuple(), heap_hot_search_buffer(), heap_page_prune_opt(), IsMVCCSnapshot, ItemPointerGetBlockNumber, LockBuffer(), IndexFetchTableData::rel, RelationGetRelid, ReleaseAndReadBuffer(), HeapTupleData::t_self, TTS_IS_BUFFERTUPLE, TupleTableSlot::tts_tableOid, HeapTupleTableSlot::tupdata, IndexFetchHeapData::xs_base, and IndexFetchHeapData::xs_cbuf.

Referenced by SampleHeapTupleVisible().

116 {
117  IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
119  bool got_heap_tuple;
120 
121  Assert(TTS_IS_BUFFERTUPLE(slot));
122 
123  /* We can skip the buffer-switching logic if we're in mid-HOT chain. */
124  if (!*call_again)
125  {
126  /* Switch to correct buffer if we don't have it already */
127  Buffer prev_buf = hscan->xs_cbuf;
128 
129  hscan->xs_cbuf = ReleaseAndReadBuffer(hscan->xs_cbuf,
130  hscan->xs_base.rel,
132 
133  /*
134  * Prune page, but only if we weren't already on this page
135  */
136  if (prev_buf != hscan->xs_cbuf)
137  heap_page_prune_opt(hscan->xs_base.rel, hscan->xs_cbuf);
138  }
139 
140  /* Obtain share-lock on the buffer so we can examine visibility */
142  got_heap_tuple = heap_hot_search_buffer(tid,
143  hscan->xs_base.rel,
144  hscan->xs_cbuf,
145  snapshot,
146  &bslot->base.tupdata,
147  all_dead,
148  !*call_again);
149  bslot->base.tupdata.t_self = *tid;
151 
152  if (got_heap_tuple)
153  {
154  /*
155  * Only in a non-MVCC snapshot can more than one member of the HOT
156  * chain be visible.
157  */
158  *call_again = !IsMVCCSnapshot(snapshot);
159 
160  slot->tts_tableOid = RelationGetRelid(scan->rel);
161  ExecStoreBufferHeapTuple(&bslot->base.tupdata, slot, hscan->xs_cbuf);
162  }
163  else
164  {
165  /* We've reached the end of the HOT chain. */
166  *call_again = false;
167  }
168 
169  return got_heap_tuple;
170 }
Oid tts_tableOid
Definition: tuptable.h:131
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
Definition: heapam.c:1488
ItemPointerData t_self
Definition: htup.h:65
Buffer xs_cbuf
Definition: heapam.h:82
#define TTS_IS_BUFFERTUPLE(slot)
Definition: tuptable.h:231
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1362
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:97
#define Assert(condition)
Definition: c.h:738
Buffer ReleaseAndReadBuffer(Buffer buffer, Relation relation, BlockNumber blockNum)
Definition: bufmgr.c:1531
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
HeapTupleTableSlot base
Definition: tuptable.h:259
void heap_page_prune_opt(Relation relation, Buffer buffer)
Definition: pruneheap.c:73
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
Relation rel
Definition: relscan.h:91
HeapTupleData tupdata
Definition: tuptable.h:253
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:456
IndexFetchTableData xs_base
Definition: heapam.h:80

◆ heapam_index_validate_scan()

static void heapam_index_validate_scan ( Relation  heapRelation,
Relation  indexRelation,
IndexInfo indexInfo,
Snapshot  snapshot,
ValidateIndexState state 
)
static

Definition at line 1686 of file heapam_handler.c.

References Assert, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, CHECK_FOR_INTERRUPTS, CreateExecutorState(), DatumGetInt64, DatumGetPointer, ExprContext::ecxt_per_tuple_memory, ExprContext::ecxt_scantuple, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg_internal(), ERROR, ExecDropSingleTupleTableSlot(), ExecPrepareQual(), ExecQual(), ExecStoreHeapTuple(), FormIndexDatum(), ForwardScanDirection, FreeExecutorState(), GetPerTupleExprContext, heap_get_root_tuples(), heap_getnext(), HeapTupleIsHeapOnly, ValidateIndexState::htups, IndexInfo::ii_ExpressionsState, IndexInfo::ii_Predicate, IndexInfo::ii_PredicateState, IndexInfo::ii_Unique, index_insert(), INDEX_MAX_KEYS, InvalidBlockNumber, ItemPointerCompare(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSetOffsetNumber, itemptr_decode(), LockBuffer(), MakeSingleTupleTableSlot(), MaxHeapTuplesPerPage, MemoryContextReset(), NIL, OffsetNumberIsValid, OidIsValid, pfree(), pgstat_progress_update_param(), PROGRESS_SCAN_BLOCKS_DONE, PROGRESS_SCAN_BLOCKS_TOTAL, RelationData::rd_rel, RelationGetDescr, RelationGetRelationName, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_nblocks, HeapTupleData::t_self, table_beginscan_strat(), table_endscan(), TTSOpsHeapTuple, ValidateIndexState::tuplesort, tuplesort_getdatum(), ValidateIndexState::tups_inserted, UNIQUE_CHECK_NO, UNIQUE_CHECK_YES, and values.

Referenced by SampleHeapTupleVisible().

1691 {
1692  TableScanDesc scan;
1693  HeapScanDesc hscan;
1694  HeapTuple heapTuple;
1696  bool isnull[INDEX_MAX_KEYS];
1697  ExprState *predicate;
1698  TupleTableSlot *slot;
1699  EState *estate;
1700  ExprContext *econtext;
1701  BlockNumber root_blkno = InvalidBlockNumber;
1702  OffsetNumber root_offsets[MaxHeapTuplesPerPage];
1703  bool in_index[MaxHeapTuplesPerPage];
1704  BlockNumber previous_blkno = InvalidBlockNumber;
1705 
1706  /* state variables for the merge */
1707  ItemPointer indexcursor = NULL;
1708  ItemPointerData decoded;
1709  bool tuplesort_empty = false;
1710 
1711  /*
1712  * sanity checks
1713  */
1714  Assert(OidIsValid(indexRelation->rd_rel->relam));
1715 
1716  /*
1717  * Need an EState for evaluation of index expressions and partial-index
1718  * predicates. Also a slot to hold the current tuple.
1719  */
1720  estate = CreateExecutorState();
1721  econtext = GetPerTupleExprContext(estate);
1722  slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation),
1723  &TTSOpsHeapTuple);
1724 
1725  /* Arrange for econtext's scan tuple to be the tuple under test */
1726  econtext->ecxt_scantuple = slot;
1727 
1728  /* Set up execution state for predicate, if any. */
1729  predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
1730 
1731  /*
1732  * Prepare for scan of the base relation. We need just those tuples
1733  * satisfying the passed-in reference snapshot. We must disable syncscan
1734  * here, because it's critical that we read from block zero forward to
1735  * match the sorted TIDs.
1736  */
1737  scan = table_beginscan_strat(heapRelation, /* relation */
1738  snapshot, /* snapshot */
1739  0, /* number of keys */
1740  NULL, /* scan key */
1741  true, /* buffer access strategy OK */
1742  false); /* syncscan not OK */
1743  hscan = (HeapScanDesc) scan;
1744 
1746  hscan->rs_nblocks);
1747 
1748  /*
1749  * Scan all tuples matching the snapshot.
1750  */
1751  while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
1752  {
1753  ItemPointer heapcursor = &heapTuple->t_self;
1754  ItemPointerData rootTuple;
1755  OffsetNumber root_offnum;
1756 
1758 
1759  state->htups += 1;
1760 
1761  if ((previous_blkno == InvalidBlockNumber) ||
1762  (hscan->rs_cblock != previous_blkno))
1763  {
1765  hscan->rs_cblock);
1766  previous_blkno = hscan->rs_cblock;
1767  }
1768 
1769  /*
1770  * As commented in table_index_build_scan, we should index heap-only
1771  * tuples under the TIDs of their root tuples; so when we advance onto
1772  * a new heap page, build a map of root item offsets on the page.
1773  *
1774  * This complicates merging against the tuplesort output: we will
1775  * visit the live tuples in order by their offsets, but the root
1776  * offsets that we need to compare against the index contents might be
1777  * ordered differently. So we might have to "look back" within the
1778  * tuplesort output, but only within the current page. We handle that
1779  * by keeping a bool array in_index[] showing all the
1780  * already-passed-over tuplesort output TIDs of the current page. We
1781  * clear that array here, when advancing onto a new heap page.
1782  */
1783  if (hscan->rs_cblock != root_blkno)
1784  {
1785  Page page = BufferGetPage(hscan->rs_cbuf);
1786 
1788  heap_get_root_tuples(page, root_offsets);
1790 
1791  memset(in_index, 0, sizeof(in_index));
1792 
1793  root_blkno = hscan->rs_cblock;
1794  }
1795 
1796  /* Convert actual tuple TID to root TID */
1797  rootTuple = *heapcursor;
1798  root_offnum = ItemPointerGetOffsetNumber(heapcursor);
1799 
1800  if (HeapTupleIsHeapOnly(heapTuple))
1801  {
1802  root_offnum = root_offsets[root_offnum - 1];
1803  if (!OffsetNumberIsValid(root_offnum))
1804  ereport(ERROR,
1806  errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"",
1807  ItemPointerGetBlockNumber(heapcursor),
1808  ItemPointerGetOffsetNumber(heapcursor),
1809  RelationGetRelationName(heapRelation))));
1810  ItemPointerSetOffsetNumber(&rootTuple, root_offnum);
1811  }
1812 
1813  /*
1814  * "merge" by skipping through the index tuples until we find or pass
1815  * the current root tuple.
1816  */
1817  while (!tuplesort_empty &&
1818  (!indexcursor ||
1819  ItemPointerCompare(indexcursor, &rootTuple) < 0))
1820  {
1821  Datum ts_val;
1822  bool ts_isnull;
1823 
1824  if (indexcursor)
1825  {
1826  /*
1827  * Remember index items seen earlier on the current heap page
1828  */
1829  if (ItemPointerGetBlockNumber(indexcursor) == root_blkno)
1830  in_index[ItemPointerGetOffsetNumber(indexcursor) - 1] = true;
1831  }
1832 
1833  tuplesort_empty = !tuplesort_getdatum(state->tuplesort, true,
1834  &ts_val, &ts_isnull, NULL);
1835  Assert(tuplesort_empty || !ts_isnull);
1836  if (!tuplesort_empty)
1837  {
1838  itemptr_decode(&decoded, DatumGetInt64(ts_val));
1839  indexcursor = &decoded;
1840 
1841  /* If int8 is pass-by-ref, free (encoded) TID Datum memory */
1842 #ifndef USE_FLOAT8_BYVAL
1843  pfree(DatumGetPointer(ts_val));
1844 #endif
1845  }
1846  else
1847  {
1848  /* Be tidy */
1849  indexcursor = NULL;
1850  }
1851  }
1852 
1853  /*
1854  * If the tuplesort has overshot *and* we didn't see a match earlier,
1855  * then this tuple is missing from the index, so insert it.
1856  */
1857  if ((tuplesort_empty ||
1858  ItemPointerCompare(indexcursor, &rootTuple) > 0) &&
1859  !in_index[root_offnum - 1])
1860  {
1862 
1863  /* Set up for predicate or expression evaluation */
1864  ExecStoreHeapTuple(heapTuple, slot, false);
1865 
1866  /*
1867  * In a partial index, discard tuples that don't satisfy the
1868  * predicate.
1869  */
1870  if (predicate != NULL)
1871  {
1872  if (!ExecQual(predicate, econtext))
1873  continue;
1874  }
1875 
1876  /*
1877  * For the current heap tuple, extract all the attributes we use
1878  * in this index, and note which are null. This also performs
1879  * evaluation of any expressions needed.
1880  */
1881  FormIndexDatum(indexInfo,
1882  slot,
1883  estate,
1884  values,
1885  isnull);
1886 
1887  /*
1888  * You'd think we should go ahead and build the index tuple here,
1889  * but some index AMs want to do further processing on the data
1890  * first. So pass the values[] and isnull[] arrays, instead.
1891  */
1892 
1893  /*
1894  * If the tuple is already committed dead, you might think we
1895  * could suppress uniqueness checking, but this is no longer true
1896  * in the presence of HOT, because the insert is actually a proxy
1897  * for a uniqueness check on the whole HOT-chain. That is, the
1898  * tuple we have here could be dead because it was already
1899  * HOT-updated, and if so the updating transaction will not have
1900  * thought it should insert index entries. The index AM will
1901  * check the whole HOT-chain and correctly detect a conflict if
1902  * there is one.
1903  */
1904 
1905  index_insert(indexRelation,
1906  values,
1907  isnull,
1908  &rootTuple,
1909  heapRelation,
1910  indexInfo->ii_Unique ?
1912  indexInfo);
1913 
1914  state->tups_inserted += 1;
1915  }
1916  }
1917 
1918  table_endscan(scan);
1919 
1921 
1922  FreeExecutorState(estate);
1923 
1924  /* These may have been pointing to the now-gone estate */
1925  indexInfo->ii_ExpressionsState = NIL;
1926  indexInfo->ii_PredicateState = NULL;
1927 }
int32 ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
Definition: itemptr.c:52
void FormIndexDatum(IndexInfo *indexInfo, TupleTableSlot *slot, EState *estate, Datum *values, bool *isnull)
Definition: index.c:2595
#define NIL
Definition: pg_list.h:65
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
bool tuplesort_getdatum(Tuplesortstate *state, bool forward, Datum *val, bool *isNull, Datum *abbrev)
Definition: tuplesort.c:2418
BlockNumber rs_cblock
Definition: heapam.h:59
List * ii_Predicate
Definition: execnodes.h:163
#define RelationGetDescr(relation)
Definition: rel.h:482
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1208
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3235
ExprState * ii_PredicateState
Definition: execnodes.h:164
MemoryContext ecxt_per_tuple_memory
Definition: execnodes.h:234
#define MaxHeapTuplesPerPage
Definition: htup_details.h:574
int errcode(int sqlerrcode)
Definition: elog.c:610
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:136
uint32 BlockNumber
Definition: block.h:31
Form_pg_class rd_rel
Definition: rel.h:109
static bool ExecQual(ExprState *state, ExprContext *econtext)
Definition: executor.h:370
#define OidIsValid(objectId)
Definition: c.h:644
static TableScanDesc table_beginscan_strat(Relation rel, Snapshot snapshot, int nkeys, struct ScanKeyData *key, bool allow_strat, bool allow_sync)
Definition: tableam.h:778
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
uint16 OffsetNumber
Definition: off.h:24
void FreeExecutorState(EState *estate)
Definition: execUtils.c:191
#define GetPerTupleExprContext(estate)
Definition: executor.h:507
List * ii_ExpressionsState
Definition: execnodes.h:162
void pfree(void *pointer)
Definition: mcxt.c:1056
#define ERROR
Definition: elog.h:43
ItemPointerData t_self
Definition: htup.h:65
#define DatumGetInt64(X)
Definition: postgres.h:607
Tuplesortstate * tuplesort
Definition: index.h:35
HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction)
Definition: heapam.c:1275
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1224
#define RelationGetRelationName(relation)
Definition: rel.h:490
ExprState * ExecPrepareQual(List *qual, EState *estate)
Definition: execExpr.c:520
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
void heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
Definition: pruneheap.c:745
EState * CreateExecutorState(void)
Definition: execUtils.c:89
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:45
#define PROGRESS_SCAN_BLOCKS_DONE
Definition: progress.h:120
uintptr_t Datum
Definition: postgres.h:367
BlockNumber rs_nblocks
Definition: heapam.h:52
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
static void itemptr_decode(ItemPointer itemptr, int64 encoded)
Definition: index.h:191
#define ereport(elevel,...)
Definition: elog.h:144
Buffer rs_cbuf
Definition: heapam.h:60
int errmsg_internal(const char *fmt,...)
Definition: elog.c:911
bool ii_Unique
Definition: execnodes.h:172
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:685
#define Assert(condition)
Definition: c.h:738
double tups_inserted
Definition: index.h:37
#define INDEX_MAX_KEYS
#define InvalidBlockNumber
Definition: block.h:33
TupleTableSlot * ecxt_scantuple
Definition: execnodes.h:226
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define PROGRESS_SCAN_BLOCKS_TOTAL
Definition: progress.h:119
#define DatumGetPointer(X)
Definition: postgres.h:549
double htups
Definition: index.h:37
#define ItemPointerSetOffsetNumber(pointer, offsetNumber)
Definition: itemptr.h:148
static void table_endscan(TableScanDesc scan)
Definition: tableam.h:862
static Datum values[MAXATTR]
Definition: bootstrap.c:167
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
const TupleTableSlotOps TTSOpsHeapTuple
Definition: execTuples.c:84
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
TupleTableSlot * ExecStoreHeapTuple(HeapTuple tuple, TupleTableSlot *slot, bool shouldFree)
Definition: execTuples.c:1322
Pointer Page
Definition: bufpage.h:78
bool index_insert(Relation indexRelation, Datum *values, bool *isnull, ItemPointer heap_t_ctid, Relation heapRelation, IndexUniqueCheck checkUnique, IndexInfo *indexInfo)
Definition: indexam.c:176

◆ heapam_relation_copy_data()

static void heapam_relation_copy_data ( Relation  rel,
const RelFileNode newrnode 
)
static

Definition at line 621 of file heapam_handler.c.

References FlushRelationBuffers(), INIT_FORKNUM, log_smgrcreate(), MAIN_FORKNUM, MAX_FORKNUM, RelationData::rd_backend, RelationData::rd_rel, RelationData::rd_smgr, RelationCopyStorage(), RelationCreateStorage(), RelationDropStorage(), RelationOpenSmgr, smgrclose(), smgrcreate(), smgrexists(), and smgropen().

Referenced by SampleHeapTupleVisible().

622 {
623  SMgrRelation dstrel;
624 
625  dstrel = smgropen(*newrnode, rel->rd_backend);
626  RelationOpenSmgr(rel);
627 
628  /*
629  * Since we copy the file directly without looking at the shared buffers,
630  * we'd better first flush out any pages of the source relation that are
631  * in shared buffers. We assume no new changes will be made while we are
632  * holding exclusive lock on the rel.
633  */
635 
636  /*
637  * Create and copy all forks of the relation, and schedule unlinking of
638  * old physical files.
639  *
640  * NOTE: any conflict in relfilenode value will be caught in
641  * RelationCreateStorage().
642  */
643  RelationCreateStorage(*newrnode, rel->rd_rel->relpersistence);
644 
645  /* copy main fork */
647  rel->rd_rel->relpersistence);
648 
649  /* copy those extra forks that exist */
650  for (ForkNumber forkNum = MAIN_FORKNUM + 1;
651  forkNum <= MAX_FORKNUM; forkNum++)
652  {
653  if (smgrexists(rel->rd_smgr, forkNum))
654  {
655  smgrcreate(dstrel, forkNum, false);
656 
657  /*
658  * WAL log creation if the relation is persistent, or this is the
659  * init fork of an unlogged relation.
660  */
661  if (rel->rd_rel->relpersistence == RELPERSISTENCE_PERMANENT ||
662  (rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
663  forkNum == INIT_FORKNUM))
664  log_smgrcreate(newrnode, forkNum);
665  RelationCopyStorage(rel->rd_smgr, dstrel, forkNum,
666  rel->rd_rel->relpersistence);
667  }
668  }
669 
670 
671  /* drop old relation, and close new one */
672  RelationDropStorage(rel);
673  smgrclose(dstrel);
674 }
void smgrclose(SMgrRelation reln)
Definition: smgr.c:256
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:333
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
bool smgrexists(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:247
Form_pg_class rd_rel
Definition: rel.h:109
#define RelationOpenSmgr(relation)
Definition: rel.h:513
SMgrRelation RelationCreateStorage(RelFileNode rnode, char relpersistence)
Definition: storage.c:118
SMgrRelation smgropen(RelFileNode rnode, BackendId backend)
Definition: smgr.c:145
ForkNumber
Definition: relpath.h:40
void RelationDropStorage(Relation rel)
Definition: storage.c:195
void RelationCopyStorage(SMgrRelation src, SMgrRelation dst, ForkNumber forkNum, char relpersistence)
Definition: storage.c:408
BackendId rd_backend
Definition: rel.h:59
void FlushRelationBuffers(Relation rel)
Definition: bufmgr.c:3224
#define MAX_FORKNUM
Definition: relpath.h:55
void log_smgrcreate(const RelFileNode *rnode, ForkNumber forkNum)
Definition: storage.c:175

◆ heapam_relation_copy_for_cluster()

static void heapam_relation_copy_for_cluster ( Relation  OldHeap,
Relation  NewHeap,
Relation  OldIndex,
bool  use_sort,
TransactionId  OldestXmin,
TransactionId xid_cutoff,
MultiXactId multi_cutoff,
double *  num_tuples,
double *  tups_vacuumed,
double *  tups_recently_dead 
)
static

Definition at line 677 of file heapam_handler.c.

References Assert, begin_heap_rewrite(), buf, BufferHeapTupleTableSlot::buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, CHECK_FOR_INTERRUPTS, elog, end_heap_rewrite(), ERROR, ExecDropSingleTupleTableSlot(), ExecFetchSlotHeapTuple(), ForwardScanDirection, HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleSatisfiesVacuum(), index_beginscan(), index_endscan(), index_getnext_slot(), index_rescan(), InvalidBlockNumber, IsSystemRelation(), LockBuffer(), maintenance_work_mem, TupleDescData::natts, palloc(), pfree(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_CLUSTER_HEAP_BLKS_SCANNED, PROGRESS_CLUSTER_HEAP_TUPLES_SCANNED, PROGRESS_CLUSTER_HEAP_TUPLES_WRITTEN, PROGRESS_CLUSTER_INDEX_RELID, PROGRESS_CLUSTER_PHASE, PROGRESS_CLUSTER_PHASE_INDEX_SCAN_HEAP, PROGRESS_CLUSTER_PHASE_SEQ_SCAN_HEAP, PROGRESS_CLUSTER_PHASE_SORT_TUPLES, PROGRESS_CLUSTER_PHASE_WRITE_NEW_HEAP, PROGRESS_CLUSTER_TOTAL_HEAP_BLKS, reform_and_rewrite_tuple(), RelationGetDescr, RelationGetRelationName, RelationGetRelid, RelationGetTargetBlock, rewrite_heap_dead_tuple(), HeapScanDescData::rs_cblock, HeapScanDescData::rs_nblocks, SnapshotAny, HeapTupleData::t_data, table_beginscan(), table_endscan(), table_scan_getnextslot(), table_slot_create(), TransactionIdIsCurrentTransactionId(), tuplesort_begin_cluster(), tuplesort_end(), tuplesort_getheaptuple(), tuplesort_performsort(), tuplesort_putheaptuple(), values, WARNING, and IndexScanDescData::xs_recheck.

Referenced by SampleHeapTupleVisible().

685 {
686  RewriteState rwstate;
687  IndexScanDesc indexScan;
688  TableScanDesc tableScan;
689  HeapScanDesc heapScan;
690  bool is_system_catalog;
691  Tuplesortstate *tuplesort;
692  TupleDesc oldTupDesc = RelationGetDescr(OldHeap);
693  TupleDesc newTupDesc = RelationGetDescr(NewHeap);
694  TupleTableSlot *slot;
695  int natts;
696  Datum *values;
697  bool *isnull;
699 
700  /* Remember if it's a system catalog */
701  is_system_catalog = IsSystemRelation(OldHeap);
702 
703  /*
704  * Valid smgr_targblock implies something already wrote to the relation.
705  * This may be harmless, but this function hasn't planned for it.
706  */
708 
709  /* Preallocate values/isnull arrays */
710  natts = newTupDesc->natts;
711  values = (Datum *) palloc(natts * sizeof(Datum));
712  isnull = (bool *) palloc(natts * sizeof(bool));
713 
714  /* Initialize the rewrite operation */
715  rwstate = begin_heap_rewrite(OldHeap, NewHeap, OldestXmin, *xid_cutoff,
716  *multi_cutoff);
717 
718 
719  /* Set up sorting if wanted */
720  if (use_sort)
721  tuplesort = tuplesort_begin_cluster(oldTupDesc, OldIndex,
723  NULL, false);
724  else
725  tuplesort = NULL;
726 
727  /*
728  * Prepare to scan the OldHeap. To ensure we see recently-dead tuples
729  * that still need to be copied, we scan with SnapshotAny and use
730  * HeapTupleSatisfiesVacuum for the visibility test.
731  */
732  if (OldIndex != NULL && !use_sort)
733  {
734  const int ci_index[] = {
737  };
738  int64 ci_val[2];
739 
740  /* Set phase and OIDOldIndex to columns */
742  ci_val[1] = RelationGetRelid(OldIndex);
743  pgstat_progress_update_multi_param(2, ci_index, ci_val);
744 
745  tableScan = NULL;
746  heapScan = NULL;
747  indexScan = index_beginscan(OldHeap, OldIndex, SnapshotAny, 0, 0);
748  index_rescan(indexScan, NULL, 0, NULL, 0);
749  }
750  else
751  {
752  /* In scan-and-sort mode and also VACUUM FULL, set phase */
755 
756  tableScan = table_beginscan(OldHeap, SnapshotAny, 0, (ScanKey) NULL);
757  heapScan = (HeapScanDesc) tableScan;
758  indexScan = NULL;
759 
760  /* Set total heap blocks */
762  heapScan->rs_nblocks);
763  }
764 
765  slot = table_slot_create(OldHeap, NULL);
766  hslot = (BufferHeapTupleTableSlot *) slot;
767 
768  /*
769  * Scan through the OldHeap, either in OldIndex order or sequentially;
770  * copy each tuple into the NewHeap, or transiently to the tuplesort
771  * module. Note that we don't bother sorting dead tuples (they won't get
772  * to the new table anyway).
773  */
774  for (;;)
775  {
776  HeapTuple tuple;
777  Buffer buf;
778  bool isdead;
779 
781 
782  if (indexScan != NULL)
783  {
784  if (!index_getnext_slot(indexScan, ForwardScanDirection, slot))
785  break;
786 
787  /* Since we used no scan keys, should never need to recheck */
788  if (indexScan->xs_recheck)
789  elog(ERROR, "CLUSTER does not support lossy index conditions");
790  }
791  else
792  {
793  if (!table_scan_getnextslot(tableScan, ForwardScanDirection, slot))
794  break;
795 
796  /*
797  * In scan-and-sort mode and also VACUUM FULL, set heap blocks
798  * scanned
799  */
801  heapScan->rs_cblock + 1);
802  }
803 
804  tuple = ExecFetchSlotHeapTuple(slot, false, NULL);
805  buf = hslot->buffer;
806 
808 
809  switch (HeapTupleSatisfiesVacuum(tuple, OldestXmin, buf))
810  {
811  case HEAPTUPLE_DEAD:
812  /* Definitely dead */
813  isdead = true;
814  break;
816  *tups_recently_dead += 1;
817  /* fall through */
818  case HEAPTUPLE_LIVE:
819  /* Live or recently dead, must copy it */
820  isdead = false;
821  break;
823 
824  /*
825  * Since we hold exclusive lock on the relation, normally the
826  * only way to see this is if it was inserted earlier in our
827  * own transaction. However, it can happen in system
828  * catalogs, since we tend to release write lock before commit
829  * there. Give a warning if neither case applies; but in any
830  * case we had better copy it.
831  */
832  if (!is_system_catalog &&
834  elog(WARNING, "concurrent insert in progress within table \"%s\"",
835  RelationGetRelationName(OldHeap));
836  /* treat as live */
837  isdead = false;
838  break;
840 
841  /*
842  * Similar situation to INSERT_IN_PROGRESS case.
843  */
844  if (!is_system_catalog &&
846  elog(WARNING, "concurrent delete in progress within table \"%s\"",
847  RelationGetRelationName(OldHeap));
848  /* treat as recently dead */
849  *tups_recently_dead += 1;
850  isdead = false;
851  break;
852  default:
853  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
854  isdead = false; /* keep compiler quiet */
855  break;
856  }
857 
859 
860  if (isdead)
861  {
862  *tups_vacuumed += 1;
863  /* heap rewrite module still needs to see it... */
864  if (rewrite_heap_dead_tuple(rwstate, tuple))
865  {
866  /* A previous recently-dead tuple is now known dead */
867  *tups_vacuumed += 1;
868  *tups_recently_dead -= 1;
869  }
870  continue;
871  }
872 
873  *num_tuples += 1;
874  if (tuplesort != NULL)
875  {
876  tuplesort_putheaptuple(tuplesort, tuple);
877 
878  /*
879  * In scan-and-sort mode, report increase in number of tuples
880  * scanned
881  */
883  *num_tuples);
884  }
885  else
886  {
887  const int ct_index[] = {
890  };
891  int64 ct_val[2];
892 
893  reform_and_rewrite_tuple(tuple, OldHeap, NewHeap,
894  values, isnull, rwstate);
895 
896  /*
897  * In indexscan mode and also VACUUM FULL, report increase in
898  * number of tuples scanned and written
899  */
900  ct_val[0] = *num_tuples;
901  ct_val[1] = *num_tuples;
902  pgstat_progress_update_multi_param(2, ct_index, ct_val);
903  }
904  }
905 
906  if (indexScan != NULL)
907  index_endscan(indexScan);
908  if (tableScan != NULL)
909  table_endscan(tableScan);
910  if (slot)
912 
913  /*
914  * In scan-and-sort mode, complete the sort, then read out all live tuples
915  * from the tuplestore and write them to the new relation.
916  */
917  if (tuplesort != NULL)
918  {
919  double n_tuples = 0;
920 
921  /* Report that we are now sorting tuples */
924 
925  tuplesort_performsort(tuplesort);
926 
927  /* Report that we are now writing new heap */
930 
931  for (;;)
932  {
933  HeapTuple tuple;
934 
936 
937  tuple = tuplesort_getheaptuple(tuplesort, true);
938  if (tuple == NULL)
939  break;
940 
941  n_tuples += 1;
943  OldHeap, NewHeap,
944  values, isnull,
945  rwstate);
946  /* Report n_tuples */
948  n_tuples);
949  }
950 
951  tuplesort_end(tuplesort);
952  }
953 
954  /* Write out any remaining tuples, and fsync if needed */
955  end_heap_rewrite(rwstate);
956 
957  /* Clean up */
958  pfree(values);
959  pfree(isnull);
960 }
TupleTableSlot * table_slot_create(Relation relation, List **reglist)
Definition: tableam.c:77
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define PROGRESS_CLUSTER_PHASE_SEQ_SCAN_HEAP
Definition: progress.h:65
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
BlockNumber rs_cblock
Definition: heapam.h:59
void tuplesort_performsort(Tuplesortstate *state)
Definition: tuplesort.c:1964
void end_heap_rewrite(RewriteState state)
Definition: rewriteheap.c:301
HeapTuple tuplesort_getheaptuple(Tuplesortstate *state, bool forward)
Definition: tuplesort.c:2369
bool IsSystemRelation(Relation relation)
Definition: catalog.c:68
#define PROGRESS_CLUSTER_HEAP_TUPLES_WRITTEN
Definition: progress.h:59
#define RelationGetDescr(relation)
Definition: rel.h:482
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:854
static void reform_and_rewrite_tuple(HeapTuple tuple, Relation OldHeap, Relation NewHeap, Datum *values, bool *isnull, RewriteState rwstate)
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3235
Tuplesortstate * tuplesort_begin_cluster(TupleDesc tupDesc, Relation indexRel, int workMem, SortCoordinate coordinate, bool randomAccess)
Definition: tuplesort.c:952
void index_rescan(IndexScanDesc scan, ScanKey keys, int nkeys, ScanKey orderbys, int norderbys)
Definition: indexam.c:295
#define PROGRESS_CLUSTER_INDEX_RELID
Definition: progress.h:57
#define PROGRESS_CLUSTER_PHASE_WRITE_NEW_HEAP
Definition: progress.h:68
static bool table_scan_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
Definition: tableam.h:903
#define RelationGetTargetBlock(relation)
Definition: rel.h:541
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
HeapTupleHeader t_data
Definition: htup.h:68
void pfree(void *pointer)
Definition: mcxt.c:1056
#define ERROR
Definition: elog.h:43
static TableScanDesc table_beginscan(Relation rel, Snapshot snapshot, int nkeys, struct ScanKeyData *key)
Definition: tableam.h:754
#define PROGRESS_CLUSTER_PHASE_SORT_TUPLES
Definition: progress.h:67
static char * buf
Definition: pg_test_fsync.c:67
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1224
#define PROGRESS_CLUSTER_HEAP_TUPLES_SCANNED
Definition: progress.h:58
#define RelationGetRelationName(relation)
Definition: rel.h:490
static TransactionId OldestXmin
Definition: vacuumlazy.c:325
void index_endscan(IndexScanDesc scan)
Definition: indexam.c:321
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
Definition: execTuples.c:1614
#define WARNING
Definition: elog.h:40
uintptr_t Datum
Definition: postgres.h:367
BlockNumber rs_nblocks
Definition: heapam.h:52
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
int maintenance_work_mem
Definition: globals.c:122
#define Assert(condition)
Definition: c.h:738
#define PROGRESS_CLUSTER_PHASE_INDEX_SCAN_HEAP
Definition: progress.h:66
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
void pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val)
Definition: pgstat.c:3257
#define InvalidBlockNumber
Definition: block.h:33
bool index_getnext_slot(IndexScanDesc scan, ScanDirection direction, TupleTableSlot *slot)
Definition: indexam.c:613
#define SnapshotAny
Definition: snapmgr.h:69
static void table_endscan(TableScanDesc scan)
Definition: tableam.h:862
static Datum values[MAXATTR]
Definition: bootstrap.c:167
bool rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
Definition: rewriteheap.c:565
void * palloc(Size size)
Definition: mcxt.c:949
#define elog(elevel,...)
Definition: elog.h:214
void tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
Definition: tuplesort.c:1630
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define PROGRESS_CLUSTER_HEAP_BLKS_SCANNED
Definition: progress.h:61
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99
#define PROGRESS_CLUSTER_TOTAL_HEAP_BLKS
Definition: progress.h:60
RewriteState begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xmin, TransactionId freeze_xid, MultiXactId cutoff_multi)
Definition: rewriteheap.c:237
void tuplesort_end(Tuplesortstate *state)
Definition: tuplesort.c:1388
#define PROGRESS_CLUSTER_PHASE
Definition: progress.h:56
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:456
IndexScanDesc index_beginscan(Relation heapRelation, Relation indexRelation, Snapshot snapshot, int nkeys, int norderbys)
Definition: indexam.c:203

◆ heapam_relation_needs_toast_table()

static bool heapam_relation_needs_toast_table ( Relation  rel)
static

Definition at line 1981 of file heapam_handler.c.

References att_align_nominal, BITMAPLEN, i, MAXALIGN, TupleDescData::natts, RelationData::rd_att, SizeofHeapTupleHeader, TOAST_TUPLE_THRESHOLD, TupleDescAttr, and type_maximum_size().

Referenced by SampleHeapTupleVisible().

1982 {
1983  int32 data_length = 0;
1984  bool maxlength_unknown = false;
1985  bool has_toastable_attrs = false;
1986  TupleDesc tupdesc = rel->rd_att;
1987  int32 tuple_length;
1988  int i;
1989 
1990  for (i = 0; i < tupdesc->natts; i++)
1991  {
1992  Form_pg_attribute att = TupleDescAttr(tupdesc, i);
1993 
1994  if (att->attisdropped)
1995  continue;
1996  data_length = att_align_nominal(data_length, att->attalign);
1997  if (att->attlen > 0)
1998  {
1999  /* Fixed-length types are never toastable */
2000  data_length += att->attlen;
2001  }
2002  else
2003  {
2004  int32 maxlen = type_maximum_size(att->atttypid,
2005  att->atttypmod);
2006 
2007  if (maxlen < 0)
2008  maxlength_unknown = true;
2009  else
2010  data_length += maxlen;
2011  if (att->attstorage != TYPSTORAGE_PLAIN)
2012  has_toastable_attrs = true;
2013  }
2014  }
2015  if (!has_toastable_attrs)
2016  return false; /* nothing to toast? */
2017  if (maxlength_unknown)
2018  return true; /* any unlimited-length attrs? */
2019  tuple_length = MAXALIGN(SizeofHeapTupleHeader +
2020  BITMAPLEN(tupdesc->natts)) +
2021  MAXALIGN(data_length);
2022  return (tuple_length > TOAST_TUPLE_THRESHOLD);
2023 }
#define SizeofHeapTupleHeader
Definition: htup_details.h:184
#define att_align_nominal(cur_offset, attalign)
Definition: tupmacs.h:148
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
#define BITMAPLEN(NATTS)
Definition: htup_details.h:547
signed int int32
Definition: c.h:355
int32 type_maximum_size(Oid type_oid, int32 typemod)
Definition: format_type.c:396
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:193
#define TOAST_TUPLE_THRESHOLD
Definition: heaptoast.h:48
TupleDesc rd_att
Definition: rel.h:110
#define MAXALIGN(LEN)
Definition: c.h:691
int i

◆ heapam_relation_nontransactional_truncate()

static void heapam_relation_nontransactional_truncate ( Relation  rel)
static

Definition at line 615 of file heapam_handler.c.

References RelationTruncate().

Referenced by SampleHeapTupleVisible().

616 {
617  RelationTruncate(rel, 0);
618 }
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:277

◆ heapam_relation_set_new_filenode()

static void heapam_relation_set_new_filenode ( Relation  rel,
const RelFileNode newrnode,
char  persistence,
TransactionId freezeXid,
MultiXactId minmulti 
)
static

Definition at line 565 of file heapam_handler.c.

References Assert, GetOldestMultiXactId(), INIT_FORKNUM, log_smgrcreate(), RelationData::rd_rel, RecentXmin, RelationCreateStorage(), smgrclose(), smgrcreate(), and smgrimmedsync().

Referenced by SampleHeapTupleVisible().

570 {
571  SMgrRelation srel;
572 
573  /*
574  * Initialize to the minimum XID that could put tuples in the table. We
575  * know that no xacts older than RecentXmin are still running, so that
576  * will do.
577  */
578  *freezeXid = RecentXmin;
579 
580  /*
581  * Similarly, initialize the minimum Multixact to the first value that
582  * could possibly be stored in tuples in the table. Running transactions
583  * could reuse values from their local cache, so we are careful to
584  * consider all currently running multis.
585  *
586  * XXX this could be refined further, but is it worth the hassle?
587  */
588  *minmulti = GetOldestMultiXactId();
589 
590  srel = RelationCreateStorage(*newrnode, persistence);
591 
592  /*
593  * If required, set up an init fork for an unlogged table so that it can
594  * be correctly reinitialized on restart. An immediate sync is required
595  * even if the page has been logged, because the write did not go through
596  * shared_buffers and therefore a concurrent checkpoint may have moved the
597  * redo pointer past our xlog record. Recovery may as well remove it
598  * while replaying, for example, XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE
599  * record. Therefore, logging is necessary even if wal_level=minimal.
600  */
601  if (persistence == RELPERSISTENCE_UNLOGGED)
602  {
603  Assert(rel->rd_rel->relkind == RELKIND_RELATION ||
604  rel->rd_rel->relkind == RELKIND_MATVIEW ||
605  rel->rd_rel->relkind == RELKIND_TOASTVALUE);
606  smgrcreate(srel, INIT_FORKNUM, false);
607  log_smgrcreate(newrnode, INIT_FORKNUM);
609  }
610 
611  smgrclose(srel);
612 }
void smgrclose(SMgrRelation reln)
Definition: smgr.c:256
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:333
TransactionId RecentXmin
Definition: snapmgr.c:167
Form_pg_class rd_rel
Definition: rel.h:109
SMgrRelation RelationCreateStorage(RelFileNode rnode, char relpersistence)
Definition: storage.c:118
MultiXactId GetOldestMultiXactId(void)
Definition: multixact.c:2493
#define Assert(condition)
Definition: c.h:738
void smgrimmedsync(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:620
void log_smgrcreate(const RelFileNode *rnode, ForkNumber forkNum)
Definition: storage.c:175

◆ heapam_relation_toast_am()

static Oid heapam_relation_toast_am ( Relation  rel)
static

Definition at line 2029 of file heapam_handler.c.

References RelationData::rd_rel.

Referenced by SampleHeapTupleVisible().

2030 {
2031  return rel->rd_rel->relam;
2032 }
Form_pg_class rd_rel
Definition: rel.h:109

◆ heapam_scan_analyze_next_block()

static bool heapam_scan_analyze_next_block ( TableScanDesc  scan,
BlockNumber  blockno,
BufferAccessStrategy  bstrategy 
)
static

Definition at line 963 of file heapam_handler.c.

References BUFFER_LOCK_SHARE, FirstOffsetNumber, LockBuffer(), MAIN_FORKNUM, RBM_NORMAL, ReadBufferExtended(), HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_cindex, and TableScanDescData::rs_rd.

Referenced by SampleHeapTupleVisible().

965 {
966  HeapScanDesc hscan = (HeapScanDesc) scan;
967 
968  /*
969  * We must maintain a pin on the target page's buffer to ensure that
970  * concurrent activity - e.g. HOT pruning - doesn't delete tuples out from
971  * under us. Hence, pin the page until we are done looking at it. We
972  * also choose to hold sharelock on the buffer throughout --- we could
973  * release and re-acquire sharelock for each tuple, but since we aren't
974  * doing much work per tuple, the extra lock traffic is probably better
975  * avoided.
976  */
977  hscan->rs_cblock = blockno;
978  hscan->rs_cindex = FirstOffsetNumber;
980  blockno, RBM_NORMAL, bstrategy);
982 
983  /* in heap all blocks can contain tuples, so always return true */
984  return true;
985 }
BlockNumber rs_cblock
Definition: heapam.h:59
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:652
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
#define FirstOffsetNumber
Definition: off.h:27
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
Buffer rs_cbuf
Definition: heapam.h:60
Relation rs_rd
Definition: relscan.h:34
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97

◆ heapam_scan_analyze_next_tuple()

static bool heapam_scan_analyze_next_tuple ( TableScanDesc  scan,
TransactionId  OldestXmin,
double *  liverows,
double *  deadrows,
TupleTableSlot slot 
)
static

Definition at line 988 of file heapam_handler.c.

References Assert, BufferHeapTupleTableSlot::base, BufferGetPage, elog, ERROR, ExecClearTuple(), ExecStoreBufferHeapTuple(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleSatisfiesVacuum(), InvalidBuffer, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemPointerSet, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, RelationGetRelid, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_cindex, TableScanDescData::rs_rd, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdIsCurrentTransactionId(), TTS_IS_BUFFERTUPLE, HeapTupleTableSlot::tupdata, and UnlockReleaseBuffer().

Referenced by SampleHeapTupleVisible().

991 {
992  HeapScanDesc hscan = (HeapScanDesc) scan;
993  Page targpage;
994  OffsetNumber maxoffset;
996 
997  Assert(TTS_IS_BUFFERTUPLE(slot));
998 
999  hslot = (BufferHeapTupleTableSlot *) slot;
1000  targpage = BufferGetPage(hscan->rs_cbuf);
1001  maxoffset = PageGetMaxOffsetNumber(targpage);
1002 
1003  /* Inner loop over all tuples on the selected page */
1004  for (; hscan->rs_cindex <= maxoffset; hscan->rs_cindex++)
1005  {
1006  ItemId itemid;
1007  HeapTuple targtuple = &hslot->base.tupdata;
1008  bool sample_it = false;
1009 
1010  itemid = PageGetItemId(targpage, hscan->rs_cindex);
1011 
1012  /*
1013  * We ignore unused and redirect line pointers. DEAD line pointers
1014  * should be counted as dead, because we need vacuum to run to get rid
1015  * of them. Note that this rule agrees with the way that
1016  * heap_page_prune() counts things.
1017  */
1018  if (!ItemIdIsNormal(itemid))
1019  {
1020  if (ItemIdIsDead(itemid))
1021  *deadrows += 1;
1022  continue;
1023  }
1024 
1025  ItemPointerSet(&targtuple->t_self, hscan->rs_cblock, hscan->rs_cindex);
1026 
1027  targtuple->t_tableOid = RelationGetRelid(scan->rs_rd);
1028  targtuple->t_data = (HeapTupleHeader) PageGetItem(targpage, itemid);
1029  targtuple->t_len = ItemIdGetLength(itemid);
1030 
1031  switch (HeapTupleSatisfiesVacuum(targtuple, OldestXmin,
1032  hscan->rs_cbuf))
1033  {
1034  case HEAPTUPLE_LIVE:
1035  sample_it = true;
1036  *liverows += 1;
1037  break;
1038 
1039  case HEAPTUPLE_DEAD:
1041  /* Count dead and recently-dead rows */
1042  *deadrows += 1;
1043  break;
1044 
1046 
1047  /*
1048  * Insert-in-progress rows are not counted. We assume that
1049  * when the inserting transaction commits or aborts, it will
1050  * send a stats message to increment the proper count. This
1051  * works right only if that transaction ends after we finish
1052  * analyzing the table; if things happen in the other order,
1053  * its stats update will be overwritten by ours. However, the
1054  * error will be large only if the other transaction runs long
1055  * enough to insert many tuples, so assuming it will finish
1056  * after us is the safer option.
1057  *
1058  * A special case is that the inserting transaction might be
1059  * our own. In this case we should count and sample the row,
1060  * to accommodate users who load a table and analyze it in one
1061  * transaction. (pgstat_report_analyze has to adjust the
1062  * numbers we send to the stats collector to make this come
1063  * out right.)
1064  */
1066  {
1067  sample_it = true;
1068  *liverows += 1;
1069  }
1070  break;
1071 
1073 
1074  /*
1075  * We count and sample delete-in-progress rows the same as
1076  * live ones, so that the stats counters come out right if the
1077  * deleting transaction commits after us, per the same
1078  * reasoning given above.
1079  *
1080  * If the delete was done by our own transaction, however, we
1081  * must count the row as dead to make pgstat_report_analyze's
1082  * stats adjustments come out right. (Note: this works out
1083  * properly when the row was both inserted and deleted in our
1084  * xact.)
1085  *
1086  * The net effect of these choices is that we act as though an
1087  * IN_PROGRESS transaction hasn't happened yet, except if it
1088  * is our own transaction, which we assume has happened.
1089  *
1090  * This approach ensures that we behave sanely if we see both
1091  * the pre-image and post-image rows for a row being updated
1092  * by a concurrent transaction: we will sample the pre-image
1093  * but not the post-image. We also get sane results if the
1094  * concurrent transaction never commits.
1095  */
1097  *deadrows += 1;
1098  else
1099  {
1100  sample_it = true;
1101  *liverows += 1;
1102  }
1103  break;
1104 
1105  default:
1106  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1107  break;
1108  }
1109 
1110  if (sample_it)
1111  {
1112  ExecStoreBufferHeapTuple(targtuple, slot, hscan->rs_cbuf);
1113  hscan->rs_cindex++;
1114 
1115  /* note that we leave the buffer locked here! */
1116  return true;
1117  }
1118  }
1119 
1120  /* Now release the lock and pin on the page */
1121  UnlockReleaseBuffer(hscan->rs_cbuf);
1122  hscan->rs_cbuf = InvalidBuffer;
1123 
1124  /* also prevent old slot contents from having pin on page */
1125  ExecClearTuple(slot);
1126 
1127  return false;
1128 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
BlockNumber rs_cblock
Definition: heapam.h:59
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:425
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:854
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define InvalidBuffer
Definition: buf.h:25
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3506
#define ERROR
Definition: elog.h:43
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
static TransactionId OldestXmin
Definition: vacuumlazy.c:325
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
#define TTS_IS_BUFFERTUPLE(slot)
Definition: tuptable.h:231
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1362
Buffer rs_cbuf
Definition: heapam.h:60
#define Assert(condition)
Definition: c.h:738
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
Relation rs_rd
Definition: relscan.h:34
#define elog(elevel,...)
Definition: elog.h:214
HeapTupleTableSlot base
Definition: tuptable.h:259
HeapTupleData tupdata
Definition: tuptable.h:253
#define RelationGetRelid(relation)
Definition: rel.h:456
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127

◆ heapam_scan_bitmap_next_block()

static bool heapam_scan_bitmap_next_block ( TableScanDesc  scan,
TBMIterateResult tbmres 
)
static

Definition at line 2063 of file heapam_handler.c.

References Assert, TBMIterateResult::blockno, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, FirstOffsetNumber, heap_hot_search_buffer(), heap_page_prune_opt(), HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetXmin, HeapTupleSatisfiesVisibility(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetOffsetNumber, ItemPointerSet, LockBuffer(), MaxHeapTuplesPerPage, TBMIterateResult::ntuples, OffsetNumberNext, TBMIterateResult::offsets, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTID(), RelationData::rd_id, ReleaseAndReadBuffer(), HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_cindex, HeapScanDescData::rs_nblocks, HeapScanDescData::rs_ntuples, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapScanDescData::rs_vistuples, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by SampleHeapTupleVisible().

2065 {
2066  HeapScanDesc hscan = (HeapScanDesc) scan;
2067  BlockNumber page = tbmres->blockno;
2068  Buffer buffer;
2069  Snapshot snapshot;
2070  int ntup;
2071 
2072  hscan->rs_cindex = 0;
2073  hscan->rs_ntuples = 0;
2074 
2075  /*
2076  * Ignore any claimed entries past what we think is the end of the
2077  * relation. It may have been extended after the start of our scan (we
2078  * only hold an AccessShareLock, and it could be inserts from this
2079  * backend).
2080  */
2081  if (page >= hscan->rs_nblocks)
2082  return false;
2083 
2084  /*
2085  * Acquire pin on the target heap page, trading in any pin we held before.
2086  */
2087  hscan->rs_cbuf = ReleaseAndReadBuffer(hscan->rs_cbuf,
2088  scan->rs_rd,
2089  page);
2090  hscan->rs_cblock = page;
2091  buffer = hscan->rs_cbuf;
2092  snapshot = scan->rs_snapshot;
2093 
2094  ntup = 0;
2095 
2096  /*
2097  * Prune and repair fragmentation for the whole page, if possible.
2098  */
2099  heap_page_prune_opt(scan->rs_rd, buffer);
2100 
2101  /*
2102  * We must hold share lock on the buffer content while examining tuple
2103  * visibility. Afterwards, however, the tuples we have found to be
2104  * visible are guaranteed good as long as we hold the buffer pin.
2105  */
2106  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2107 
2108  /*
2109  * We need two separate strategies for lossy and non-lossy cases.
2110  */
2111  if (tbmres->ntuples >= 0)
2112  {
2113  /*
2114  * Bitmap is non-lossy, so we just look through the offsets listed in
2115  * tbmres; but we have to follow any HOT chain starting at each such
2116  * offset.
2117  */
2118  int curslot;
2119 
2120  for (curslot = 0; curslot < tbmres->ntuples; curslot++)
2121  {
2122  OffsetNumber offnum = tbmres->offsets[curslot];
2123  ItemPointerData tid;
2124  HeapTupleData heapTuple;
2125 
2126  ItemPointerSet(&tid, page, offnum);
2127  if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot,
2128  &heapTuple, NULL, true))
2129  hscan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
2130  }
2131  }
2132  else
2133  {
2134  /*
2135  * Bitmap is lossy, so we must examine each line pointer on the page.
2136  * But we can ignore HOT chains, since we'll check each tuple anyway.
2137  */
2138  Page dp = (Page) BufferGetPage(buffer);
2139  OffsetNumber maxoff = PageGetMaxOffsetNumber(dp);
2140  OffsetNumber offnum;
2141 
2142  for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
2143  {
2144  ItemId lp;
2145  HeapTupleData loctup;
2146  bool valid;
2147 
2148  lp = PageGetItemId(dp, offnum);
2149  if (!ItemIdIsNormal(lp))
2150  continue;
2151  loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
2152  loctup.t_len = ItemIdGetLength(lp);
2153  loctup.t_tableOid = scan->rs_rd->rd_id;
2154  ItemPointerSet(&loctup.t_self, page, offnum);
2155  valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
2156  if (valid)
2157  {
2158  hscan->rs_vistuples[ntup++] = offnum;
2159  PredicateLockTID(scan->rs_rd, &loctup.t_self, snapshot,
2160  HeapTupleHeaderGetXmin(loctup.t_data));
2161  }
2162  HeapCheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
2163  buffer, snapshot);
2164  }
2165  }
2166 
2167  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2168 
2169  Assert(ntup <= MaxHeapTuplesPerPage);
2170  hscan->rs_ntuples = ntup;
2171 
2172  return ntup > 0;
2173 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
BlockNumber rs_cblock
Definition: heapam.h:59
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define MaxHeapTuplesPerPage
Definition: htup_details.h:574
uint32 BlockNumber
Definition: block.h:31
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
BlockNumber blockno
Definition: tidbitmap.h:42
bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
Definition: heapam.c:1488
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER]
Definition: tidbitmap.h:46
#define FirstOffsetNumber
Definition: off.h:27
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
Oid rd_id
Definition: rel.h:111
int rs_ntuples
Definition: heapam.h:70
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
BlockNumber rs_nblocks
Definition: heapam.h:52
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
void PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot, TransactionId tuple_xid)
Definition: predicate.c:2545
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:8994
Buffer rs_cbuf
Definition: heapam.h:60
#define Assert(condition)
Definition: c.h:738
OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]
Definition: heapam.h:71
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
Buffer ReleaseAndReadBuffer(Buffer buffer, Relation relation, BlockNumber blockNum)
Definition: bufmgr.c:1531
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
void heap_page_prune_opt(Relation relation, Buffer buffer)
Definition: pruneheap.c:73
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heapam_scan_bitmap_next_tuple()

static bool heapam_scan_bitmap_next_tuple ( TableScanDesc  scan,
TBMIterateResult tbmres,
TupleTableSlot slot 
)
static

Definition at line 2176 of file heapam_handler.c.

References Assert, BufferGetPage, ExecStoreBufferHeapTuple(), ItemIdGetLength, ItemIdIsNormal, ItemPointerSet, PageGetItem, PageGetItemId, pgstat_count_heap_fetch, RelationData::rd_id, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_cindex, HeapScanDescData::rs_ctup, HeapScanDescData::rs_ntuples, TableScanDescData::rs_rd, HeapScanDescData::rs_vistuples, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by SampleHeapTupleVisible().

2179 {
2180  HeapScanDesc hscan = (HeapScanDesc) scan;
2181  OffsetNumber targoffset;
2182  Page dp;
2183  ItemId lp;
2184 
2185  /*
2186  * Out of range? If so, nothing more to look at on this page
2187  */
2188  if (hscan->rs_cindex < 0 || hscan->rs_cindex >= hscan->rs_ntuples)
2189  return false;
2190 
2191  targoffset = hscan->rs_vistuples[hscan->rs_cindex];
2192  dp = (Page) BufferGetPage(hscan->rs_cbuf);
2193  lp = PageGetItemId(dp, targoffset);
2194  Assert(ItemIdIsNormal(lp));
2195 
2196  hscan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
2197  hscan->rs_ctup.t_len = ItemIdGetLength(lp);
2198  hscan->rs_ctup.t_tableOid = scan->rs_rd->rd_id;
2199  ItemPointerSet(&hscan->rs_ctup.t_self, hscan->rs_cblock, targoffset);
2200 
2202 
2203  /*
2204  * Set up the result slot to point to this tuple. Note that the slot
2205  * acquires a pin on the buffer.
2206  */
2208  slot,
2209  hscan->rs_cbuf);
2210 
2211  hscan->rs_cindex++;
2212 
2213  return true;
2214 }
BlockNumber rs_cblock
Definition: heapam.h:59
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
HeapTupleData rs_ctup
Definition: heapam.h:66
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
ItemPointerData t_self
Definition: htup.h:65
#define pgstat_count_heap_fetch(rel)
Definition: pgstat.h:1406
uint32 t_len
Definition: htup.h:64
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
Oid rd_id
Definition: rel.h:111
int rs_ntuples
Definition: heapam.h:70
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1362
Buffer rs_cbuf
Definition: heapam.h:60
#define Assert(condition)
Definition: c.h:738
OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]
Definition: heapam.h:71
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Relation rs_rd
Definition: relscan.h:34
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127

◆ heapam_scan_get_blocks_done()

static BlockNumber heapam_scan_get_blocks_done ( HeapScanDesc  hscan)
static

Definition at line 1936 of file heapam_handler.c.

References ParallelBlockTableScanDescData::phs_nblocks, ParallelBlockTableScanDescData::phs_startblock, HeapScanDescData::rs_base, HeapScanDescData::rs_cblock, HeapScanDescData::rs_nblocks, TableScanDescData::rs_parallel, and HeapScanDescData::rs_startblock.

Referenced by heapam_index_build_range_scan().

1937 {
1938  ParallelBlockTableScanDesc bpscan = NULL;
1939  BlockNumber startblock;
1940  BlockNumber blocks_done;
1941 
1942  if (hscan->rs_base.rs_parallel != NULL)
1943  {
1945  startblock = bpscan->phs_startblock;
1946  }
1947  else
1948  startblock = hscan->rs_startblock;
1949 
1950  /*
1951  * Might have wrapped around the end of the relation, if startblock was
1952  * not zero.
1953  */
1954  if (hscan->rs_cblock > startblock)
1955  blocks_done = hscan->rs_cblock - startblock;
1956  else
1957  {
1958  BlockNumber nblocks;
1959 
1960  nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks;
1961  blocks_done = nblocks - startblock +
1962  hscan->rs_cblock;
1963  }
1964 
1965  return blocks_done;
1966 }
BlockNumber rs_cblock
Definition: heapam.h:59
struct ParallelBlockTableScanDescData * ParallelBlockTableScanDesc
Definition: relscan.h:82
TableScanDescData rs_base
Definition: heapam.h:49
uint32 BlockNumber
Definition: block.h:31
BlockNumber rs_startblock
Definition: heapam.h:53
BlockNumber rs_nblocks
Definition: heapam.h:52
struct ParallelTableScanDescData * rs_parallel
Definition: relscan.h:45

◆ heapam_scan_sample_next_block()

static bool heapam_scan_sample_next_block ( TableScanDesc  scan,
SampleScanState scanstate 
)
static

Definition at line 2217 of file heapam_handler.c.

References Assert, BlockNumberIsValid, BufferIsValid, heapgetpage(), InvalidBlockNumber, InvalidBuffer, TsmRoutine::NextSampleBlock, ReleaseBuffer(), HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, TableScanDescData::rs_flags, HeapScanDescData::rs_inited, HeapScanDescData::rs_nblocks, TableScanDescData::rs_rd, HeapScanDescData::rs_startblock, SO_ALLOW_SYNC, ss_report_location(), and SampleScanState::tsmroutine.

Referenced by SampleHeapTupleVisible().

2218 {
2219  HeapScanDesc hscan = (HeapScanDesc) scan;
2220  TsmRoutine *tsm = scanstate->tsmroutine;
2221  BlockNumber blockno;
2222 
2223  /* return false immediately if relation is empty */
2224  if (hscan->rs_nblocks == 0)
2225  return false;
2226 
2227  if (tsm->NextSampleBlock)
2228  {
2229  blockno = tsm->NextSampleBlock(scanstate, hscan->rs_nblocks);
2230  hscan->rs_cblock = blockno;
2231  }
2232  else
2233  {
2234  /* scanning table sequentially */
2235 
2236  if (hscan->rs_cblock == InvalidBlockNumber)
2237  {
2238  Assert(!hscan->rs_inited);
2239  blockno = hscan->rs_startblock;
2240  }
2241  else
2242  {
2243  Assert(hscan->rs_inited);
2244 
2245  blockno = hscan->rs_cblock + 1;
2246 
2247  if (blockno >= hscan->rs_nblocks)
2248  {
2249  /* wrap to beginning of rel, might not have started at 0 */
2250  blockno = 0;
2251  }
2252 
2253  /*
2254  * Report our new scan position for synchronization purposes.
2255  *
2256  * Note: we do this before checking for end of scan so that the
2257  * final state of the position hint is back at the start of the
2258  * rel. That's not strictly necessary, but otherwise when you run
2259  * the same query multiple times the starting position would shift
2260  * a little bit backwards on every invocation, which is confusing.
2261  * We don't guarantee any specific ordering in general, though.
2262  */
2263  if (scan->rs_flags & SO_ALLOW_SYNC)
2264  ss_report_location(scan->rs_rd, blockno);
2265 
2266  if (blockno == hscan->rs_startblock)
2267  {
2268  blockno = InvalidBlockNumber;
2269  }
2270  }
2271  }
2272 
2273  if (!BlockNumberIsValid(blockno))
2274  {
2275  if (BufferIsValid(hscan->rs_cbuf))
2276  ReleaseBuffer(hscan->rs_cbuf);
2277  hscan->rs_cbuf = InvalidBuffer;
2278  hscan->rs_cblock = InvalidBlockNumber;
2279  hscan->rs_inited = false;
2280 
2281  return false;
2282  }
2283 
2284  heapgetpage(scan, blockno);
2285  hscan->rs_inited = true;
2286 
2287  return true;
2288 }
BlockNumber rs_cblock
Definition: heapam.h:59
#define InvalidBuffer
Definition: buf.h:25
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3483
uint32 rs_flags
Definition: relscan.h:43
void heapgetpage(TableScanDesc sscan, BlockNumber page)
Definition: heapam.c:352
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
bool rs_inited
Definition: heapam.h:58
NextSampleBlock_function NextSampleBlock
Definition: tsmapi.h:73
BlockNumber rs_startblock
Definition: heapam.h:53
BlockNumber rs_nblocks
Definition: heapam.h:52
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
Buffer rs_cbuf
Definition: heapam.h:60
#define Assert(condition)
Definition: c.h:738
#define InvalidBlockNumber
Definition: block.h:33
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
struct TsmRoutine * tsmroutine
Definition: execnodes.h:1358
void ss_report_location(Relation rel, BlockNumber location)
Definition: syncscan.c:288
Relation rs_rd
Definition: relscan.h:34

◆ heapam_scan_sample_next_tuple()

static bool heapam_scan_sample_next_tuple ( TableScanDesc  scan,
SampleScanState scanstate,
TupleTableSlot slot 
)
static

Definition at line 2291 of file heapam_handler.c.

References Assert, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, CHECK_FOR_INTERRUPTS, ExecClearTuple(), ExecStoreBufferHeapTuple(), HeapCheckForSerializableConflictOut(), ItemIdGetLength, ItemIdIsNormal, ItemPointerSet, LockBuffer(), TsmRoutine::NextSampleTuple, OffsetNumberIsValid, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageIsAllVisible, pgstat_count_heap_getnext, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, SampleHeapTupleVisible(), SO_ALLOW_PAGEMODE, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, SnapshotData::takenDuringRecovery, and SampleScanState::tsmroutine.

Referenced by SampleHeapTupleVisible().

2293 {
2294  HeapScanDesc hscan = (HeapScanDesc) scan;
2295  TsmRoutine *tsm = scanstate->tsmroutine;
2296  BlockNumber blockno = hscan->rs_cblock;
2297  bool pagemode = (scan->rs_flags & SO_ALLOW_PAGEMODE) != 0;
2298 
2299  Page page;
2300  bool all_visible;
2301  OffsetNumber maxoffset;
2302 
2303  /*
2304  * When not using pagemode, we must lock the buffer during tuple
2305  * visibility checks.
2306  */
2307  if (!pagemode)
2309 
2310  page = (Page) BufferGetPage(hscan->rs_cbuf);
2311  all_visible = PageIsAllVisible(page) &&
2313  maxoffset = PageGetMaxOffsetNumber(page);
2314 
2315  for (;;)
2316  {
2317  OffsetNumber tupoffset;
2318 
2320 
2321  /* Ask the tablesample method which tuples to check on this page. */
2322  tupoffset = tsm->NextSampleTuple(scanstate,
2323  blockno,
2324  maxoffset);
2325 
2326  if (OffsetNumberIsValid(tupoffset))
2327  {
2328  ItemId itemid;
2329  bool visible;
2330  HeapTuple tuple = &(hscan->rs_ctup);
2331 
2332  /* Skip invalid tuple pointers. */
2333  itemid = PageGetItemId(page, tupoffset);
2334  if (!ItemIdIsNormal(itemid))
2335  continue;
2336 
2337  tuple->t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2338  tuple->t_len = ItemIdGetLength(itemid);
2339  ItemPointerSet(&(tuple->t_self), blockno, tupoffset);
2340 
2341 
2342  if (all_visible)
2343  visible = true;
2344  else
2345  visible = SampleHeapTupleVisible(scan, hscan->rs_cbuf,
2346  tuple, tupoffset);
2347 
2348  /* in pagemode, heapgetpage did this for us */
2349  if (!pagemode)
2350  HeapCheckForSerializableConflictOut(visible, scan->rs_rd, tuple,
2351  hscan->rs_cbuf, scan->rs_snapshot);
2352 
2353  /* Try next tuple from same page. */
2354  if (!visible)
2355  continue;
2356 
2357  /* Found visible tuple, return it. */
2358  if (!pagemode)
2360 
2361  ExecStoreBufferHeapTuple(tuple, slot, hscan->rs_cbuf);
2362 
2363  /* Count successfully-fetched tuples as heap fetches */
2365 
2366  return true;
2367  }
2368  else
2369  {
2370  /*
2371  * If we get here, it means we've exhausted the items on this page
2372  * and it's time to move to the next.
2373  */
2374  if (!pagemode)
2376 
2377  ExecClearTuple(slot);
2378  return false;
2379  }
2380  }
2381 
2382  Assert(0);
2383 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
BlockNumber rs_cblock
Definition: heapam.h:59
#define PageIsAllVisible(page)
Definition: bufpage.h:385
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:425
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
uint32 BlockNumber
Definition: block.h:31
uint32 rs_flags
Definition: relscan.h:43
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
HeapTupleData rs_ctup
Definition: heapam.h:66
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
NextSampleTuple_function NextSampleTuple
Definition: tsmapi.h:74
ItemPointerData t_self
Definition: htup.h:65
static bool SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer, HeapTuple tuple, OffsetNumber tupoffset)
uint32 t_len
Definition: htup.h:64
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1362
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:8994
Buffer rs_cbuf
Definition: heapam.h:60
#define Assert(condition)
Definition: c.h:738
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
bool takenDuringRecovery
Definition: snapshot.h:184
struct TsmRoutine * tsmroutine
Definition: execnodes.h:1358
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1401
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127

◆ heapam_slot_callbacks()

static const TupleTableSlotOps* heapam_slot_callbacks ( Relation  relation)
static

Definition at line 66 of file heapam_handler.c.

References TTSOpsBufferHeapTuple.

Referenced by SampleHeapTupleVisible().

67 {
	/* Heap always uses buffer-backed heap-tuple slots, for any relation. */
68  return &TTSOpsBufferHeapTuple;
69 }
const TupleTableSlotOps TTSOpsBufferHeapTuple
Definition: execTuples.c:86

◆ heapam_tuple_complete_speculative()

static void heapam_tuple_complete_speculative ( Relation  relation,
TupleTableSlot slot,
uint32  specToken,
bool  succeeded 
)
static

Definition at line 282 of file heapam_handler.c.

References ExecFetchSlotHeapTuple(), heap_abort_speculative(), heap_finish_speculative(), pfree(), and TupleTableSlot::tts_tid.

Referenced by SampleHeapTupleVisible().

284 {
285  bool shouldFree = true;
286  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
287 
288  /* adjust the tuple's state accordingly */
289  if (succeeded)
290  heap_finish_speculative(relation, &slot->tts_tid);
291  else
292  heap_abort_speculative(relation, &slot->tts_tid);
293 
294  if (shouldFree)
295  pfree(tuple);
296 }
void heap_abort_speculative(Relation relation, ItemPointer tid)
Definition: heapam.c:5537
void pfree(void *pointer)
Definition: mcxt.c:1056
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
Definition: execTuples.c:1614
void heap_finish_speculative(Relation relation, ItemPointer tid)
Definition: heapam.c:5446
ItemPointerData tts_tid
Definition: tuptable.h:130

◆ heapam_tuple_delete()

static TM_Result heapam_tuple_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  snapshot,
Snapshot  crosscheck,
bool  wait,
TM_FailureData tmfd,
bool  changingPart 
)
static

Definition at line 299 of file heapam_handler.c.

References heap_delete().

Referenced by SampleHeapTupleVisible().

302 {
303  /*
304  * Currently Deleting of index tuples are handled at vacuum, in case if
305  * the storage itself is cleaning the dead tuples by itself, it is the
306  * time to call the index tuple deletion also.
307  */
308  return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart);
309 }
TM_Result heap_delete(Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
Definition: heapam.c:2413

◆ heapam_tuple_insert()

static void heapam_tuple_insert ( Relation  relation,
TupleTableSlot slot,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)
static

Definition at line 240 of file heapam_handler.c.

References ExecFetchSlotHeapTuple(), heap_insert(), ItemPointerCopy, pfree(), RelationGetRelid, HeapTupleData::t_self, HeapTupleData::t_tableOid, TupleTableSlot::tts_tableOid, and TupleTableSlot::tts_tid.

Referenced by SampleHeapTupleVisible().

242 {
243  bool shouldFree = true;
244  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
245 
246  /* Update the tuple with table oid */
247  slot->tts_tableOid = RelationGetRelid(relation);
248  tuple->t_tableOid = slot->tts_tableOid;
249 
250  /* Perform the insertion, and copy the resulting ItemPointer */
251  heap_insert(relation, tuple, cid, options, bistate);
252  ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
253 
254  if (shouldFree)
255  pfree(tuple);
256 }
void heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
Definition: heapam.c:1831
Oid tts_tableOid
Definition: tuptable.h:131
void pfree(void *pointer)
Definition: mcxt.c:1056
ItemPointerData t_self
Definition: htup.h:65
Oid t_tableOid
Definition: htup.h:66
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
Definition: execTuples.c:1614
ItemPointerData tts_tid
Definition: tuptable.h:130
#define RelationGetRelid(relation)
Definition: rel.h:456
#define ItemPointerCopy(fromPointer, toPointer)
Definition: itemptr.h:161

◆ heapam_tuple_insert_speculative()

static void heapam_tuple_insert_speculative ( Relation  relation,
TupleTableSlot slot,
CommandId  cid,
int  options,
BulkInsertState  bistate,
uint32  specToken 
)
static

Definition at line 259 of file heapam_handler.c.

References ExecFetchSlotHeapTuple(), heap_insert(), HEAP_INSERT_SPECULATIVE, HeapTupleHeaderSetSpeculativeToken, ItemPointerCopy, pfree(), RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_self, HeapTupleData::t_tableOid, TupleTableSlot::tts_tableOid, and TupleTableSlot::tts_tid.

Referenced by SampleHeapTupleVisible().

262 {
263  bool shouldFree = true;
264  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
265 
266  /* Update the tuple with table oid */
267  slot->tts_tableOid = RelationGetRelid(relation);
268  tuple->t_tableOid = slot->tts_tableOid;
269 
270  HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);
272 
273  /* Perform the insertion, and copy the resulting ItemPointer */
274  heap_insert(relation, tuple, cid, options, bistate);
275  ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
276 
277  if (shouldFree)
278  pfree(tuple);
279 }
void heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
Definition: heapam.c:1831
Oid tts_tableOid
Definition: tuptable.h:131
#define HeapTupleHeaderSetSpeculativeToken(tup, token)
Definition: htup_details.h:440
#define HEAP_INSERT_SPECULATIVE
Definition: heapam.h:37
HeapTupleHeader t_data
Definition: htup.h:68
void pfree(void *pointer)
Definition: mcxt.c:1056
ItemPointerData t_self
Definition: htup.h:65
Oid t_tableOid
Definition: htup.h:66
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
Definition: execTuples.c:1614
ItemPointerData tts_tid
Definition: tuptable.h:130
#define RelationGetRelid(relation)
Definition: rel.h:456
#define ItemPointerCopy(fromPointer, toPointer)
Definition: itemptr.h:161

◆ heapam_tuple_lock()

static TM_Result heapam_tuple_lock ( Relation  relation,
ItemPointer  tid,
Snapshot  snapshot,
TupleTableSlot slot,
CommandId  cid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
uint8  flags,
TM_FailureData tmfd 
)
static

Definition at line 347 of file heapam_handler.c.

References Assert, BufferHeapTupleTableSlot::base, BufferIsValid, TM_FailureData::cmax, ConditionalXactLockTableWait(), TM_FailureData::ctid, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), errmsg_internal(), ERROR, ExecStorePinnedBufferHeapTuple(), heap_fetch(), heap_lock_tuple(), HeapTupleHeaderGetCmin(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIsSpeculative, InitDirtySnapshot, ItemPointerEquals(), ItemPointerIndicatesMovedPartitions, LockWaitBlock, LockWaitError, LockWaitSkip, RelationGetRelationName, RelationGetRelid, ReleaseBuffer(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_self, HeapTupleData::t_tableOid, TM_Deleted, TM_SelfModified, TM_Updated, TM_WouldBlock, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), TransactionIdIsValid, TM_FailureData::traversed, TTS_IS_BUFFERTUPLE, TupleTableSlot::tts_tableOid, HeapTupleTableSlot::tupdata, TUPLE_LOCK_FLAG_FIND_LAST_VERSION, TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS, XactLockTableWait(), XLTW_FetchUpdated, TM_FailureData::xmax, SnapshotData::xmax, and SnapshotData::xmin.

Referenced by SampleHeapTupleVisible().

351 {
353  TM_Result result;
354  Buffer buffer;
355  HeapTuple tuple = &bslot->base.tupdata;
356  bool follow_updates;
357 
358  follow_updates = (flags & TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS) != 0;
359  tmfd->traversed = false;
360 
361  Assert(TTS_IS_BUFFERTUPLE(slot));
362 
363 tuple_lock_retry:
364  tuple->t_self = *tid;
365  result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
366  follow_updates, &buffer, tmfd);
367 
368  if (result == TM_Updated &&
370  {
371  ReleaseBuffer(buffer);
372  /* Should not encounter speculative tuple on recheck */
374 
375  if (!ItemPointerEquals(&tmfd->ctid, &tuple->t_self))
376  {
377  SnapshotData SnapshotDirty;
378  TransactionId priorXmax;
379 
380  /* it was updated, so look at the updated version */
381  *tid = tmfd->ctid;
382  /* updated row should have xmin matching this xmax */
383  priorXmax = tmfd->xmax;
384 
385  /* signal that a tuple later in the chain is getting locked */
386  tmfd->traversed = true;
387 
388  /*
389  * fetch target tuple
390  *
391  * Loop here to deal with updated or busy tuples
392  */
393  InitDirtySnapshot(SnapshotDirty);
394  for (;;)
395  {
397  ereport(ERROR,
398  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
399  errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
400 
401  tuple->t_self = *tid;
402  if (heap_fetch(relation, &SnapshotDirty, tuple, &buffer))
403  {
404  /*
405  * If xmin isn't what we're expecting, the slot must have
406  * been recycled and reused for an unrelated tuple. This
407  * implies that the latest version of the row was deleted,
408  * so we need do nothing. (Should be safe to examine xmin
409  * without getting buffer's content lock. We assume
410  * reading a TransactionId to be atomic, and Xmin never
411  * changes in an existing tuple, except to invalid or
412  * frozen, and neither of those can match priorXmax.)
413  */
415  priorXmax))
416  {
417  ReleaseBuffer(buffer);
418  return TM_Deleted;
419  }
420 
421  /* otherwise xmin should not be dirty... */
422  if (TransactionIdIsValid(SnapshotDirty.xmin))
423  ereport(ERROR,
425  errmsg_internal("t_xmin is uncommitted in tuple to be updated")));
426 
427  /*
428  * If tuple is being updated by other transaction then we
429  * have to wait for its commit/abort, or die trying.
430  */
431  if (TransactionIdIsValid(SnapshotDirty.xmax))
432  {
433  ReleaseBuffer(buffer);
434  switch (wait_policy)
435  {
436  case LockWaitBlock:
437  XactLockTableWait(SnapshotDirty.xmax,
438  relation, &tuple->t_self,
440  break;
441  case LockWaitSkip:
442  if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
443  /* skip instead of waiting */
444  return TM_WouldBlock;
445  break;
446  case LockWaitError:
447  if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
448  ereport(ERROR,
449  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
450  errmsg("could not obtain lock on row in relation \"%s\"",
451  RelationGetRelationName(relation))));
452  break;
453  }
454  continue; /* loop back to repeat heap_fetch */
455  }
456 
457  /*
458  * If tuple was inserted by our own transaction, we have
459  * to check cmin against cid: cmin >= current CID means
460  * our command cannot see the tuple, so we should ignore
461  * it. Otherwise heap_lock_tuple() will throw an error,
462  * and so would any later attempt to update or delete the
463  * tuple. (We need not check cmax because
464  * HeapTupleSatisfiesDirty will consider a tuple deleted
465  * by our transaction dead, regardless of cmax.) We just
466  * checked that priorXmax == xmin, so we can test that
467  * variable instead of doing HeapTupleHeaderGetXmin again.
468  */
469  if (TransactionIdIsCurrentTransactionId(priorXmax) &&
470  HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
471  {
472  tmfd->xmax = priorXmax;
473 
474  /*
475  * Cmin is the problematic value, so store that. See
476  * above.
477  */
478  tmfd->cmax = HeapTupleHeaderGetCmin(tuple->t_data);
479  ReleaseBuffer(buffer);
480  return TM_SelfModified;
481  }
482 
483  /*
484  * This is a live tuple, so try to lock it again.
485  */
486  ReleaseBuffer(buffer);
487  goto tuple_lock_retry;
488  }
489 
490  /*
491  * If the referenced slot was actually empty, the latest
492  * version of the row must have been deleted, so we need do
493  * nothing.
494  */
495  if (tuple->t_data == NULL)
496  {
497  return TM_Deleted;
498  }
499 
500  /*
501  * As above, if xmin isn't what we're expecting, do nothing.
502  */
504  priorXmax))
505  {
506  if (BufferIsValid(buffer))
507  ReleaseBuffer(buffer);
508  return TM_Deleted;
509  }
510 
511  /*
512  * If we get here, the tuple was found but failed
513  * SnapshotDirty. Assuming the xmin is either a committed xact
514  * or our own xact (as it certainly should be if we're trying
515  * to modify the tuple), this must mean that the row was
516  * updated or deleted by either a committed xact or our own
517  * xact. If it was deleted, we can ignore it; if it was
518  * updated then chain up to the next version and repeat the
519  * whole process.
520  *
521  * As above, it should be safe to examine xmax and t_ctid
522  * without the buffer content lock, because they can't be
523  * changing.
524  */
525  if (ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
526  {
527  /* deleted, so forget about it */
528  if (BufferIsValid(buffer))
529  ReleaseBuffer(buffer);
530  return TM_Deleted;
531  }
532 
533  /* updated, so look at the updated row */
534  *tid = tuple->t_data->t_ctid;
535  /* updated row should have xmin matching this xmax */
536  priorXmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
537  if (BufferIsValid(buffer))
538  ReleaseBuffer(buffer);
539  /* loop back to fetch next in chain */
540  }
541  }
542  else
543  {
544  /* tuple was deleted, so give up */
545  return TM_Deleted;
546  }
547  }
548 
549  slot->tts_tableOid = RelationGetRelid(relation);
550  tuple->t_tableOid = slot->tts_tableOid;
551 
552  /* store in slot, transferring existing pin */
553  ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);
554 
555  return result;
556 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
Oid tts_tableOid
Definition: tuptable.h:131
ItemPointerData ctid
Definition: tableam.h:124
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define InitDirtySnapshot(snapshotdata)
Definition: snapmgr.h:76
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:513
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:854
CommandId HeapTupleHeaderGetCmin(HeapTupleHeader tup)
Definition: combocid.c:104
bool heap_fetch(Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf)
Definition: heapam.c:1373
CommandId cmax
Definition: tableam.h:126
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:429
int errcode(int sqlerrcode)
Definition: elog.c:610
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3483
#define TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS
Definition: tableam.h:138
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:697
HeapTupleHeader t_data
Definition: htup.h:68
TransactionId xmax
Definition: tableam.h:125
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
#define RelationGetRelationName(relation)
Definition: rel.h:490
Oid t_tableOid
Definition: htup.h:66
TransactionId xmax
Definition: snapshot.h:158
TransactionId xmin
Definition: snapshot.h:157
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:45
TM_Result
Definition: tableam.h:69
#define TTS_IS_BUFFERTUPLE(slot)
Definition: tuptable.h:231
#define ItemPointerIndicatesMovedPartitions(pointer)
Definition: itemptr.h:184
#define ereport(elevel,...)
Definition: elog.h:144
int errmsg_internal(const char *fmt,...)
Definition: elog.c:911
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:624
#define Assert(condition)
Definition: c.h:738
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define TUPLE_LOCK_FLAG_FIND_LAST_VERSION
Definition: tableam.h:140
TupleTableSlot * ExecStorePinnedBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1388
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, TM_FailureData *tmfd)
Definition: heapam.c:3944
int errmsg(const char *fmt,...)
Definition: elog.c:824
HeapTupleTableSlot base
Definition: tuptable.h:259
#define TransactionIdIsValid(xid)
Definition: transam.h:41
bool traversed
Definition: tableam.h:127
HeapTupleData tupdata
Definition: tuptable.h:253
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:456

◆ heapam_tuple_satisfies_snapshot()

static bool heapam_tuple_satisfies_snapshot ( Relation  rel,
TupleTableSlot slot,
Snapshot  snapshot 
)
static

Definition at line 212 of file heapam_handler.c.

References Assert, BufferHeapTupleTableSlot::base, BufferHeapTupleTableSlot::buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferIsValid, HeapTupleSatisfiesVisibility(), LockBuffer(), TTS_IS_BUFFERTUPLE, and HeapTupleTableSlot::tuple.

Referenced by SampleHeapTupleVisible().

214 {
216  bool res;
217 
218  Assert(TTS_IS_BUFFERTUPLE(slot));
219  Assert(BufferIsValid(bslot->buffer));
220 
221  /*
222  * We need buffer pin and lock to call HeapTupleSatisfiesVisibility.
223  * Caller should be holding pin, but not lock.
224  */
226  res = HeapTupleSatisfiesVisibility(bslot->base.tuple, snapshot,
227  bslot->buffer);
229 
230  return res;
231 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
HeapTuple tuple
Definition: tuptable.h:250
#define TTS_IS_BUFFERTUPLE(slot)
Definition: tuptable.h:231
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
#define Assert(condition)
Definition: c.h:738
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
HeapTupleTableSlot base
Definition: tuptable.h:259
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heapam_tuple_tid_valid()

static bool heapam_tuple_tid_valid ( TableScanDesc  scan,
ItemPointer  tid 
)
static

Definition at line 203 of file heapam_handler.c.

References ItemPointerGetBlockNumber, ItemPointerIsValid, and HeapScanDescData::rs_nblocks.

Referenced by SampleHeapTupleVisible().

204 {
205  HeapScanDesc hscan = (HeapScanDesc) scan;
206 
207  return ItemPointerIsValid(tid) &&
209 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
BlockNumber rs_nblocks
Definition: heapam.h:52
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98

◆ heapam_tuple_update()

static TM_Result heapam_tuple_update ( Relation  relation,
ItemPointer  otid,
TupleTableSlot slot,
CommandId  cid,
Snapshot  snapshot,
Snapshot  crosscheck,
bool  wait,
TM_FailureData tmfd,
LockTupleMode lockmode,
bool update_indexes 
)
static

Definition at line 313 of file heapam_handler.c.

References ExecFetchSlotHeapTuple(), heap_update(), HeapTupleIsHeapOnly, ItemPointerCopy, pfree(), RelationGetRelid, HeapTupleData::t_self, HeapTupleData::t_tableOid, TM_Ok, TupleTableSlot::tts_tableOid, and TupleTableSlot::tts_tid.

Referenced by SampleHeapTupleVisible().

317 {
318  bool shouldFree = true;
319  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
320  TM_Result result;
321 
322  /* Update the tuple with table oid */
323  slot->tts_tableOid = RelationGetRelid(relation);
324  tuple->t_tableOid = slot->tts_tableOid;
325 
326  result = heap_update(relation, otid, tuple, cid, crosscheck, wait,
327  tmfd, lockmode);
328  ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
329 
330  /*
331  * Decide whether new index entries are needed for the tuple
332  *
333  * Note: heap_update returns the tid (location) of the new tuple in the
334  * t_self field.
335  *
336  * If it's a HOT update, we mustn't insert new index entries.
337  */
338  *update_indexes = result == TM_Ok && !HeapTupleIsHeapOnly(tuple);
339 
340  if (shouldFree)
341  pfree(tuple);
342 
343  return result;
344 }
Oid tts_tableOid
Definition: tuptable.h:131
void pfree(void *pointer)
Definition: mcxt.c:1056
ItemPointerData t_self
Definition: htup.h:65
Oid t_tableOid
Definition: htup.h:66
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
Definition: execTuples.c:1614
TM_Result
Definition: tableam.h:69
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:685
Definition: tableam.h:75
TM_Result heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode)
Definition: heapam.c:2864
ItemPointerData tts_tid
Definition: tuptable.h:130
#define RelationGetRelid(relation)
Definition: rel.h:456
#define ItemPointerCopy(fromPointer, toPointer)
Definition: itemptr.h:161

◆ reform_and_rewrite_tuple()

static void reform_and_rewrite_tuple ( HeapTuple  tuple,
Relation  OldHeap,
Relation  NewHeap,
Datum values,
bool isnull,
RewriteState  rwstate 
)
static

Definition at line 2408 of file heapam_handler.c.

References heap_deform_tuple(), heap_form_tuple(), heap_freetuple(), i, TupleDescData::natts, RelationGetDescr, rewrite_heap_tuple(), and TupleDescAttr.

Referenced by heapam_relation_copy_for_cluster().

2411 {
2412  TupleDesc oldTupDesc = RelationGetDescr(OldHeap);
2413  TupleDesc newTupDesc = RelationGetDescr(NewHeap);
2414  HeapTuple copiedTuple;
2415  int i;
2416 
2417  heap_deform_tuple(tuple, oldTupDesc, values, isnull);
2418 
2419  /* Be sure to null out any dropped columns */
2420  for (i = 0; i < newTupDesc->natts; i++)
2421  {
2422  if (TupleDescAttr(newTupDesc, i)->attisdropped)
2423  isnull[i] = true;
2424  }
2425 
2426  copiedTuple = heap_form_tuple(newTupDesc, values, isnull);
2427 
2428  /* The heap rewrite module does the rest */
2429  rewrite_heap_tuple(rwstate, tuple, copiedTuple);
2430 
2431  heap_freetuple(copiedTuple);
2432 }
#define RelationGetDescr(relation)
Definition: rel.h:482
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:1020
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition: heaptuple.c:1249
static Datum values[MAXATTR]
Definition: bootstrap.c:167
int i
void rewrite_heap_tuple(RewriteState state, HeapTuple old_tuple, HeapTuple new_tuple)
Definition: rewriteheap.c:363

◆ SampleHeapTupleVisible()

static bool SampleHeapTupleVisible ( TableScanDesc  scan,
Buffer  buffer,
HeapTuple  tuple,
OffsetNumber  tupoffset 
)
static

Definition at line 2438 of file heapam_handler.c.

References heap_beginscan(), heap_compute_xid_horizon_for_tuples(), heap_endscan(), heap_fetch_toast_slice(), heap_get_latest_tid(), heap_getnextslot(), heap_multi_insert(), heap_rescan(), heap_vacuum_rel(), heapam_estimate_rel_size(), heapam_fetch_row_version(), heapam_index_build_range_scan(), heapam_index_fetch_begin(), heapam_index_fetch_end(), heapam_index_fetch_reset(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_data(), heapam_relation_copy_for_cluster(), heapam_relation_needs_toast_table(), heapam_relation_nontransactional_truncate(), heapam_relation_set_new_filenode(), heapam_relation_toast_am(), heapam_scan_analyze_next_block(), heapam_scan_analyze_next_tuple(), heapam_scan_bitmap_next_block(), heapam_scan_bitmap_next_tuple(), heapam_scan_sample_next_block(), heapam_scan_sample_next_tuple(), heapam_slot_callbacks(), heapam_tuple_complete_speculative(), heapam_tuple_delete(), heapam_tuple_insert(), heapam_tuple_insert_speculative(), heapam_tuple_lock(), heapam_tuple_satisfies_snapshot(), heapam_tuple_tid_valid(), heapam_tuple_update(), HeapTupleSatisfiesVisibility(), TableScanDescData::rs_flags, HeapScanDescData::rs_ntuples, TableScanDescData::rs_snapshot, HeapScanDescData::rs_vistuples, SO_ALLOW_PAGEMODE, T_TableAmRoutine, table_block_parallelscan_estimate(), table_block_parallelscan_initialize(), table_block_parallelscan_reinitialize(), table_block_relation_size(), and TableAmRoutine::type.

Referenced by heapam_scan_sample_next_tuple().

2441 {
2442  HeapScanDesc hscan = (HeapScanDesc) scan;
2443 
2444  if (scan->rs_flags & SO_ALLOW_PAGEMODE)
2445  {
2446  /*
2447  * In pageatatime mode, heapgetpage() already did visibility checks,
2448  * so just look at the info it left in rs_vistuples[].
2449  *
2450  * We use a binary search over the known-sorted array. Note: we could
2451  * save some effort if we insisted that NextSampleTuple select tuples
2452  * in increasing order, but it's not clear that there would be enough
2453  * gain to justify the restriction.
2454  */
2455  int start = 0,
2456  end = hscan->rs_ntuples - 1;
2457 
2458  while (start <= end)
2459  {
2460  int mid = (start + end) / 2;
2461  OffsetNumber curoffset = hscan->rs_vistuples[mid];
2462 
2463  if (tupoffset == curoffset)
2464  return true;
2465  else if (tupoffset < curoffset)
2466  end = mid - 1;
2467  else
2468  start = mid + 1;
2469  }
2470 
2471  return false;
2472  }
2473  else
2474  {
2475  /* Otherwise, we have to check the tuple individually. */
2476  return HeapTupleSatisfiesVisibility(tuple, scan->rs_snapshot,
2477  buffer);
2478  }
2479 }
uint32 rs_flags
Definition: relscan.h:43
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
uint16 OffsetNumber
Definition: off.h:24
int rs_ntuples
Definition: heapam.h:70
OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]
Definition: heapam.h:71
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

Variable Documentation

◆ heapam_methods

static const TableAmRoutine heapam_methods
static

Definition at line 57 of file heapam_handler.c.

Referenced by GetHeapamTableAmRoutine().