heapam_handler.c File Reference
#include "postgres.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heaptoast.h"
#include "access/multixact.h"
#include "access/rewriteheap.h"
#include "access/syncscan.h"
#include "access/tableam.h"
#include "access/tsmapi.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/index.h"
#include "catalog/storage.h"
#include "catalog/storage_xlog.h"
#include "commands/progress.h"
#include "executor/executor.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "utils/builtins.h"
#include "utils/rel.h"

Macros

#define HEAP_OVERHEAD_BYTES_PER_TUPLE    (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))
 
#define HEAP_USABLE_BYTES_PER_PAGE    (BLCKSZ - SizeOfPageHeaderData)
 

Functions

static void reform_and_rewrite_tuple (HeapTuple tuple, Relation OldHeap, Relation NewHeap, Datum *values, bool *isnull, RewriteState rwstate)
 
static bool SampleHeapTupleVisible (TableScanDesc scan, Buffer buffer, HeapTuple tuple, OffsetNumber tupoffset)
 
static BlockNumber heapam_scan_get_blocks_done (HeapScanDesc hscan)
 
static const TupleTableSlotOps * heapam_slot_callbacks (Relation relation)
 
static IndexFetchTableData * heapam_index_fetch_begin (Relation rel)
 
static void heapam_index_fetch_reset (IndexFetchTableData *scan)
 
static void heapam_index_fetch_end (IndexFetchTableData *scan)
 
static bool heapam_index_fetch_tuple (struct IndexFetchTableData *scan, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, bool *call_again, bool *all_dead)
 
static bool heapam_fetch_row_version (Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot)
 
static bool heapam_tuple_tid_valid (TableScanDesc scan, ItemPointer tid)
 
static bool heapam_tuple_satisfies_snapshot (Relation rel, TupleTableSlot *slot, Snapshot snapshot)
 
static void heapam_tuple_insert (Relation relation, TupleTableSlot *slot, CommandId cid, int options, BulkInsertState bistate)
 
static void heapam_tuple_insert_speculative (Relation relation, TupleTableSlot *slot, CommandId cid, int options, BulkInsertState bistate, uint32 specToken)
 
static void heapam_tuple_complete_speculative (Relation relation, TupleTableSlot *slot, uint32 specToken, bool succeeded)
 
static TM_Result heapam_tuple_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
 
static TM_Result heapam_tuple_update (Relation relation, ItemPointer otid, TupleTableSlot *slot, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
 
static TM_Result heapam_tuple_lock (Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, uint8 flags, TM_FailureData *tmfd)
 
static void heapam_relation_set_new_filelocator (Relation rel, const RelFileLocator *newrlocator, char persistence, TransactionId *freezeXid, MultiXactId *minmulti)
 
static void heapam_relation_nontransactional_truncate (Relation rel)
 
static void heapam_relation_copy_data (Relation rel, const RelFileLocator *newrlocator)
 
static void heapam_relation_copy_for_cluster (Relation OldHeap, Relation NewHeap, Relation OldIndex, bool use_sort, TransactionId OldestXmin, TransactionId *xid_cutoff, MultiXactId *multi_cutoff, double *num_tuples, double *tups_vacuumed, double *tups_recently_dead)
 
static bool heapam_scan_analyze_next_block (TableScanDesc scan, BlockNumber blockno, BufferAccessStrategy bstrategy)
 
static bool heapam_scan_analyze_next_tuple (TableScanDesc scan, TransactionId OldestXmin, double *liverows, double *deadrows, TupleTableSlot *slot)
 
static double heapam_index_build_range_scan (Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo, bool allow_sync, bool anyvisible, bool progress, BlockNumber start_blockno, BlockNumber numblocks, IndexBuildCallback callback, void *callback_state, TableScanDesc scan)
 
static void heapam_index_validate_scan (Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo, Snapshot snapshot, ValidateIndexState *state)
 
static bool heapam_relation_needs_toast_table (Relation rel)
 
static Oid heapam_relation_toast_am (Relation rel)
 
static void heapam_estimate_rel_size (Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
 
static bool heapam_scan_bitmap_next_block (TableScanDesc scan, TBMIterateResult *tbmres)
 
static bool heapam_scan_bitmap_next_tuple (TableScanDesc scan, TBMIterateResult *tbmres, TupleTableSlot *slot)
 
static bool heapam_scan_sample_next_block (TableScanDesc scan, SampleScanState *scanstate)
 
static bool heapam_scan_sample_next_tuple (TableScanDesc scan, SampleScanState *scanstate, TupleTableSlot *slot)
 
const TableAmRoutine * GetHeapamTableAmRoutine (void)
 
Datum heap_tableam_handler (PG_FUNCTION_ARGS)
 

Variables

static const TableAmRoutine heapam_methods
 

Macro Definition Documentation

◆ HEAP_OVERHEAD_BYTES_PER_TUPLE

#define HEAP_OVERHEAD_BYTES_PER_TUPLE    (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))

Definition at line 2092 of file heapam_handler.c.

◆ HEAP_USABLE_BYTES_PER_PAGE

#define HEAP_USABLE_BYTES_PER_PAGE    (BLCKSZ - SizeOfPageHeaderData)

Definition at line 2094 of file heapam_handler.c.
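
These two macros are consumed by heapam_estimate_rel_size(), which hands them to the generic estimator table_block_relation_estimate_size(). The sketch below is illustrative only (not code from this file): it shows roughly how the macros combine into a tuples-per-page density. "data_width" stands for the estimated average tuple data width, which the real code derives from relation statistics.

/*
 * Illustrative sketch only: approximate tuple density per heap page.
 * data_width is a hypothetical average data width in bytes.
 */
static double
heap_tuples_per_page_sketch(int32 data_width)
{
	return (double) HEAP_USABLE_BYTES_PER_PAGE /
		(data_width + HEAP_OVERHEAD_BYTES_PER_TUPLE);
}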

Function Documentation

◆ GetHeapamTableAmRoutine()

const TableAmRoutine* GetHeapamTableAmRoutine ( void  )

Definition at line 2602 of file heapam_handler.c.

2603 {
2604  return &heapam_methods;
2605 }

References heapam_methods.

Referenced by formrdesc(), and heap_getnext().
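
Because there is only one static copy of heapam_methods, the returned pointer can be compared for identity; heap_getnext(), for example, uses it to verify that a scan descriptor really belongs to a heap relation. A minimal sketch, assuming rel is an already-opened Relation:

/* sketch: does this relation use the built-in heap AM? */
if (rel->rd_tableam == GetHeapamTableAmRoutine())
{
	/* heap-specific code paths are safe here */
}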

◆ heap_tableam_handler()

Datum heap_tableam_handler ( PG_FUNCTION_ARGS  )

Definition at line 2608 of file heapam_handler.c.

2609 {
2610  PG_RETURN_POINTER(&heapam_methods);
2611 }

References heapam_methods, and PG_RETURN_POINTER.
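
This is the SQL-callable handler function behind the built-in heap access method: it simply hands back the static heapam_methods table. A custom table AM's handler follows the same one-line pattern; in the hypothetical sketch below, my_methods stands for a fully populated TableAmRoutine.

PG_FUNCTION_INFO_V1(my_tableam_handler);

Datum
my_tableam_handler(PG_FUNCTION_ARGS)
{
	PG_RETURN_POINTER(&my_methods);
}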

◆ heapam_estimate_rel_size()

static void heapam_estimate_rel_size ( Relation  rel,
int32 attr_widths,
BlockNumber pages,
double *  tuples,
double *  allvisfrac 
)
static

Definition at line 2098 of file heapam_handler.c.

2101 {
2102  table_block_relation_estimate_size(rel, attr_widths, pages,
2103  tuples, allvisfrac,
2104  HEAP_OVERHEAD_BYTES_PER_TUPLE,
2105  HEAP_USABLE_BYTES_PER_PAGE);
2106 }

References HEAP_OVERHEAD_BYTES_PER_TUPLE, HEAP_USABLE_BYTES_PER_PAGE, and table_block_relation_estimate_size().

◆ heapam_fetch_row_version()

static bool heapam_fetch_row_version ( Relation  relation,
ItemPointer  tid,
Snapshot  snapshot,
TupleTableSlot slot 
)
static

Definition at line 180 of file heapam_handler.c.

184 {
185  BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
186  Buffer buffer;
187 
188  Assert(TTS_IS_BUFFERTUPLE(slot));
189 
190  bslot->base.tupdata.t_self = *tid;
191  if (heap_fetch(relation, snapshot, &bslot->base.tupdata, &buffer, false))
192  {
193  /* store in slot, transferring existing pin */
194  ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata, slot, buffer);
195  slot->tts_tableOid = RelationGetRelid(relation);
196 
197  return true;
198  }
199 
200  return false;
201 }

References Assert(), ExecStorePinnedBufferHeapTuple(), heap_fetch(), RelationGetRelid, TTS_IS_BUFFERTUPLE, and TupleTableSlot::tts_tableOid.
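
Callers do not normally invoke this function directly; it is reached through the tableam wrapper table_tuple_fetch_row_version(). A minimal sketch, assuming an open Relation rel, a target ItemPointerData tid, and a Snapshot snapshot already exist:

/* sketch: fetching a specific tuple version through the tableam API */
TupleTableSlot *slot = table_slot_create(rel, NULL);

if (table_tuple_fetch_row_version(rel, &tid, snapshot, slot))
{
	/* the tuple version is visible; consume the slot's contents */
}

ExecDropSingleTupleTableSlot(slot);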

◆ heapam_index_build_range_scan()

static double heapam_index_build_range_scan ( Relation  heapRelation,
Relation  indexRelation,
IndexInfo indexInfo,
bool  allow_sync,
bool  anyvisible,
bool  progress,
BlockNumber  start_blockno,
BlockNumber  numblocks,
IndexBuildCallback  callback,
void *  callback_state,
TableScanDesc  scan 
)
static

Definition at line 1166 of file heapam_handler.c.

1177 {
1178  HeapScanDesc hscan;
1179  bool is_system_catalog;
1180  bool checking_uniqueness;
1181  HeapTuple heapTuple;
1182  Datum values[INDEX_MAX_KEYS];
1183  bool isnull[INDEX_MAX_KEYS];
1184  double reltuples;
1185  ExprState *predicate;
1186  TupleTableSlot *slot;
1187  EState *estate;
1188  ExprContext *econtext;
1189  Snapshot snapshot;
1190  bool need_unregister_snapshot = false;
1191  TransactionId OldestXmin;
1192  BlockNumber previous_blkno = InvalidBlockNumber;
1193  BlockNumber root_blkno = InvalidBlockNumber;
1194  OffsetNumber root_offsets[MaxHeapTuplesPerPage];
1195 
1196  /*
1197  * sanity checks
1198  */
1199  Assert(OidIsValid(indexRelation->rd_rel->relam));
1200 
1201  /* Remember if it's a system catalog */
1202  is_system_catalog = IsSystemRelation(heapRelation);
1203 
1204  /* See whether we're verifying uniqueness/exclusion properties */
1205  checking_uniqueness = (indexInfo->ii_Unique ||
1206  indexInfo->ii_ExclusionOps != NULL);
1207 
1208  /*
1209  * "Any visible" mode is not compatible with uniqueness checks; make sure
1210  * only one of those is requested.
1211  */
1212  Assert(!(anyvisible && checking_uniqueness));
1213 
1214  /*
1215  * Need an EState for evaluation of index expressions and partial-index
1216  * predicates. Also a slot to hold the current tuple.
1217  */
1218  estate = CreateExecutorState();
1219  econtext = GetPerTupleExprContext(estate);
1220  slot = table_slot_create(heapRelation, NULL);
1221 
1222  /* Arrange for econtext's scan tuple to be the tuple under test */
1223  econtext->ecxt_scantuple = slot;
1224 
1225  /* Set up execution state for predicate, if any. */
1226  predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
1227 
1228  /*
1229  * Prepare for scan of the base relation. In a normal index build, we use
1230  * SnapshotAny because we must retrieve all tuples and do our own time
1231  * qual checks (because we have to index RECENTLY_DEAD tuples). In a
1232  * concurrent build, or during bootstrap, we take a regular MVCC snapshot
1233  * and index whatever's live according to that.
1234  */
1235  OldestXmin = InvalidTransactionId;
1236 
1237  /* okay to ignore lazy VACUUMs here */
1238  if (!IsBootstrapProcessingMode() && !indexInfo->ii_Concurrent)
1239  OldestXmin = GetOldestNonRemovableTransactionId(heapRelation);
1240 
1241  if (!scan)
1242  {
1243  /*
1244  * Serial index build.
1245  *
1246  * Must begin our own heap scan in this case. We may also need to
1247  * register a snapshot whose lifetime is under our direct control.
1248  */
1249  if (!TransactionIdIsValid(OldestXmin))
1250  {
1251  snapshot = RegisterSnapshot(GetTransactionSnapshot());
1252  need_unregister_snapshot = true;
1253  }
1254  else
1255  snapshot = SnapshotAny;
1256 
1257  scan = table_beginscan_strat(heapRelation, /* relation */
1258  snapshot, /* snapshot */
1259  0, /* number of keys */
1260  NULL, /* scan key */
1261  true, /* buffer access strategy OK */
1262  allow_sync); /* syncscan OK? */
1263  }
1264  else
1265  {
1266  /*
1267  * Parallel index build.
1268  *
1269  * Parallel case never registers/unregisters own snapshot. Snapshot
1270  * is taken from parallel heap scan, and is SnapshotAny or an MVCC
1271  * snapshot, based on same criteria as serial case.
1272  */
1273  Assert(!IsBootstrapProcessingMode());
1274  Assert(allow_sync);
1275  snapshot = scan->rs_snapshot;
1276  }
1277 
1278  hscan = (HeapScanDesc) scan;
1279 
1280  /*
1281  * Must have called GetOldestNonRemovableTransactionId() if using
1282  * SnapshotAny. Shouldn't have for an MVCC snapshot. (It's especially
1283  * worth checking this for parallel builds, since ambuild routines that
1284  * support parallel builds must work these details out for themselves.)
1285  */
1286  Assert(snapshot == SnapshotAny || IsMVCCSnapshot(snapshot));
1287  Assert(snapshot == SnapshotAny ? TransactionIdIsValid(OldestXmin) :
1288  !TransactionIdIsValid(OldestXmin));
1289  Assert(snapshot == SnapshotAny || !anyvisible);
1290 
1291  /* Publish number of blocks to scan */
1292  if (progress)
1293  {
1294  BlockNumber nblocks;
1295 
1296  if (hscan->rs_base.rs_parallel != NULL)
1297  {
1298  ParallelBlockTableScanDesc pbscan;
1299 
1300  pbscan = (ParallelBlockTableScanDesc) hscan->rs_base.rs_parallel;
1301  nblocks = pbscan->phs_nblocks;
1302  }
1303  else
1304  nblocks = hscan->rs_nblocks;
1305 
1306  pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_TOTAL,
1307  nblocks);
1308  }
1309 
1310  /* set our scan endpoints */
1311  if (!allow_sync)
1312  heap_setscanlimits(scan, start_blockno, numblocks);
1313  else
1314  {
1315  /* syncscan can only be requested on whole relation */
1316  Assert(start_blockno == 0);
1317  Assert(numblocks == InvalidBlockNumber);
1318  }
1319 
1320  reltuples = 0;
1321 
1322  /*
1323  * Scan all tuples in the base relation.
1324  */
1325  while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
1326  {
1327  bool tupleIsAlive;
1328 
1329  CHECK_FOR_INTERRUPTS();
1330 
1331  /* Report scan progress, if asked to. */
1332  if (progress)
1333  {
1334  BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
1335 
1336  if (blocks_done != previous_blkno)
1337  {
1338  pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE,
1339  blocks_done);
1340  previous_blkno = blocks_done;
1341  }
1342  }
1343 
1344  /*
1345  * When dealing with a HOT-chain of updated tuples, we want to index
1346  * the values of the live tuple (if any), but index it under the TID
1347  * of the chain's root tuple. This approach is necessary to preserve
1348  * the HOT-chain structure in the heap. So we need to be able to find
1349  * the root item offset for every tuple that's in a HOT-chain. When
1350  * first reaching a new page of the relation, call
1351  * heap_get_root_tuples() to build a map of root item offsets on the
1352  * page.
1353  *
1354  * It might look unsafe to use this information across buffer
1355  * lock/unlock. However, we hold ShareLock on the table so no
1356  * ordinary insert/update/delete should occur; and we hold pin on the
1357  * buffer continuously while visiting the page, so no pruning
1358  * operation can occur either.
1359  *
1360  * In cases with only ShareUpdateExclusiveLock on the table, it's
1361  * possible for some HOT tuples to appear that we didn't know about
1362  * when we first read the page. To handle that case, we re-obtain the
1363  * list of root offsets when a HOT tuple points to a root item that we
1364  * don't know about.
1365  *
1366  * Also, although our opinions about tuple liveness could change while
1367  * we scan the page (due to concurrent transaction commits/aborts),
1368  * the chain root locations won't, so this info doesn't need to be
1369  * rebuilt after waiting for another transaction.
1370  *
1371  * Note the implied assumption that there is no more than one live
1372  * tuple per HOT-chain --- else we could create more than one index
1373  * entry pointing to the same root tuple.
1374  */
1375  if (hscan->rs_cblock != root_blkno)
1376  {
1377  Page page = BufferGetPage(hscan->rs_cbuf);
1378 
1379  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1380  heap_get_root_tuples(page, root_offsets);
1381  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1382 
1383  root_blkno = hscan->rs_cblock;
1384  }
1385 
1386  if (snapshot == SnapshotAny)
1387  {
1388  /* do our own time qual check */
1389  bool indexIt;
1390  TransactionId xwait;
1391 
1392  recheck:
1393 
1394  /*
1395  * We could possibly get away with not locking the buffer here,
1396  * since caller should hold ShareLock on the relation, but let's
1397  * be conservative about it. (This remark is still correct even
1398  * with HOT-pruning: our pin on the buffer prevents pruning.)
1399  */
1400  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1401 
1402  /*
1403  * The criteria for counting a tuple as live in this block need to
1404  * match what analyze.c's heapam_scan_analyze_next_tuple() does,
1405  * otherwise CREATE INDEX and ANALYZE may produce wildly different
1406  * reltuples values, e.g. when there are many recently-dead
1407  * tuples.
1408  */
1409  switch (HeapTupleSatisfiesVacuum(heapTuple, OldestXmin,
1410  hscan->rs_cbuf))
1411  {
1412  case HEAPTUPLE_DEAD:
1413  /* Definitely dead, we can ignore it */
1414  indexIt = false;
1415  tupleIsAlive = false;
1416  break;
1417  case HEAPTUPLE_LIVE:
1418  /* Normal case, index and unique-check it */
1419  indexIt = true;
1420  tupleIsAlive = true;
1421  /* Count it as live, too */
1422  reltuples += 1;
1423  break;
1424  case HEAPTUPLE_RECENTLY_DEAD:
1425 
1426  /*
1427  * If tuple is recently deleted then we must index it
1428  * anyway to preserve MVCC semantics. (Pre-existing
1429  * transactions could try to use the index after we finish
1430  * building it, and may need to see such tuples.)
1431  *
1432  * However, if it was HOT-updated then we must only index
1433  * the live tuple at the end of the HOT-chain. Since this
1434  * breaks semantics for pre-existing snapshots, mark the
1435  * index as unusable for them.
1436  *
1437  * We don't count recently-dead tuples in reltuples, even
1438  * if we index them; see heapam_scan_analyze_next_tuple().
1439  */
1440  if (HeapTupleIsHotUpdated(heapTuple))
1441  {
1442  indexIt = false;
1443  /* mark the index as unsafe for old snapshots */
1444  indexInfo->ii_BrokenHotChain = true;
1445  }
1446  else
1447  indexIt = true;
1448  /* In any case, exclude the tuple from unique-checking */
1449  tupleIsAlive = false;
1450  break;
1451  case HEAPTUPLE_INSERT_IN_PROGRESS:
1452 
1453  /*
1454  * In "anyvisible" mode, this tuple is visible and we
1455  * don't need any further checks.
1456  */
1457  if (anyvisible)
1458  {
1459  indexIt = true;
1460  tupleIsAlive = true;
1461  reltuples += 1;
1462  break;
1463  }
1464 
1465  /*
1466  * Since caller should hold ShareLock or better, normally
1467  * the only way to see this is if it was inserted earlier
1468  * in our own transaction. However, it can happen in
1469  * system catalogs, since we tend to release write lock
1470  * before commit there. Give a warning if neither case
1471  * applies.
1472  */
1473  xwait = HeapTupleHeaderGetXmin(heapTuple->t_data);
1474  if (!TransactionIdIsCurrentTransactionId(xwait))
1475  {
1476  if (!is_system_catalog)
1477  elog(WARNING, "concurrent insert in progress within table \"%s\"",
1478  RelationGetRelationName(heapRelation));
1479 
1480  /*
1481  * If we are performing uniqueness checks, indexing
1482  * such a tuple could lead to a bogus uniqueness
1483  * failure. In that case we wait for the inserting
1484  * transaction to finish and check again.
1485  */
1486  if (checking_uniqueness)
1487  {
1488  /*
1489  * Must drop the lock on the buffer before we wait
1490  */
1491  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1492  XactLockTableWait(xwait, heapRelation,
1493  &heapTuple->t_self,
1494  XLTW_InsertIndexUnique);
1495  CHECK_FOR_INTERRUPTS();
1496  goto recheck;
1497  }
1498  }
1499  else
1500  {
1501  /*
1502  * For consistency with
1503  * heapam_scan_analyze_next_tuple(), count
1504  * HEAPTUPLE_INSERT_IN_PROGRESS tuples as live only
1505  * when inserted by our own transaction.
1506  */
1507  reltuples += 1;
1508  }
1509 
1510  /*
1511  * We must index such tuples, since if the index build
1512  * commits then they're good.
1513  */
1514  indexIt = true;
1515  tupleIsAlive = true;
1516  break;
1517  case HEAPTUPLE_DELETE_IN_PROGRESS:
1518 
1519  /*
1520  * As with INSERT_IN_PROGRESS case, this is unexpected
1521  * unless it's our own deletion or a system catalog; but
1522  * in anyvisible mode, this tuple is visible.
1523  */
1524  if (anyvisible)
1525  {
1526  indexIt = true;
1527  tupleIsAlive = false;
1528  reltuples += 1;
1529  break;
1530  }
1531 
1532  xwait = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1533  if (!TransactionIdIsCurrentTransactionId(xwait))
1534  {
1535  if (!is_system_catalog)
1536  elog(WARNING, "concurrent delete in progress within table \"%s\"",
1537  RelationGetRelationName(heapRelation));
1538 
1539  /*
1540  * If we are performing uniqueness checks, assuming
1541  * the tuple is dead could lead to missing a
1542  * uniqueness violation. In that case we wait for the
1543  * deleting transaction to finish and check again.
1544  *
1545  * Also, if it's a HOT-updated tuple, we should not
1546  * index it but rather the live tuple at the end of
1547  * the HOT-chain. However, the deleting transaction
1548  * could abort, possibly leaving this tuple as live
1549  * after all, in which case it has to be indexed. The
1550  * only way to know what to do is to wait for the
1551  * deleting transaction to finish and check again.
1552  */
1553  if (checking_uniqueness ||
1554  HeapTupleIsHotUpdated(heapTuple))
1555  {
1556  /*
1557  * Must drop the lock on the buffer before we wait
1558  */
1559  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1560  XactLockTableWait(xwait, heapRelation,
1561  &heapTuple->t_self,
1562  XLTW_InsertIndexUnique);
1563  CHECK_FOR_INTERRUPTS();
1564  goto recheck;
1565  }
1566 
1567  /*
1568  * Otherwise index it but don't check for uniqueness,
1569  * the same as a RECENTLY_DEAD tuple.
1570  */
1571  indexIt = true;
1572 
1573  /*
1574  * Count HEAPTUPLE_DELETE_IN_PROGRESS tuples as live,
1575  * if they were not deleted by the current
1576  * transaction. That's what
1577  * heapam_scan_analyze_next_tuple() does, and we want
1578  * the behavior to be consistent.
1579  */
1580  reltuples += 1;
1581  }
1582  else if (HeapTupleIsHotUpdated(heapTuple))
1583  {
1584  /*
1585  * It's a HOT-updated tuple deleted by our own xact.
1586  * We can assume the deletion will commit (else the
1587  * index contents don't matter), so treat the same as
1588  * RECENTLY_DEAD HOT-updated tuples.
1589  */
1590  indexIt = false;
1591  /* mark the index as unsafe for old snapshots */
1592  indexInfo->ii_BrokenHotChain = true;
1593  }
1594  else
1595  {
1596  /*
1597  * It's a regular tuple deleted by our own xact. Index
1598  * it, but don't check for uniqueness nor count in
1599  * reltuples, the same as a RECENTLY_DEAD tuple.
1600  */
1601  indexIt = true;
1602  }
1603  /* In any case, exclude the tuple from unique-checking */
1604  tupleIsAlive = false;
1605  break;
1606  default:
1607  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1608  indexIt = tupleIsAlive = false; /* keep compiler quiet */
1609  break;
1610  }
1611 
1612  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1613 
1614  if (!indexIt)
1615  continue;
1616  }
1617  else
1618  {
1619  /* heap_getnext did the time qual check */
1620  tupleIsAlive = true;
1621  reltuples += 1;
1622  }
1623 
1624  MemoryContextReset(econtext->ecxt_per_tuple_memory);
1625 
1626  /* Set up for predicate or expression evaluation */
1627  ExecStoreBufferHeapTuple(heapTuple, slot, hscan->rs_cbuf);
1628 
1629  /*
1630  * In a partial index, discard tuples that don't satisfy the
1631  * predicate.
1632  */
1633  if (predicate != NULL)
1634  {
1635  if (!ExecQual(predicate, econtext))
1636  continue;
1637  }
1638 
1639  /*
1640  * For the current heap tuple, extract all the attributes we use in
1641  * this index, and note which are null. This also performs evaluation
1642  * of any expressions needed.
1643  */
1644  FormIndexDatum(indexInfo,
1645  slot,
1646  estate,
1647  values,
1648  isnull);
1649 
1650  /*
1651  * You'd think we should go ahead and build the index tuple here, but
1652  * some index AMs want to do further processing on the data first. So
1653  * pass the values[] and isnull[] arrays, instead.
1654  */
1655 
1656  if (HeapTupleIsHeapOnly(heapTuple))
1657  {
1658  /*
1659  * For a heap-only tuple, pretend its TID is that of the root. See
1660  * src/backend/access/heap/README.HOT for discussion.
1661  */
1662  ItemPointerData tid;
1663  OffsetNumber offnum;
1664 
1665  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_self);
1666 
1667  /*
1668  * If a HOT tuple points to a root that we don't know about,
1669  * obtain root items afresh. If that still fails, report it as
1670  * corruption.
1671  */
1672  if (root_offsets[offnum - 1] == InvalidOffsetNumber)
1673  {
1674  Page page = BufferGetPage(hscan->rs_cbuf);
1675 
1676  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1677  heap_get_root_tuples(page, root_offsets);
1678  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1679  }
1680 
1681  if (!OffsetNumberIsValid(root_offsets[offnum - 1]))
1682  ereport(ERROR,
1683  (errcode(ERRCODE_DATA_CORRUPTED),
1684  errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"",
1685  ItemPointerGetBlockNumber(&heapTuple->t_self),
1686  offnum,
1687  RelationGetRelationName(heapRelation))));
1688 
1689  ItemPointerSet(&tid, ItemPointerGetBlockNumber(&heapTuple->t_self),
1690  root_offsets[offnum - 1]);
1691 
1692  /* Call the AM's callback routine to process the tuple */
1693  callback(indexRelation, &tid, values, isnull, tupleIsAlive,
1694  callback_state);
1695  }
1696  else
1697  {
1698  /* Call the AM's callback routine to process the tuple */
1699  callback(indexRelation, &heapTuple->t_self, values, isnull,
1700  tupleIsAlive, callback_state);
1701  }
1702  }
1703 
1704  /* Report scan progress one last time. */
1705  if (progress)
1706  {
1707  BlockNumber blks_done;
1708 
1709  if (hscan->rs_base.rs_parallel != NULL)
1710  {
1711  ParallelBlockTableScanDesc pbscan;
1712 
1713  pbscan = (ParallelBlockTableScanDesc) hscan->rs_base.rs_parallel;
1714  blks_done = pbscan->phs_nblocks;
1715  }
1716  else
1717  blks_done = hscan->rs_nblocks;
1718 
1719  pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE,
1720  blks_done);
1721  }
1722 
1723  table_endscan(scan);
1724 
1725  /* we can now forget our snapshot, if set and registered by us */
1726  if (need_unregister_snapshot)
1727  UnregisterSnapshot(snapshot);
1728 
1729  ExecDropSingleTupleTableSlot(slot);
1730 
1731  FreeExecutorState(estate);
1732 
1733  /* These may have been pointing to the now-gone estate */
1734  indexInfo->ii_ExpressionsState = NIL;
1735  indexInfo->ii_PredicateState = NULL;
1736 
1737  return reltuples;
1738 }

References Assert(), BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage(), callback(), CHECK_FOR_INTERRUPTS, CreateExecutorState(), ExprContext::ecxt_per_tuple_memory, ExprContext::ecxt_scantuple, elog(), ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg_internal(), ERROR, ExecDropSingleTupleTableSlot(), ExecPrepareQual(), ExecQual(), ExecStoreBufferHeapTuple(), FormIndexDatum(), ForwardScanDirection, FreeExecutorState(), GetOldestNonRemovableTransactionId(), GetPerTupleExprContext, GetTransactionSnapshot(), heap_get_root_tuples(), heap_getnext(), heap_setscanlimits(), heapam_scan_get_blocks_done(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleSatisfiesVacuum(), IndexInfo::ii_BrokenHotChain, IndexInfo::ii_Concurrent, IndexInfo::ii_ExclusionOps, IndexInfo::ii_ExpressionsState, IndexInfo::ii_Predicate, IndexInfo::ii_PredicateState, IndexInfo::ii_Unique, INDEX_MAX_KEYS, InvalidBlockNumber, InvalidOffsetNumber, InvalidTransactionId, IsBootstrapProcessingMode, IsMVCCSnapshot, IsSystemRelation(), ItemPointerGetBlockNumber(), ItemPointerGetOffsetNumber(), ItemPointerSet(), LockBuffer(), MaxHeapTuplesPerPage, MemoryContextReset(), NIL, OffsetNumberIsValid, OidIsValid, pgstat_progress_update_param(), ParallelBlockTableScanDescData::phs_nblocks, progress, PROGRESS_SCAN_BLOCKS_DONE, PROGRESS_SCAN_BLOCKS_TOTAL, RelationData::rd_rel, RegisterSnapshot(), RelationGetRelationName, HeapScanDescData::rs_base, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_nblocks, TableScanDescData::rs_parallel, TableScanDescData::rs_snapshot, SnapshotAny, HeapTupleData::t_data, HeapTupleData::t_self, table_beginscan_strat(), table_endscan(), table_slot_create(), TransactionIdIsCurrentTransactionId(), TransactionIdIsValid, UnregisterSnapshot(), values, WARNING, XactLockTableWait(), and XLTW_InsertIndexUnique.
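
Index AMs reach this function through the table_index_build_scan() / table_index_build_range_scan() wrappers rather than calling it directly. A hedged sketch of the usual call from an ambuild routine, where build_callback and buildstate are hypothetical names supplied by the index AM:

/* sketch: driving this function from an index AM's ambuild routine */
reltuples = table_index_build_scan(heapRelation, indexRelation, indexInfo,
								   true,	/* allow_sync */
								   true,	/* progress */
								   build_callback,
								   (void *) &buildstate,
								   NULL);	/* let the AM start its own scan */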

◆ heapam_index_fetch_begin()

static IndexFetchTableData* heapam_index_fetch_begin ( Relation  rel)
static

Definition at line 79 of file heapam_handler.c.

80 {
81  IndexFetchHeapData *hscan = palloc0(sizeof(IndexFetchHeapData));
82 
83  hscan->xs_base.rel = rel;
84  hscan->xs_cbuf = InvalidBuffer;
85 
86  return &hscan->xs_base;
87 }

References InvalidBuffer, palloc0(), IndexFetchTableData::rel, IndexFetchHeapData::xs_base, and IndexFetchHeapData::xs_cbuf.

◆ heapam_index_fetch_end()

static void heapam_index_fetch_end ( IndexFetchTableData scan)
static

Definition at line 102 of file heapam_handler.c.

103 {
104  IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
105 
106  heapam_index_fetch_reset(scan);
107 
108  pfree(hscan);
109 }

References heapam_index_fetch_reset(), and pfree().

◆ heapam_index_fetch_reset()

static void heapam_index_fetch_reset ( IndexFetchTableData scan)
static

Definition at line 90 of file heapam_handler.c.

91 {
92  IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
93 
94  if (BufferIsValid(hscan->xs_cbuf))
95  {
96  ReleaseBuffer(hscan->xs_cbuf);
97  hscan->xs_cbuf = InvalidBuffer;
98  }
99 }

References BufferIsValid(), InvalidBuffer, ReleaseBuffer(), and IndexFetchHeapData::xs_cbuf.

Referenced by heapam_index_fetch_end().

◆ heapam_index_fetch_tuple()

static bool heapam_index_fetch_tuple ( struct IndexFetchTableData scan,
ItemPointer  tid,
Snapshot  snapshot,
TupleTableSlot slot,
bool call_again,
bool all_dead 
)
static

Definition at line 112 of file heapam_handler.c.

117 {
118  IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
119  BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
120  bool got_heap_tuple;
121 
122  Assert(TTS_IS_BUFFERTUPLE(slot));
123 
124  /* We can skip the buffer-switching logic if we're in mid-HOT chain. */
125  if (!*call_again)
126  {
127  /* Switch to correct buffer if we don't have it already */
128  Buffer prev_buf = hscan->xs_cbuf;
129 
130  hscan->xs_cbuf = ReleaseAndReadBuffer(hscan->xs_cbuf,
131  hscan->xs_base.rel,
132  ItemPointerGetBlockNumber(tid));
133 
134  /*
135  * Prune page, but only if we weren't already on this page
136  */
137  if (prev_buf != hscan->xs_cbuf)
138  heap_page_prune_opt(hscan->xs_base.rel, hscan->xs_cbuf);
139  }
140 
141  /* Obtain share-lock on the buffer so we can examine visibility */
142  LockBuffer(hscan->xs_cbuf, BUFFER_LOCK_SHARE);
143  got_heap_tuple = heap_hot_search_buffer(tid,
144  hscan->xs_base.rel,
145  hscan->xs_cbuf,
146  snapshot,
147  &bslot->base.tupdata,
148  all_dead,
149  !*call_again);
150  bslot->base.tupdata.t_self = *tid;
151  LockBuffer(hscan->xs_cbuf, BUFFER_LOCK_UNLOCK);
152 
153  if (got_heap_tuple)
154  {
155  /*
156  * Only in a non-MVCC snapshot can more than one member of the HOT
157  * chain be visible.
158  */
159  *call_again = !IsMVCCSnapshot(snapshot);
160 
161  slot->tts_tableOid = RelationGetRelid(scan->rel);
162  ExecStoreBufferHeapTuple(&bslot->base.tupdata, slot, hscan->xs_cbuf);
163  }
164  else
165  {
166  /* We've reached the end of the HOT chain. */
167  *call_again = false;
168  }
169 
170  return got_heap_tuple;
171 }

References Assert(), BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, ExecStoreBufferHeapTuple(), heap_hot_search_buffer(), heap_page_prune_opt(), IsMVCCSnapshot, ItemPointerGetBlockNumber(), LockBuffer(), IndexFetchTableData::rel, RelationGetRelid, ReleaseAndReadBuffer(), TTS_IS_BUFFERTUPLE, TupleTableSlot::tts_tableOid, IndexFetchHeapData::xs_base, and IndexFetchHeapData::xs_cbuf.
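
Together with heapam_index_fetch_begin(), heapam_index_fetch_reset(), and heapam_index_fetch_end(), this implements the index-fetch lifecycle that index scans drive through the tableam wrappers. A minimal sketch, assuming rel, tid, snapshot, and a compatible slot already exist; call_again lets the caller walk further members of a HOT chain under non-MVCC snapshots:

/* sketch: the index-fetch lifecycle through the tableam wrappers */
IndexFetchTableData *fetch = table_index_fetch_begin(rel);
bool		call_again = false;
bool		all_dead = false;

while (table_index_fetch_tuple(fetch, &tid, snapshot, slot,
							   &call_again, &all_dead))
{
	/* process the visible tuple stored in slot */
	if (!call_again)
		break;
}

table_index_fetch_end(fetch);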

◆ heapam_index_validate_scan()

static void heapam_index_validate_scan ( Relation  heapRelation,
Relation  indexRelation,
IndexInfo indexInfo,
Snapshot  snapshot,
ValidateIndexState state 
)
static

Definition at line 1741 of file heapam_handler.c.

1746 {
1747  TableScanDesc scan;
1748  HeapScanDesc hscan;
1749  HeapTuple heapTuple;
1750  Datum values[INDEX_MAX_KEYS];
1751  bool isnull[INDEX_MAX_KEYS];
1752  ExprState *predicate;
1753  TupleTableSlot *slot;
1754  EState *estate;
1755  ExprContext *econtext;
1756  BlockNumber root_blkno = InvalidBlockNumber;
1757  OffsetNumber root_offsets[MaxHeapTuplesPerPage];
1758  bool in_index[MaxHeapTuplesPerPage];
1759  BlockNumber previous_blkno = InvalidBlockNumber;
1760 
1761  /* state variables for the merge */
1762  ItemPointer indexcursor = NULL;
1763  ItemPointerData decoded;
1764  bool tuplesort_empty = false;
1765 
1766  /*
1767  * sanity checks
1768  */
1769  Assert(OidIsValid(indexRelation->rd_rel->relam));
1770 
1771  /*
1772  * Need an EState for evaluation of index expressions and partial-index
1773  * predicates. Also a slot to hold the current tuple.
1774  */
1775  estate = CreateExecutorState();
1776  econtext = GetPerTupleExprContext(estate);
1777  slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation),
1778  &TTSOpsHeapTuple);
1779 
1780  /* Arrange for econtext's scan tuple to be the tuple under test */
1781  econtext->ecxt_scantuple = slot;
1782 
1783  /* Set up execution state for predicate, if any. */
1784  predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
1785 
1786  /*
1787  * Prepare for scan of the base relation. We need just those tuples
1788  * satisfying the passed-in reference snapshot. We must disable syncscan
1789  * here, because it's critical that we read from block zero forward to
1790  * match the sorted TIDs.
1791  */
1792  scan = table_beginscan_strat(heapRelation, /* relation */
1793  snapshot, /* snapshot */
1794  0, /* number of keys */
1795  NULL, /* scan key */
1796  true, /* buffer access strategy OK */
1797  false); /* syncscan not OK */
1798  hscan = (HeapScanDesc) scan;
1799 
1800  pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_TOTAL,
1801  hscan->rs_nblocks);
1802 
1803  /*
1804  * Scan all tuples matching the snapshot.
1805  */
1806  while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
1807  {
1808  ItemPointer heapcursor = &heapTuple->t_self;
1809  ItemPointerData rootTuple;
1810  OffsetNumber root_offnum;
1811 
1812  CHECK_FOR_INTERRUPTS();
1813 
1814  state->htups += 1;
1815 
1816  if ((previous_blkno == InvalidBlockNumber) ||
1817  (hscan->rs_cblock != previous_blkno))
1818  {
1819  pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE,
1820  hscan->rs_cblock);
1821  previous_blkno = hscan->rs_cblock;
1822  }
1823 
1824  /*
1825  * As commented in table_index_build_scan, we should index heap-only
1826  * tuples under the TIDs of their root tuples; so when we advance onto
1827  * a new heap page, build a map of root item offsets on the page.
1828  *
1829  * This complicates merging against the tuplesort output: we will
1830  * visit the live tuples in order by their offsets, but the root
1831  * offsets that we need to compare against the index contents might be
1832  * ordered differently. So we might have to "look back" within the
1833  * tuplesort output, but only within the current page. We handle that
1834  * by keeping a bool array in_index[] showing all the
1835  * already-passed-over tuplesort output TIDs of the current page. We
1836  * clear that array here, when advancing onto a new heap page.
1837  */
1838  if (hscan->rs_cblock != root_blkno)
1839  {
1840  Page page = BufferGetPage(hscan->rs_cbuf);
1841 
1842  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1843  heap_get_root_tuples(page, root_offsets);
1844  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1845 
1846  memset(in_index, 0, sizeof(in_index));
1847 
1848  root_blkno = hscan->rs_cblock;
1849  }
1850 
1851  /* Convert actual tuple TID to root TID */
1852  rootTuple = *heapcursor;
1853  root_offnum = ItemPointerGetOffsetNumber(heapcursor);
1854 
1855  if (HeapTupleIsHeapOnly(heapTuple))
1856  {
1857  root_offnum = root_offsets[root_offnum - 1];
1858  if (!OffsetNumberIsValid(root_offnum))
1859  ereport(ERROR,
1860  (errcode(ERRCODE_DATA_CORRUPTED),
1861  errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"",
1862  ItemPointerGetBlockNumber(heapcursor),
1863  ItemPointerGetOffsetNumber(heapcursor),
1864  RelationGetRelationName(heapRelation))));
1865  ItemPointerSetOffsetNumber(&rootTuple, root_offnum);
1866  }
1867 
1868  /*
1869  * "merge" by skipping through the index tuples until we find or pass
1870  * the current root tuple.
1871  */
1872  while (!tuplesort_empty &&
1873  (!indexcursor ||
1874  ItemPointerCompare(indexcursor, &rootTuple) < 0))
1875  {
1876  Datum ts_val;
1877  bool ts_isnull;
1878 
1879  if (indexcursor)
1880  {
1881  /*
1882  * Remember index items seen earlier on the current heap page
1883  */
1884  if (ItemPointerGetBlockNumber(indexcursor) == root_blkno)
1885  in_index[ItemPointerGetOffsetNumber(indexcursor) - 1] = true;
1886  }
1887 
1888  tuplesort_empty = !tuplesort_getdatum(state->tuplesort, true,
1889  false, &ts_val, &ts_isnull,
1890  NULL);
1891  Assert(tuplesort_empty || !ts_isnull);
1892  if (!tuplesort_empty)
1893  {
1894  itemptr_decode(&decoded, DatumGetInt64(ts_val));
1895  indexcursor = &decoded;
1896  }
1897  else
1898  {
1899  /* Be tidy */
1900  indexcursor = NULL;
1901  }
1902  }
1903 
1904  /*
1905  * If the tuplesort has overshot *and* we didn't see a match earlier,
1906  * then this tuple is missing from the index, so insert it.
1907  */
1908  if ((tuplesort_empty ||
1909  ItemPointerCompare(indexcursor, &rootTuple) > 0) &&
1910  !in_index[root_offnum - 1])
1911  {
1912  MemoryContextReset(econtext->ecxt_per_tuple_memory);
1913 
1914  /* Set up for predicate or expression evaluation */
1915  ExecStoreHeapTuple(heapTuple, slot, false);
1916 
1917  /*
1918  * In a partial index, discard tuples that don't satisfy the
1919  * predicate.
1920  */
1921  if (predicate != NULL)
1922  {
1923  if (!ExecQual(predicate, econtext))
1924  continue;
1925  }
1926 
1927  /*
1928  * For the current heap tuple, extract all the attributes we use
1929  * in this index, and note which are null. This also performs
1930  * evaluation of any expressions needed.
1931  */
1932  FormIndexDatum(indexInfo,
1933  slot,
1934  estate,
1935  values,
1936  isnull);
1937 
1938  /*
1939  * You'd think we should go ahead and build the index tuple here,
1940  * but some index AMs want to do further processing on the data
1941  * first. So pass the values[] and isnull[] arrays, instead.
1942  */
1943 
1944  /*
1945  * If the tuple is already committed dead, you might think we
1946  * could suppress uniqueness checking, but this is no longer true
1947  * in the presence of HOT, because the insert is actually a proxy
1948  * for a uniqueness check on the whole HOT-chain. That is, the
1949  * tuple we have here could be dead because it was already
1950  * HOT-updated, and if so the updating transaction will not have
1951  * thought it should insert index entries. The index AM will
1952  * check the whole HOT-chain and correctly detect a conflict if
1953  * there is one.
1954  */
1955 
1956  index_insert(indexRelation,
1957  values,
1958  isnull,
1959  &rootTuple,
1960  heapRelation,
1961  indexInfo->ii_Unique ?
1962  UNIQUE_CHECK_YES : UNIQUE_CHECK_NO,
1963  false,
1964  indexInfo);
1965 
1966  state->tups_inserted += 1;
1967  }
1968  }
1969 
1970  table_endscan(scan);
1971 
1972  ExecDropSingleTupleTableSlot(slot);
1973 
1974  FreeExecutorState(estate);
1975 
1976  /* These may have been pointing to the now-gone estate */
1977  indexInfo->ii_ExpressionsState = NIL;
1978  indexInfo->ii_PredicateState = NULL;
1979 }

References Assert(), BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage(), CHECK_FOR_INTERRUPTS, CreateExecutorState(), DatumGetInt64(), ExprContext::ecxt_per_tuple_memory, ExprContext::ecxt_scantuple, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg_internal(), ERROR, ExecDropSingleTupleTableSlot(), ExecPrepareQual(), ExecQual(), ExecStoreHeapTuple(), FormIndexDatum(), ForwardScanDirection, FreeExecutorState(), GetPerTupleExprContext, heap_get_root_tuples(), heap_getnext(), HeapTupleIsHeapOnly, IndexInfo::ii_ExpressionsState, IndexInfo::ii_Predicate, IndexInfo::ii_PredicateState, IndexInfo::ii_Unique, index_insert(), INDEX_MAX_KEYS, InvalidBlockNumber, ItemPointerCompare(), ItemPointerGetBlockNumber(), ItemPointerGetOffsetNumber(), ItemPointerSetOffsetNumber(), itemptr_decode(), LockBuffer(), MakeSingleTupleTableSlot(), MaxHeapTuplesPerPage, MemoryContextReset(), NIL, OffsetNumberIsValid, OidIsValid, pgstat_progress_update_param(), PROGRESS_SCAN_BLOCKS_DONE, PROGRESS_SCAN_BLOCKS_TOTAL, RelationData::rd_rel, RelationGetDescr, RelationGetRelationName, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_nblocks, HeapTupleData::t_self, table_beginscan_strat(), table_endscan(), TTSOpsHeapTuple, tuplesort_getdatum(), UNIQUE_CHECK_NO, UNIQUE_CHECK_YES, and values.
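
This callback implements the table side of CREATE INDEX CONCURRENTLY's validation phase: validate_index() first sorts all TIDs present in the index into state->tuplesort, then drives this merge scan through the tableam wrapper. A minimal sketch of that call, assuming the relations, snapshot, and ValidateIndexState are already set up:

/* sketch: how validate_index() reaches this callback */
table_index_validate_scan(heapRelation, indexRelation, indexInfo,
						  snapshot, state);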

◆ heapam_relation_copy_data()

static void heapam_relation_copy_data ( Relation  rel,
const RelFileLocator newrlocator 
)
static

Definition at line 632 of file heapam_handler.c.

633 {
634  SMgrRelation dstrel;
635 
636  dstrel = smgropen(*newrlocator, rel->rd_backend);
637 
638  /*
639  * Since we copy the file directly without looking at the shared buffers,
640  * we'd better first flush out any pages of the source relation that are
641  * in shared buffers. We assume no new changes will be made while we are
642  * holding exclusive lock on the rel.
643  */
644  FlushRelationBuffers(rel);
645 
646  /*
647  * Create and copy all forks of the relation, and schedule unlinking of
648  * old physical files.
649  *
650  * NOTE: any conflict in relfilenumber value will be caught in
651  * RelationCreateStorage().
652  */
653  RelationCreateStorage(*newrlocator, rel->rd_rel->relpersistence, true);
654 
655  /* copy main fork */
656  RelationCopyStorage(RelationGetSmgr(rel), dstrel, MAIN_FORKNUM,
657  rel->rd_rel->relpersistence);
658 
659  /* copy those extra forks that exist */
660  for (ForkNumber forkNum = MAIN_FORKNUM + 1;
661  forkNum <= MAX_FORKNUM; forkNum++)
662  {
663  if (smgrexists(RelationGetSmgr(rel), forkNum))
664  {
665  smgrcreate(dstrel, forkNum, false);
666 
667  /*
668  * WAL log creation if the relation is persistent, or this is the
669  * init fork of an unlogged relation.
670  */
671  if (RelationIsPermanent(rel) ||
672  (rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
673  forkNum == INIT_FORKNUM))
674  log_smgrcreate(newrlocator, forkNum);
675  RelationCopyStorage(RelationGetSmgr(rel), dstrel, forkNum,
676  rel->rd_rel->relpersistence);
677  }
678  }
679 
680 
681  /* drop old relation, and close new one */
682  RelationDropStorage(rel);
683  smgrclose(dstrel);
684 }

References FlushRelationBuffers(), INIT_FORKNUM, log_smgrcreate(), MAIN_FORKNUM, MAX_FORKNUM, RelationData::rd_backend, RelationData::rd_rel, RelationCopyStorage(), RelationCreateStorage(), RelationDropStorage(), RelationGetSmgr(), RelationIsPermanent, smgrclose(), smgrcreate(), smgrexists(), and smgropen().
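
This callback is reached through the table_relation_copy_data() wrapper, which commands such as ALTER TABLE ... SET TABLESPACE use to move a relation's physical storage. A minimal sketch, assuming rel is open with exclusive lock and newrlocator describes the destination:

/* sketch: copying a relation's storage via the tableam wrapper */
table_relation_copy_data(rel, &newrlocator);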

◆ heapam_relation_copy_for_cluster()

static void heapam_relation_copy_for_cluster ( Relation  OldHeap,
Relation  NewHeap,
Relation  OldIndex,
bool  use_sort,
TransactionId  OldestXmin,
TransactionId xid_cutoff,
MultiXactId multi_cutoff,
double *  num_tuples,
double *  tups_vacuumed,
double *  tups_recently_dead 
)
static

Definition at line 687 of file heapam_handler.c.

695 {
696  RewriteState rwstate;
697  IndexScanDesc indexScan;
698  TableScanDesc tableScan;
699  HeapScanDesc heapScan;
700  bool is_system_catalog;
701  Tuplesortstate *tuplesort;
702  TupleDesc oldTupDesc = RelationGetDescr(OldHeap);
703  TupleDesc newTupDesc = RelationGetDescr(NewHeap);
704  TupleTableSlot *slot;
705  int natts;
706  Datum *values;
707  bool *isnull;
708  BufferHeapTupleTableSlot *hslot;
709  BlockNumber prev_cblock = InvalidBlockNumber;
710 
711  /* Remember if it's a system catalog */
712  is_system_catalog = IsSystemRelation(OldHeap);
713 
714  /*
715  * Valid smgr_targblock implies something already wrote to the relation.
716  * This may be harmless, but this function hasn't planned for it.
717  */
718  Assert(RelationGetTargetBlock(NewHeap) == InvalidBlockNumber);
719 
720  /* Preallocate values/isnull arrays */
721  natts = newTupDesc->natts;
722  values = (Datum *) palloc(natts * sizeof(Datum));
723  isnull = (bool *) palloc(natts * sizeof(bool));
724 
725  /* Initialize the rewrite operation */
726  rwstate = begin_heap_rewrite(OldHeap, NewHeap, OldestXmin, *xid_cutoff,
727  *multi_cutoff);
728 
729 
730  /* Set up sorting if wanted */
731  if (use_sort)
732  tuplesort = tuplesort_begin_cluster(oldTupDesc, OldIndex,
733  maintenance_work_mem,
734  NULL, TUPLESORT_NONE);
735  else
736  tuplesort = NULL;
737 
738  /*
739  * Prepare to scan the OldHeap. To ensure we see recently-dead tuples
740  * that still need to be copied, we scan with SnapshotAny and use
741  * HeapTupleSatisfiesVacuum for the visibility test.
742  */
743  if (OldIndex != NULL && !use_sort)
744  {
745  const int ci_index[] = {
746  PROGRESS_CLUSTER_PHASE,
747  PROGRESS_CLUSTER_INDEX_RELID
748  };
749  int64 ci_val[2];
750 
751  /* Set phase and OIDOldIndex to columns */
752  ci_val[0] = PROGRESS_CLUSTER_PHASE_INDEX_SCAN_HEAP;
753  ci_val[1] = RelationGetRelid(OldIndex);
754  pgstat_progress_update_multi_param(2, ci_index, ci_val);
755 
756  tableScan = NULL;
757  heapScan = NULL;
758  indexScan = index_beginscan(OldHeap, OldIndex, SnapshotAny, 0, 0);
759  index_rescan(indexScan, NULL, 0, NULL, 0);
760  }
761  else
762  {
763  /* In scan-and-sort mode and also VACUUM FULL, set phase */
764  pgstat_progress_update_param(PROGRESS_CLUSTER_PHASE,
765  PROGRESS_CLUSTER_PHASE_SEQ_SCAN_HEAP);
766 
767  tableScan = table_beginscan(OldHeap, SnapshotAny, 0, (ScanKey) NULL);
768  heapScan = (HeapScanDesc) tableScan;
769  indexScan = NULL;
770 
771  /* Set total heap blocks */
772  pgstat_progress_update_param(PROGRESS_CLUSTER_TOTAL_HEAP_BLKS,
773  heapScan->rs_nblocks);
774  }
775 
776  slot = table_slot_create(OldHeap, NULL);
777  hslot = (BufferHeapTupleTableSlot *) slot;
778 
779  /*
780  * Scan through the OldHeap, either in OldIndex order or sequentially;
781  * copy each tuple into the NewHeap, or transiently to the tuplesort
782  * module. Note that we don't bother sorting dead tuples (they won't get
783  * to the new table anyway).
784  */
785  for (;;)
786  {
787  HeapTuple tuple;
788  Buffer buf;
789  bool isdead;
790 
791  CHECK_FOR_INTERRUPTS();
792 
793  if (indexScan != NULL)
794  {
795  if (!index_getnext_slot(indexScan, ForwardScanDirection, slot))
796  break;
797 
798  /* Since we used no scan keys, should never need to recheck */
799  if (indexScan->xs_recheck)
800  elog(ERROR, "CLUSTER does not support lossy index conditions");
801  }
802  else
803  {
804  if (!table_scan_getnextslot(tableScan, ForwardScanDirection, slot))
805  {
806  /*
807  * If the last pages of the scan were empty, we would go to
808  * the next phase while heap_blks_scanned != heap_blks_total.
809  * Instead, to ensure that heap_blks_scanned is equivalent to
810  * heap_blks_total after the table scan phase, this parameter
811  * is manually updated to the correct value when the table
812  * scan finishes.
813  */
814  pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_BLKS_SCANNED,
815  heapScan->rs_nblocks);
816  break;
817  }
818 
819  /*
820  * In scan-and-sort mode and also VACUUM FULL, set heap blocks
821  * scanned
822  *
823  * Note that heapScan may start at an offset and wrap around, i.e.
824  * rs_startblock may be >0, and rs_cblock may end with a number
825  * below rs_startblock. To prevent showing this wraparound to the
826  * user, we offset rs_cblock by rs_startblock (modulo rs_nblocks).
827  */
828  if (prev_cblock != heapScan->rs_cblock)
829  {
830  pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_BLKS_SCANNED,
831  (heapScan->rs_cblock +
832  heapScan->rs_nblocks -
833  heapScan->rs_startblock
834  ) % heapScan->rs_nblocks + 1);
835  prev_cblock = heapScan->rs_cblock;
836  }
837  }
838 
839  tuple = ExecFetchSlotHeapTuple(slot, false, NULL);
840  buf = hslot->buffer;
841 
842  LockBuffer(buf, BUFFER_LOCK_SHARE);
843 
844  switch (HeapTupleSatisfiesVacuum(tuple, OldestXmin, buf))
845  {
846  case HEAPTUPLE_DEAD:
847  /* Definitely dead */
848  isdead = true;
849  break;
850  case HEAPTUPLE_RECENTLY_DEAD:
851  *tups_recently_dead += 1;
852  /* fall through */
853  case HEAPTUPLE_LIVE:
854  /* Live or recently dead, must copy it */
855  isdead = false;
856  break;
857  case HEAPTUPLE_INSERT_IN_PROGRESS:
858 
859  /*
860  * Since we hold exclusive lock on the relation, normally the
861  * only way to see this is if it was inserted earlier in our
862  * own transaction. However, it can happen in system
863  * catalogs, since we tend to release write lock before commit
864  * there. Give a warning if neither case applies; but in any
865  * case we had better copy it.
866  */
867  if (!is_system_catalog &&
868  !TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple->t_data)))
869  elog(WARNING, "concurrent insert in progress within table \"%s\"",
870  RelationGetRelationName(OldHeap));
871  /* treat as live */
872  isdead = false;
873  break;
874  case HEAPTUPLE_DELETE_IN_PROGRESS:
875 
876  /*
877  * Similar situation to INSERT_IN_PROGRESS case.
878  */
879  if (!is_system_catalog &&
880  !TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(tuple->t_data)))
881  elog(WARNING, "concurrent delete in progress within table \"%s\"",
882  RelationGetRelationName(OldHeap));
883  /* treat as recently dead */
884  *tups_recently_dead += 1;
885  isdead = false;
886  break;
887  default:
888  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
889  isdead = false; /* keep compiler quiet */
890  break;
891  }
892 
893  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
894 
895  if (isdead)
896  {
897  *tups_vacuumed += 1;
898  /* heap rewrite module still needs to see it... */
899  if (rewrite_heap_dead_tuple(rwstate, tuple))
900  {
901  /* A previous recently-dead tuple is now known dead */
902  *tups_vacuumed += 1;
903  *tups_recently_dead -= 1;
904  }
905  continue;
906  }
907 
908  *num_tuples += 1;
909  if (tuplesort != NULL)
910  {
911  tuplesort_putheaptuple(tuplesort, tuple);
912 
913  /*
914  * In scan-and-sort mode, report increase in number of tuples
915  * scanned
916  */
917  pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_TUPLES_SCANNED,
918  *num_tuples);
919  }
920  else
921  {
922  const int ct_index[] = {
923  PROGRESS_CLUSTER_HEAP_TUPLES_SCANNED,
924  PROGRESS_CLUSTER_HEAP_TUPLES_WRITTEN
925  };
926  int64 ct_val[2];
927 
928  reform_and_rewrite_tuple(tuple, OldHeap, NewHeap,
929  values, isnull, rwstate);
930 
931  /*
932  * In indexscan mode and also VACUUM FULL, report increase in
933  * number of tuples scanned and written
934  */
935  ct_val[0] = *num_tuples;
936  ct_val[1] = *num_tuples;
937  pgstat_progress_update_multi_param(2, ct_index, ct_val);
938  }
939  }
940 
941  if (indexScan != NULL)
942  index_endscan(indexScan);
943  if (tableScan != NULL)
944  table_endscan(tableScan);
945  if (slot)
946  ExecDropSingleTupleTableSlot(slot);
947 
948  /*
949  * In scan-and-sort mode, complete the sort, then read out all live tuples
950  * from the tuplesort and write them to the new relation.
951  */
952  if (tuplesort != NULL)
953  {
954  double n_tuples = 0;
955 
956  /* Report that we are now sorting tuples */
957  pgstat_progress_update_param(PROGRESS_CLUSTER_PHASE,
958  PROGRESS_CLUSTER_PHASE_SORT_TUPLES);
959 
960  tuplesort_performsort(tuplesort);
961 
962  /* Report that we are now writing new heap */
963  pgstat_progress_update_param(PROGRESS_CLUSTER_PHASE,
964  PROGRESS_CLUSTER_PHASE_WRITE_NEW_HEAP);
965 
966  for (;;)
967  {
968  HeapTuple tuple;
969 
970  CHECK_FOR_INTERRUPTS();
971 
972  tuple = tuplesort_getheaptuple(tuplesort, true);
973  if (tuple == NULL)
974  break;
975 
976  n_tuples += 1;
977  reform_and_rewrite_tuple(tuple,
978  OldHeap, NewHeap,
979  values, isnull,
980  rwstate);
981  /* Report n_tuples */
982  pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_TUPLES_WRITTEN,
983  n_tuples);
984  }
985 
986  tuplesort_end(tuplesort);
987  }
988 
989  /* Write out any remaining tuples, and fsync if needed */
990  end_heap_rewrite(rwstate);
991 
992  /* Clean up */
993  pfree(values);
994  pfree(isnull);
995 }
void pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val)
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
Definition: execTuples.c:1645
int maintenance_work_mem
Definition: globals.c:127
static void reform_and_rewrite_tuple(HeapTuple tuple, Relation OldHeap, Relation NewHeap, Datum *values, bool *isnull, RewriteState rwstate)
bool index_getnext_slot(IndexScanDesc scan, ScanDirection direction, TupleTableSlot *slot)
Definition: indexam.c:624
IndexScanDesc index_beginscan(Relation heapRelation, Relation indexRelation, Snapshot snapshot, int nkeys, int norderbys)
Definition: indexam.c:205
void index_endscan(IndexScanDesc scan)
Definition: indexam.c:327
void index_rescan(IndexScanDesc scan, ScanKey keys, int nkeys, ScanKey orderbys, int norderbys)
Definition: indexam.c:301
void * palloc(Size size)
Definition: mcxt.c:1226
#define PROGRESS_CLUSTER_INDEX_RELID
Definition: progress.h:59
#define PROGRESS_CLUSTER_HEAP_BLKS_SCANNED
Definition: progress.h:63
#define PROGRESS_CLUSTER_PHASE_SORT_TUPLES
Definition: progress.h:69
#define PROGRESS_CLUSTER_PHASE_SEQ_SCAN_HEAP
Definition: progress.h:67
#define PROGRESS_CLUSTER_PHASE
Definition: progress.h:58
#define PROGRESS_CLUSTER_HEAP_TUPLES_SCANNED
Definition: progress.h:60
#define PROGRESS_CLUSTER_TOTAL_HEAP_BLKS
Definition: progress.h:62
#define PROGRESS_CLUSTER_HEAP_TUPLES_WRITTEN
Definition: progress.h:61
#define PROGRESS_CLUSTER_PHASE_INDEX_SCAN_HEAP
Definition: progress.h:68
#define PROGRESS_CLUSTER_PHASE_WRITE_NEW_HEAP
Definition: progress.h:70
#define RelationGetTargetBlock(relation)
Definition: rel.h:602
void end_heap_rewrite(RewriteState state)
Definition: rewriteheap.c:299
bool rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
Definition: rewriteheap.c:562
RewriteState begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xmin, TransactionId freeze_xid, MultiXactId cutoff_multi)
Definition: rewriteheap.c:236
BlockNumber rs_startblock
Definition: heapam.h:54
static TableScanDesc table_beginscan(Relation rel, Snapshot snapshot, int nkeys, struct ScanKeyData *key)
Definition: tableam.h:901
static bool table_scan_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
Definition: tableam.h:1050
void tuplesort_performsort(Tuplesortstate *state)
Definition: tuplesort.c:1382
void tuplesort_end(Tuplesortstate *state)
Definition: tuplesort.c:969
#define TUPLESORT_NONE
Definition: tuplesort.h:92
HeapTuple tuplesort_getheaptuple(Tuplesortstate *state, bool forward)
Tuplesortstate * tuplesort_begin_cluster(TupleDesc tupDesc, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
void tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)

References Assert(), begin_heap_rewrite(), buf, BufferHeapTupleTableSlot::buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, CHECK_FOR_INTERRUPTS, elog(), end_heap_rewrite(), ERROR, ExecDropSingleTupleTableSlot(), ExecFetchSlotHeapTuple(), ForwardScanDirection, HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleSatisfiesVacuum(), index_beginscan(), index_endscan(), index_getnext_slot(), index_rescan(), InvalidBlockNumber, IsSystemRelation(), LockBuffer(), maintenance_work_mem, TupleDescData::natts, palloc(), pfree(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_CLUSTER_HEAP_BLKS_SCANNED, PROGRESS_CLUSTER_HEAP_TUPLES_SCANNED, PROGRESS_CLUSTER_HEAP_TUPLES_WRITTEN, PROGRESS_CLUSTER_INDEX_RELID, PROGRESS_CLUSTER_PHASE, PROGRESS_CLUSTER_PHASE_INDEX_SCAN_HEAP, PROGRESS_CLUSTER_PHASE_SEQ_SCAN_HEAP, PROGRESS_CLUSTER_PHASE_SORT_TUPLES, PROGRESS_CLUSTER_PHASE_WRITE_NEW_HEAP, PROGRESS_CLUSTER_TOTAL_HEAP_BLKS, reform_and_rewrite_tuple(), RelationGetDescr, RelationGetRelationName, RelationGetRelid, RelationGetTargetBlock, rewrite_heap_dead_tuple(), HeapScanDescData::rs_cblock, HeapScanDescData::rs_nblocks, HeapScanDescData::rs_startblock, SnapshotAny, HeapTupleData::t_data, table_beginscan(), table_endscan(), table_scan_getnextslot(), table_slot_create(), TransactionIdIsCurrentTransactionId(), tuplesort_begin_cluster(), tuplesort_end(), tuplesort_getheaptuple(), TUPLESORT_NONE, tuplesort_performsort(), tuplesort_putheaptuple(), values, WARNING, and IndexScanDescData::xs_recheck.
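
The progress arithmetic above deserves a closer look: a synchronized scan may begin at rs_startblock > 0 and wrap past the end of the relation, so the value reported for PROGRESS_CLUSTER_HEAP_BLKS_SCANNED is offset by rs_startblock modulo rs_nblocks. A minimal standalone sketch (not part of heapam_handler.c; the helper name is invented for illustration):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t BlockNumber;

/* same formula as the PROGRESS_CLUSTER_HEAP_BLKS_SCANNED update above */
static BlockNumber
blocks_scanned_display(BlockNumber cblock, BlockNumber startblock,
                       BlockNumber nblocks)
{
    return (cblock + nblocks - startblock) % nblocks + 1;
}

int
main(void)
{
    /* a 100-block scan that started at block 90 and is now at block 9 */
    printf("%u\n", blocks_scanned_display(9, 90, 100)); /* prints 20 */
    return 0;
}

Blocks 90-99 and then 0-9 have been read, so the user sees 20 blocks scanned instead of a confusing jump back below the starting block.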

◆ heapam_relation_needs_toast_table()

static bool heapam_relation_needs_toast_table ( Relation  rel)
static

Definition at line 2033 of file heapam_handler.c.

2034 {
2035  int32 data_length = 0;
2036  bool maxlength_unknown = false;
2037  bool has_toastable_attrs = false;
2038  TupleDesc tupdesc = rel->rd_att;
2039  int32 tuple_length;
2040  int i;
2041 
2042  for (i = 0; i < tupdesc->natts; i++)
2043  {
2044  Form_pg_attribute att = TupleDescAttr(tupdesc, i);
2045 
2046  if (att->attisdropped)
2047  continue;
2048  data_length = att_align_nominal(data_length, att->attalign);
2049  if (att->attlen > 0)
2050  {
2051  /* Fixed-length types are never toastable */
2052  data_length += att->attlen;
2053  }
2054  else
2055  {
2056  int32 maxlen = type_maximum_size(att->atttypid,
2057  att->atttypmod);
2058 
2059  if (maxlen < 0)
2060  maxlength_unknown = true;
2061  else
2062  data_length += maxlen;
2063  if (att->attstorage != TYPSTORAGE_PLAIN)
2064  has_toastable_attrs = true;
2065  }
2066  }
2067  if (!has_toastable_attrs)
2068  return false; /* nothing to toast? */
2069  if (maxlength_unknown)
2070  return true; /* any unlimited-length attrs? */
2071  tuple_length = MAXALIGN(SizeofHeapTupleHeader +
2072  BITMAPLEN(tupdesc->natts)) +
2073  MAXALIGN(data_length);
2074  return (tuple_length > TOAST_TUPLE_THRESHOLD);
2075 }
#define MAXALIGN(LEN)
Definition: c.h:800
signed int int32
Definition: c.h:483
int32 type_maximum_size(Oid type_oid, int32 typemod)
Definition: format_type.c:412
#define TOAST_TUPLE_THRESHOLD
Definition: heaptoast.h:48
#define SizeofHeapTupleHeader
Definition: htup_details.h:185
#define BITMAPLEN(NATTS)
Definition: htup_details.h:545
int i
Definition: isn.c:73
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:209
TupleDesc rd_att
Definition: rel.h:112
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
#define att_align_nominal(cur_offset, attalign)
Definition: tupmacs.h:129

References att_align_nominal, BITMAPLEN, i, MAXALIGN, TupleDescData::natts, RelationData::rd_att, SizeofHeapTupleHeader, TOAST_TUPLE_THRESHOLD, TupleDescAttr, and type_maximum_size().
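
To make the threshold test concrete, here is a standalone sketch of the same decision with simplified stand-ins for MAXALIGN and TOAST_TUPLE_THRESHOLD (the real macros live in c.h and heaptoast.h and depend on platform and block size); per-attribute alignment is ignored for brevity:

#include <stdbool.h>
#include <stdio.h>

#define ALIGN8(x)  (((x) + 7) & ~7)   /* crude stand-in for MAXALIGN */
#define THRESHOLD  2032               /* stand-in for TOAST_TUPLE_THRESHOLD */

typedef struct
{
    int   attlen;          /* > 0: fixed width; -1: varlena */
    int   maxlen;          /* < 0: unlimited length (e.g. text) */
    bool  plain_storage;   /* TYPSTORAGE_PLAIN, i.e. never toasted */
} Col;

static bool
needs_toast_table(const Col *cols, int ncols, int header_bytes)
{
    int   data_length = 0;
    bool  maxlength_unknown = false;
    bool  has_toastable_attrs = false;

    for (int i = 0; i < ncols; i++)
    {
        if (cols[i].attlen > 0)
            data_length += cols[i].attlen;   /* fixed width: never toastable */
        else
        {
            if (cols[i].maxlen < 0)
                maxlength_unknown = true;
            else
                data_length += cols[i].maxlen;
            if (!cols[i].plain_storage)
                has_toastable_attrs = true;
        }
    }
    if (!has_toastable_attrs)
        return false;            /* nothing to toast */
    if (maxlength_unknown)
        return true;             /* some attr has unlimited length */
    return ALIGN8(header_bytes) + ALIGN8(data_length) > THRESHOLD;
}

int
main(void)
{
    Col  fixed_only[] = {{8, 0, true}, {4, 0, true}};    /* bigint, int4 */
    Col  with_text[]  = {{-1, -1, false}, {8, 0, true}}; /* text, bigint */

    printf("%d %d\n",
           needs_toast_table(fixed_only, 2, 24),
           needs_toast_table(with_text, 2, 24));  /* prints 0 1 */
    return 0;
}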

◆ heapam_relation_nontransactional_truncate()

static void heapam_relation_nontransactional_truncate ( Relation  rel)
static

Definition at line 626 of file heapam_handler.c.

627 {
628  RelationTruncate(rel, 0);
629 }
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:287

References RelationTruncate().

◆ heapam_relation_set_new_filelocator()

static void heapam_relation_set_new_filelocator ( Relation  rel,
const RelFileLocator newrlocator,
char  persistence,
TransactionId freezeXid,
MultiXactId minmulti 
)
static

Definition at line 580 of file heapam_handler.c.

585 {
586  SMgrRelation srel;
587 
588  /*
589  * Initialize to the minimum XID that could put tuples in the table. We
590  * know that no xacts older than RecentXmin are still running, so that
591  * will do.
592  */
593  *freezeXid = RecentXmin;
594 
595  /*
596  * Similarly, initialize the minimum Multixact to the first value that
597  * could possibly be stored in tuples in the table. Running transactions
598  * could reuse values from their local cache, so we are careful to
599  * consider all currently running multis.
600  *
601  * XXX this could be refined further, but is it worth the hassle?
602  */
603  *minmulti = GetOldestMultiXactId();
604 
605  srel = RelationCreateStorage(*newrlocator, persistence, true);
606 
607  /*
608  * If required, set up an init fork for an unlogged table so that it can
609  * be correctly reinitialized on restart. Recovery may remove it while
610  * replaying, for example, an XLOG_DBASE_CREATE* or XLOG_TBLSPC_CREATE
611  * record. Therefore, logging is necessary even if wal_level=minimal.
612  */
613  if (persistence == RELPERSISTENCE_UNLOGGED)
614  {
615  Assert(rel->rd_rel->relkind == RELKIND_RELATION ||
616  rel->rd_rel->relkind == RELKIND_MATVIEW ||
617  rel->rd_rel->relkind == RELKIND_TOASTVALUE);
618  smgrcreate(srel, INIT_FORKNUM, false);
619  log_smgrcreate(newrlocator, INIT_FORKNUM);
620  }
621 
622  smgrclose(srel);
623 }
MultiXactId GetOldestMultiXactId(void)
Definition: multixact.c:2507
TransactionId RecentXmin
Definition: snapmgr.c:105

References Assert(), GetOldestMultiXactId(), INIT_FORKNUM, log_smgrcreate(), RelationData::rd_rel, RecentXmin, RelationCreateStorage(), smgrclose(), and smgrcreate().

◆ heapam_relation_toast_am()

static Oid heapam_relation_toast_am ( Relation  rel)
static

Definition at line 2081 of file heapam_handler.c.

2082 {
2083  return rel->rd_rel->relam;
2084 }

References RelationData::rd_rel.

◆ heapam_scan_analyze_next_block()

static bool heapam_scan_analyze_next_block ( TableScanDesc  scan,
BlockNumber  blockno,
BufferAccessStrategy  bstrategy 
)
static

Definition at line 998 of file heapam_handler.c.

1000 {
1001  HeapScanDesc hscan = (HeapScanDesc) scan;
1002 
1003  /*
1004  * We must maintain a pin on the target page's buffer to ensure that
1005  * concurrent activity - e.g. HOT pruning - doesn't delete tuples out from
1006  * under us. Hence, pin the page until we are done looking at it. We
1007  * also choose to hold sharelock on the buffer throughout --- we could
1008  * release and re-acquire sharelock for each tuple, but since we aren't
1009  * doing much work per tuple, the extra lock traffic is probably better
1010  * avoided.
1011  */
1012  hscan->rs_cblock = blockno;
1013  hscan->rs_cindex = FirstOffsetNumber;
1014  hscan->rs_cbuf = ReadBufferExtended(scan->rs_rd, MAIN_FORKNUM,
1015  blockno, RBM_NORMAL, bstrategy);
1016  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1017 
1018  /* in heap all blocks can contain tuples, so always return true */
1019  return true;
1020 }
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:755
@ RBM_NORMAL
Definition: bufmgr.h:44
#define FirstOffsetNumber
Definition: off.h:27
Relation rs_rd
Definition: relscan.h:34

References BUFFER_LOCK_SHARE, FirstOffsetNumber, LockBuffer(), MAIN_FORKNUM, RBM_NORMAL, ReadBufferExtended(), HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_cindex, and TableScanDescData::rs_rd.

◆ heapam_scan_analyze_next_tuple()

static bool heapam_scan_analyze_next_tuple ( TableScanDesc  scan,
TransactionId  OldestXmin,
double *  liverows,
double *  deadrows,
TupleTableSlot slot 
)
static

Definition at line 1023 of file heapam_handler.c.

1026 {
1027  HeapScanDesc hscan = (HeapScanDesc) scan;
1028  Page targpage;
1029  OffsetNumber maxoffset;
1030  BufferHeapTupleTableSlot *hslot;
1031 
1032  Assert(TTS_IS_BUFFERTUPLE(slot));
1033 
1034  hslot = (BufferHeapTupleTableSlot *) slot;
1035  targpage = BufferGetPage(hscan->rs_cbuf);
1036  maxoffset = PageGetMaxOffsetNumber(targpage);
1037 
1038  /* Inner loop over all tuples on the selected page */
1039  for (; hscan->rs_cindex <= maxoffset; hscan->rs_cindex++)
1040  {
1041  ItemId itemid;
1042  HeapTuple targtuple = &hslot->base.tupdata;
1043  bool sample_it = false;
1044 
1045  itemid = PageGetItemId(targpage, hscan->rs_cindex);
1046 
1047  /*
1048  * We ignore unused and redirect line pointers. DEAD line pointers
1049  * should be counted as dead, because we need vacuum to run to get rid
1050  * of them. Note that this rule agrees with the way that
1051  * heap_page_prune() counts things.
1052  */
1053  if (!ItemIdIsNormal(itemid))
1054  {
1055  if (ItemIdIsDead(itemid))
1056  *deadrows += 1;
1057  continue;
1058  }
1059 
1060  ItemPointerSet(&targtuple->t_self, hscan->rs_cblock, hscan->rs_cindex);
1061 
1062  targtuple->t_tableOid = RelationGetRelid(scan->rs_rd);
1063  targtuple->t_data = (HeapTupleHeader) PageGetItem(targpage, itemid);
1064  targtuple->t_len = ItemIdGetLength(itemid);
1065 
1066  switch (HeapTupleSatisfiesVacuum(targtuple, OldestXmin,
1067  hscan->rs_cbuf))
1068  {
1069  case HEAPTUPLE_LIVE:
1070  sample_it = true;
1071  *liverows += 1;
1072  break;
1073 
1074  case HEAPTUPLE_DEAD:
1075  case HEAPTUPLE_RECENTLY_DEAD:
1076  /* Count dead and recently-dead rows */
1077  *deadrows += 1;
1078  break;
1079 
1080  case HEAPTUPLE_INSERT_IN_PROGRESS:
1081 
1082  /*
1083  * Insert-in-progress rows are not counted. We assume that
1084  * when the inserting transaction commits or aborts, it will
1085  * send a stats message to increment the proper count. This
1086  * works right only if that transaction ends after we finish
1087  * analyzing the table; if things happen in the other order,
1088  * its stats update will be overwritten by ours. However, the
1089  * error will be large only if the other transaction runs long
1090  * enough to insert many tuples, so assuming it will finish
1091  * after us is the safer option.
1092  *
1093  * A special case is that the inserting transaction might be
1094  * our own. In this case we should count and sample the row,
1095  * to accommodate users who load a table and analyze it in one
1096  * transaction. (pgstat_report_analyze has to adjust the
1097  * numbers we report to the cumulative stats system to make
1098  * this come out right.)
1099  */
1100  if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(targtuple->t_data)))
1101  {
1102  sample_it = true;
1103  *liverows += 1;
1104  }
1105  break;
1106 
1107  case HEAPTUPLE_DELETE_IN_PROGRESS:
1108 
1109  /*
1110  * We count and sample delete-in-progress rows the same as
1111  * live ones, so that the stats counters come out right if the
1112  * deleting transaction commits after us, per the same
1113  * reasoning given above.
1114  *
1115  * If the delete was done by our own transaction, however, we
1116  * must count the row as dead to make pgstat_report_analyze's
1117  * stats adjustments come out right. (Note: this works out
1118  * properly when the row was both inserted and deleted in our
1119  * xact.)
1120  *
1121  * The net effect of these choices is that we act as though an
1122  * IN_PROGRESS transaction hasn't happened yet, except if it
1123  * is our own transaction, which we assume has happened.
1124  *
1125  * This approach ensures that we behave sanely if we see both
1126  * the pre-image and post-image rows for a row being updated
1127  * by a concurrent transaction: we will sample the pre-image
1128  * but not the post-image. We also get sane results if the
1129  * concurrent transaction never commits.
1130  */
1131  if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(targtuple->t_data)))
1132  *deadrows += 1;
1133  else
1134  {
1135  sample_it = true;
1136  *liverows += 1;
1137  }
1138  break;
1139 
1140  default:
1141  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1142  break;
1143  }
1144 
1145  if (sample_it)
1146  {
1147  ExecStoreBufferHeapTuple(targtuple, slot, hscan->rs_cbuf);
1148  hscan->rs_cindex++;
1149 
1150  /* note that we leave the buffer locked here! */
1151  return true;
1152  }
1153  }
1154 
1155  /* Now release the lock and pin on the page */
1156  UnlockReleaseBuffer(hscan->rs_cbuf);
1157  hscan->rs_cbuf = InvalidBuffer;
1158 
1159  /* also prevent old slot contents from having pin on page */
1160  ExecClearTuple(slot);
1161 
1162  return false;
1163 }
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4497
static Item PageGetItem(Page page, ItemId itemId)
Definition: bufpage.h:351
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:240
static OffsetNumber PageGetMaxOffsetNumber(Page page)
Definition: bufpage.h:369
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
uint32 t_len
Definition: htup.h:64
Oid t_tableOid
Definition: htup.h:66
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:432

References Assert(), BufferGetPage(), elog(), ERROR, ExecClearTuple(), ExecStoreBufferHeapTuple(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleSatisfiesVacuum(), InvalidBuffer, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemPointerSet(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), RelationGetRelid, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_cindex, TableScanDescData::rs_rd, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdIsCurrentTransactionId(), TTS_IS_BUFFERTUPLE, and UnlockReleaseBuffer().

◆ heapam_scan_bitmap_next_block()

static bool heapam_scan_bitmap_next_block ( TableScanDesc  scan,
TBMIterateResult tbmres 
)
static

Definition at line 2115 of file heapam_handler.c.

2117 {
2118  HeapScanDesc hscan = (HeapScanDesc) scan;
2119  BlockNumber block = tbmres->blockno;
2120  Buffer buffer;
2121  Snapshot snapshot;
2122  int ntup;
2123 
2124  hscan->rs_cindex = 0;
2125  hscan->rs_ntuples = 0;
2126 
2127  /*
2128  * Ignore any claimed entries past what we think is the end of the
2129  * relation. It may have been extended after the start of our scan (we
2130  * only hold an AccessShareLock, and it could be inserts from this
2131  * backend). We don't take this optimization in SERIALIZABLE isolation
2132  * though, as we need to examine all invisible tuples reachable by the
2133  * index.
2134  */
2135  if (!IsolationIsSerializable() && block >= hscan->rs_nblocks)
2136  return false;
2137 
2138  /*
2139  * Acquire pin on the target heap page, trading in any pin we held before.
2140  */
2141  hscan->rs_cbuf = ReleaseAndReadBuffer(hscan->rs_cbuf,
2142  scan->rs_rd,
2143  block);
2144  hscan->rs_cblock = block;
2145  buffer = hscan->rs_cbuf;
2146  snapshot = scan->rs_snapshot;
2147 
2148  ntup = 0;
2149 
2150  /*
2151  * Prune and repair fragmentation for the whole page, if possible.
2152  */
2153  heap_page_prune_opt(scan->rs_rd, buffer);
2154 
2155  /*
2156  * We must hold share lock on the buffer content while examining tuple
2157  * visibility. Afterwards, however, the tuples we have found to be
2158  * visible are guaranteed good as long as we hold the buffer pin.
2159  */
2160  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2161 
2162  /*
2163  * We need two separate strategies for lossy and non-lossy cases.
2164  */
2165  if (tbmres->ntuples >= 0)
2166  {
2167  /*
2168  * Bitmap is non-lossy, so we just look through the offsets listed in
2169  * tbmres; but we have to follow any HOT chain starting at each such
2170  * offset.
2171  */
2172  int curslot;
2173 
2174  for (curslot = 0; curslot < tbmres->ntuples; curslot++)
2175  {
2176  OffsetNumber offnum = tbmres->offsets[curslot];
2177  ItemPointerData tid;
2178  HeapTupleData heapTuple;
2179 
2180  ItemPointerSet(&tid, block, offnum);
2181  if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot,
2182  &heapTuple, NULL, true))
2183  hscan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
2184  }
2185  }
2186  else
2187  {
2188  /*
2189  * Bitmap is lossy, so we must examine each line pointer on the page.
2190  * But we can ignore HOT chains, since we'll check each tuple anyway.
2191  */
2192  Page page = BufferGetPage(buffer);
2193  OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
2194  OffsetNumber offnum;
2195 
2196  for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
2197  {
2198  ItemId lp;
2199  HeapTupleData loctup;
2200  bool valid;
2201 
2202  lp = PageGetItemId(page, offnum);
2203  if (!ItemIdIsNormal(lp))
2204  continue;
2205  loctup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2206  loctup.t_len = ItemIdGetLength(lp);
2207  loctup.t_tableOid = scan->rs_rd->rd_id;
2208  ItemPointerSet(&loctup.t_self, block, offnum);
2209  valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
2210  if (valid)
2211  {
2212  hscan->rs_vistuples[ntup++] = offnum;
2213  PredicateLockTID(scan->rs_rd, &loctup.t_self, snapshot,
2214  HeapTupleHeaderGetXmin(loctup.t_data));
2215  }
2216  HeapCheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
2217  buffer, snapshot);
2218  }
2219  }
2220 
2221  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2222 
2223  Assert(ntup <= MaxHeapTuplesPerPage);
2224  hscan->rs_ntuples = ntup;
2225 
2226  return ntup > 0;
2227 }
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:10139
bool HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot, Buffer buffer)
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
void PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot, TransactionId tuple_xid)
Definition: predicate.c:2555
int rs_ntuples
Definition: heapam.h:77
OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]
Definition: heapam.h:78
Oid rd_id
Definition: rel.h:113
OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER]
Definition: tidbitmap.h:46
BlockNumber blockno
Definition: tidbitmap.h:42
#define IsolationIsSerializable()
Definition: xact.h:52

References Assert(), TBMIterateResult::blockno, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage(), FirstOffsetNumber, heap_hot_search_buffer(), heap_page_prune_opt(), HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetXmin, HeapTupleSatisfiesVisibility(), IsolationIsSerializable, ItemIdGetLength, ItemIdIsNormal, ItemPointerGetOffsetNumber(), ItemPointerSet(), LockBuffer(), MaxHeapTuplesPerPage, TBMIterateResult::ntuples, OffsetNumberNext, TBMIterateResult::offsets, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PredicateLockTID(), RelationData::rd_id, ReleaseAndReadBuffer(), HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_cindex, HeapScanDescData::rs_nblocks, HeapScanDescData::rs_ntuples, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapScanDescData::rs_vistuples, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.
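
The two per-page strategies (exact pages walk only the offsets recorded in the bitmap; lossy pages walk every line pointer and recheck) can be shown in miniature. This standalone sketch uses a cut-down stand-in for TBMIterateResult, where ntuples == -1 marks a lossy page as in tidbitmap.h:

#include <stdio.h>

#define MAX_OFFS 8

/* cut-down stand-in for TBMIterateResult: ntuples == -1 means lossy */
typedef struct
{
    int   ntuples;
    int   offsets[MAX_OFFS];
} BitmapPage;

static void
visit_page(const BitmapPage *page, int max_offset_on_page)
{
    if (page->ntuples >= 0)
    {
        /* exact: only the offsets recorded in the bitmap */
        for (int i = 0; i < page->ntuples; i++)
            printf("check offset %d\n", page->offsets[i]);
    }
    else
    {
        /* lossy: every line pointer on the page must be rechecked */
        for (int off = 1; off <= max_offset_on_page; off++)
            printf("check offset %d\n", off);
    }
}

int
main(void)
{
    BitmapPage  exact = {3, {2, 5, 7}};
    BitmapPage  lossy = {-1, {0}};

    visit_page(&exact, 10);     /* checks offsets 2, 5, 7 */
    visit_page(&lossy, 10);     /* checks offsets 1..10 */
    return 0;
}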

◆ heapam_scan_bitmap_next_tuple()

static bool heapam_scan_bitmap_next_tuple ( TableScanDesc  scan,
TBMIterateResult tbmres,
TupleTableSlot slot 
)
static

Definition at line 2230 of file heapam_handler.c.

2233 {
2234  HeapScanDesc hscan = (HeapScanDesc) scan;
2235  OffsetNumber targoffset;
2236  Page page;
2237  ItemId lp;
2238 
2239  /*
2240  * Out of range? If so, nothing more to look at on this page
2241  */
2242  if (hscan->rs_cindex < 0 || hscan->rs_cindex >= hscan->rs_ntuples)
2243  return false;
2244 
2245  targoffset = hscan->rs_vistuples[hscan->rs_cindex];
2246  page = BufferGetPage(hscan->rs_cbuf);
2247  lp = PageGetItemId(page, targoffset);
2248  Assert(ItemIdIsNormal(lp));
2249 
2250  hscan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2251  hscan->rs_ctup.t_len = ItemIdGetLength(lp);
2252  hscan->rs_ctup.t_tableOid = scan->rs_rd->rd_id;
2253  ItemPointerSet(&hscan->rs_ctup.t_self, hscan->rs_cblock, targoffset);
2254 
2255  pgstat_count_heap_fetch(scan->rs_rd);
2256 
2257  /*
2258  * Set up the result slot to point to this tuple. Note that the slot
2259  * acquires a pin on the buffer.
2260  */
2261  ExecStoreBufferHeapTuple(&hscan->rs_ctup,
2262  slot,
2263  hscan->rs_cbuf);
2264 
2265  hscan->rs_cindex++;
2266 
2267  return true;
2268 }
#define pgstat_count_heap_fetch(rel)
Definition: pgstat.h:618
HeapTupleData rs_ctup
Definition: heapam.h:67

References Assert(), BufferGetPage(), ExecStoreBufferHeapTuple(), ItemIdGetLength, ItemIdIsNormal, ItemPointerSet(), PageGetItem(), PageGetItemId(), pgstat_count_heap_fetch, RelationData::rd_id, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_cindex, HeapScanDescData::rs_ctup, HeapScanDescData::rs_ntuples, TableScanDescData::rs_rd, HeapScanDescData::rs_vistuples, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

◆ heapam_scan_get_blocks_done()

static BlockNumber heapam_scan_get_blocks_done ( HeapScanDesc  hscan)
static

Definition at line 1988 of file heapam_handler.c.

1989 {
1990  ParallelBlockTableScanDesc bpscan = NULL;
1991  BlockNumber startblock;
1992  BlockNumber blocks_done;
1993 
1994  if (hscan->rs_base.rs_parallel != NULL)
1995  {
1996  bpscan = (ParallelBlockTableScanDesc) hscan->rs_base.rs_parallel;
1997  startblock = bpscan->phs_startblock;
1998  }
1999  else
2000  startblock = hscan->rs_startblock;
2001 
2002  /*
2003  * Might have wrapped around the end of the relation, if startblock was
2004  * not zero.
2005  */
2006  if (hscan->rs_cblock > startblock)
2007  blocks_done = hscan->rs_cblock - startblock;
2008  else
2009  {
2010  BlockNumber nblocks;
2011 
2012  nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks;
2013  blocks_done = nblocks - startblock +
2014  hscan->rs_cblock;
2015  }
2016 
2017  return blocks_done;
2018 }

References ParallelBlockTableScanDescData::phs_nblocks, ParallelBlockTableScanDescData::phs_startblock, HeapScanDescData::rs_base, HeapScanDescData::rs_cblock, HeapScanDescData::rs_nblocks, TableScanDescData::rs_parallel, and HeapScanDescData::rs_startblock.

Referenced by heapam_index_build_range_scan().
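
A standalone rendering of the two branches above, ignoring the parallel-scan case (where the start block comes from the shared descriptor); blocks_done here is a free function for illustration only:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t BlockNumber;

static BlockNumber
blocks_done(BlockNumber cblock, BlockNumber startblock, BlockNumber nblocks)
{
    if (cblock > startblock)
        return cblock - startblock;          /* no wraparound yet */
    return nblocks - startblock + cblock;    /* wrapped past the end */
}

int
main(void)
{
    printf("%u\n", blocks_done(70, 50, 100));   /* 20: still ahead of start */
    printf("%u\n", blocks_done(10, 50, 100));   /* 60: wrapped around */
    return 0;
}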

◆ heapam_scan_sample_next_block()

static bool heapam_scan_sample_next_block ( TableScanDesc  scan,
SampleScanState scanstate 
)
static

Definition at line 2271 of file heapam_handler.c.

2272 {
2273  HeapScanDesc hscan = (HeapScanDesc) scan;
2274  TsmRoutine *tsm = scanstate->tsmroutine;
2275  BlockNumber blockno;
2276 
2277  /* return false immediately if relation is empty */
2278  if (hscan->rs_nblocks == 0)
2279  return false;
2280 
2281  if (tsm->NextSampleBlock)
2282  {
2283  blockno = tsm->NextSampleBlock(scanstate, hscan->rs_nblocks);
2284  hscan->rs_cblock = blockno;
2285  }
2286  else
2287  {
2288  /* scanning table sequentially */
2289 
2290  if (hscan->rs_cblock == InvalidBlockNumber)
2291  {
2292  Assert(!hscan->rs_inited);
2293  blockno = hscan->rs_startblock;
2294  }
2295  else
2296  {
2297  Assert(hscan->rs_inited);
2298 
2299  blockno = hscan->rs_cblock + 1;
2300 
2301  if (blockno >= hscan->rs_nblocks)
2302  {
2303  /* wrap to beginning of rel, might not have started at 0 */
2304  blockno = 0;
2305  }
2306 
2307  /*
2308  * Report our new scan position for synchronization purposes.
2309  *
2310  * Note: we do this before checking for end of scan so that the
2311  * final state of the position hint is back at the start of the
2312  * rel. That's not strictly necessary, but otherwise when you run
2313  * the same query multiple times the starting position would shift
2314  * a little bit backwards on every invocation, which is confusing.
2315  * We don't guarantee any specific ordering in general, though.
2316  */
2317  if (scan->rs_flags & SO_ALLOW_SYNC)
2318  ss_report_location(scan->rs_rd, blockno);
2319 
2320  if (blockno == hscan->rs_startblock)
2321  {
2322  blockno = InvalidBlockNumber;
2323  }
2324  }
2325  }
2326 
2327  if (!BlockNumberIsValid(blockno))
2328  {
2329  if (BufferIsValid(hscan->rs_cbuf))
2330  ReleaseBuffer(hscan->rs_cbuf);
2331  hscan->rs_cbuf = InvalidBuffer;
2332  hscan->rs_cblock = InvalidBlockNumber;
2333  hscan->rs_inited = false;
2334 
2335  return false;
2336  }
2337 
2338  heapgetpage(scan, blockno);
2339  hscan->rs_inited = true;
2340 
2341  return true;
2342 }
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
void heapgetpage(TableScanDesc sscan, BlockNumber block)
Definition: heapam.c:377
bool rs_inited
Definition: heapam.h:59
struct TsmRoutine * tsmroutine
Definition: execnodes.h:1500
uint32 rs_flags
Definition: relscan.h:47
NextSampleBlock_function NextSampleBlock
Definition: tsmapi.h:73
void ss_report_location(Relation rel, BlockNumber location)
Definition: syncscan.c:289
@ SO_ALLOW_SYNC
Definition: tableam.h:59

References Assert(), BlockNumberIsValid(), BufferIsValid(), heapgetpage(), InvalidBlockNumber, InvalidBuffer, TsmRoutine::NextSampleBlock, ReleaseBuffer(), HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, TableScanDescData::rs_flags, HeapScanDescData::rs_inited, HeapScanDescData::rs_nblocks, TableScanDescData::rs_rd, HeapScanDescData::rs_startblock, SO_ALLOW_SYNC, ss_report_location(), and SampleScanState::tsmroutine.

◆ heapam_scan_sample_next_tuple()

static bool heapam_scan_sample_next_tuple ( TableScanDesc  scan,
SampleScanState scanstate,
TupleTableSlot slot 
)
static

Definition at line 2345 of file heapam_handler.c.

2347 {
2348  HeapScanDesc hscan = (HeapScanDesc) scan;
2349  TsmRoutine *tsm = scanstate->tsmroutine;
2350  BlockNumber blockno = hscan->rs_cblock;
2351  bool pagemode = (scan->rs_flags & SO_ALLOW_PAGEMODE) != 0;
2352 
2353  Page page;
2354  bool all_visible;
2355  OffsetNumber maxoffset;
2356 
2357  /*
2358  * When not using pagemode, we must lock the buffer during tuple
2359  * visibility checks.
2360  */
2361  if (!pagemode)
2362  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
2363 
2364  page = (Page) BufferGetPage(hscan->rs_cbuf);
2365  all_visible = PageIsAllVisible(page) &&
2366  !scan->rs_snapshot->takenDuringRecovery;
2367  maxoffset = PageGetMaxOffsetNumber(page);
2368 
2369  for (;;)
2370  {
2371  OffsetNumber tupoffset;
2372 
2373  CHECK_FOR_INTERRUPTS();
2374 
2375  /* Ask the tablesample method which tuples to check on this page. */
2376  tupoffset = tsm->NextSampleTuple(scanstate,
2377  blockno,
2378  maxoffset);
2379 
2380  if (OffsetNumberIsValid(tupoffset))
2381  {
2382  ItemId itemid;
2383  bool visible;
2384  HeapTuple tuple = &(hscan->rs_ctup);
2385 
2386  /* Skip invalid tuple pointers. */
2387  itemid = PageGetItemId(page, tupoffset);
2388  if (!ItemIdIsNormal(itemid))
2389  continue;
2390 
2391  tuple->t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2392  tuple->t_len = ItemIdGetLength(itemid);
2393  ItemPointerSet(&(tuple->t_self), blockno, tupoffset);
2394 
2395 
2396  if (all_visible)
2397  visible = true;
2398  else
2399  visible = SampleHeapTupleVisible(scan, hscan->rs_cbuf,
2400  tuple, tupoffset);
2401 
2402  /* in pagemode, heapgetpage did this for us */
2403  if (!pagemode)
2404  HeapCheckForSerializableConflictOut(visible, scan->rs_rd, tuple,
2405  hscan->rs_cbuf, scan->rs_snapshot);
2406 
2407  /* Try next tuple from same page. */
2408  if (!visible)
2409  continue;
2410 
2411  /* Found visible tuple, return it. */
2412  if (!pagemode)
2413  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
2414 
2415  ExecStoreBufferHeapTuple(tuple, slot, hscan->rs_cbuf);
2416 
2417  /* Count successfully-fetched tuples as heap fetches */
2418  pgstat_count_heap_getnext(scan->rs_rd);
2419 
2420  return true;
2421  }
2422  else
2423  {
2424  /*
2425  * If we get here, it means we've exhausted the items on this page
2426  * and it's time to move to the next.
2427  */
2428  if (!pagemode)
2429  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
2430 
2431  ExecClearTuple(slot);
2432  return false;
2433  }
2434  }
2435 
2436  Assert(0);
2437 }
static bool PageIsAllVisible(Page page)
Definition: bufpage.h:426
static bool SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer, HeapTuple tuple, OffsetNumber tupoffset)
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:613
bool takenDuringRecovery
Definition: snapshot.h:184
NextSampleTuple_function NextSampleTuple
Definition: tsmapi.h:74
@ SO_ALLOW_PAGEMODE
Definition: tableam.h:61

References Assert(), BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage(), CHECK_FOR_INTERRUPTS, ExecClearTuple(), ExecStoreBufferHeapTuple(), HeapCheckForSerializableConflictOut(), ItemIdGetLength, ItemIdIsNormal, ItemPointerSet(), LockBuffer(), TsmRoutine::NextSampleTuple, OffsetNumberIsValid, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageIsAllVisible(), pgstat_count_heap_getnext, HeapScanDescData::rs_cblock, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, SampleHeapTupleVisible(), SO_ALLOW_PAGEMODE, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, SnapshotData::takenDuringRecovery, and SampleScanState::tsmroutine.

◆ heapam_slot_callbacks()

static const TupleTableSlotOps* heapam_slot_callbacks ( Relation  relation)
static

Definition at line 67 of file heapam_handler.c.

68 {
69  return &TTSOpsBufferHeapTuple;
70 }
const TupleTableSlotOps TTSOpsBufferHeapTuple
Definition: execTuples.c:86

References TTSOpsBufferHeapTuple.

◆ heapam_tuple_complete_speculative()

static void heapam_tuple_complete_speculative ( Relation  relation,
TupleTableSlot slot,
uint32  specToken,
bool  succeeded 
)
static

Definition at line 283 of file heapam_handler.c.

285 {
286  bool shouldFree = true;
287  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
288 
289  /* adjust the tuple's state accordingly */
290  if (succeeded)
291  heap_finish_speculative(relation, &slot->tts_tid);
292  else
293  heap_abort_speculative(relation, &slot->tts_tid);
294 
295  if (shouldFree)
296  pfree(tuple);
297 }
void heap_finish_speculative(Relation relation, ItemPointer tid)
Definition: heapam.c:5635
void heap_abort_speculative(Relation relation, ItemPointer tid)
Definition: heapam.c:5722
ItemPointerData tts_tid
Definition: tuptable.h:129

References ExecFetchSlotHeapTuple(), heap_abort_speculative(), heap_finish_speculative(), pfree(), and TupleTableSlot::tts_tid.

◆ heapam_tuple_delete()

static TM_Result heapam_tuple_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  snapshot,
Snapshot  crosscheck,
bool  wait,
TM_FailureData tmfd,
bool  changingPart 
)
static

Definition at line 300 of file heapam_handler.c.

303 {
304  /*
305  * Currently, deletion of index tuples is handled by VACUUM. If the
306  * storage were to clean up dead tuples by itself, that would also be
307  * the time to remove the corresponding index tuples.
308  */
309  return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart);
310 }
TM_Result heap_delete(Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
Definition: heapam.c:2506

References heap_delete().
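
For orientation, a hedged sketch of how backend or extension code might drive this callback through the generic tableam wrapper table_tuple_delete() (tableam.h). delete_one_tuple is an invented helper name; real callers such as ExecDelete() do considerably more (EvalPlanQual rechecks, triggers, transition tables):

#include "postgres.h"

#include "access/tableam.h"
#include "access/xact.h"
#include "utils/snapmgr.h"

/* Invented helper: delete the tuple at *tid or error out on concurrency. */
static void
delete_one_tuple(Relation rel, ItemPointer tid)
{
    TM_FailureData tmfd;
    TM_Result   result;

    result = table_tuple_delete(rel, tid,
                                GetCurrentCommandId(true),
                                GetActiveSnapshot(),
                                InvalidSnapshot,    /* no crosscheck snapshot */
                                true,               /* wait for lockers */
                                &tmfd,
                                false);             /* not moving partitions */

    switch (result)
    {
        case TM_Ok:
            break;
        case TM_SelfModified:
            /* already deleted by this command; nothing more to do */
            break;
        case TM_Updated:
        case TM_Deleted:
            ereport(ERROR,
                    (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                     errmsg("tuple concurrently modified")));
            break;
        default:
            elog(ERROR, "unexpected table_tuple_delete result: %d", result);
            break;
    }
}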

◆ heapam_tuple_insert()

static void heapam_tuple_insert ( Relation  relation,
TupleTableSlot slot,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)
static

Definition at line 241 of file heapam_handler.c.

243 {
244  bool shouldFree = true;
245  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
246 
247  /* Update the tuple with table oid */
248  slot->tts_tableOid = RelationGetRelid(relation);
249  tuple->t_tableOid = slot->tts_tableOid;
250 
251  /* Perform the insertion, and copy the resulting ItemPointer */
252  heap_insert(relation, tuple, cid, options, bistate);
253  ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
254 
255  if (shouldFree)
256  pfree(tuple);
257 }
void heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
Definition: heapam.c:1817
static void ItemPointerCopy(const ItemPointerData *fromPointer, ItemPointerData *toPointer)
Definition: itemptr.h:172

References ExecFetchSlotHeapTuple(), heap_insert(), ItemPointerCopy(), pfree(), RelationGetRelid, HeapTupleData::t_self, HeapTupleData::t_tableOid, TupleTableSlot::tts_tableOid, and TupleTableSlot::tts_tid.
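
As a usage illustration, a hedged sketch of inserting one row through the tableam wrapper table_tuple_insert() (tableam.h). insert_one_row is an invented helper; the virtual-tuple protocol (fill tts_values/tts_isnull, then ExecStoreVirtualTuple) works here because heapam_tuple_insert materializes the slot via ExecFetchSlotHeapTuple:

#include "postgres.h"

#include "access/tableam.h"
#include "access/xact.h"
#include "executor/tuptable.h"

/* Invented helper: insert one row given pre-computed column values. */
static void
insert_one_row(Relation rel, Datum *values, bool *isnull)
{
    TupleTableSlot *slot = table_slot_create(rel, NULL);
    int         natts = slot->tts_tupleDescriptor->natts;

    ExecClearTuple(slot);
    memcpy(slot->tts_values, values, sizeof(Datum) * natts);
    memcpy(slot->tts_isnull, isnull, sizeof(bool) * natts);
    ExecStoreVirtualTuple(slot);

    table_tuple_insert(rel, slot, GetCurrentCommandId(true),
                       0 /* options */ , NULL /* bistate */ );

    ExecDropSingleTupleTableSlot(slot);
}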

◆ heapam_tuple_insert_speculative()

static void heapam_tuple_insert_speculative ( Relation  relation,
TupleTableSlot slot,
CommandId  cid,
int  options,
BulkInsertState  bistate,
uint32  specToken 
)
static

Definition at line 260 of file heapam_handler.c.

263 {
264  bool shouldFree = true;
265  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
266 
267  /* Update the tuple with table oid */
268  slot->tts_tableOid = RelationGetRelid(relation);
269  tuple->t_tableOid = slot->tts_tableOid;
270 
271  HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);
272  options |= HEAP_INSERT_SPECULATIVE;
273 
274  /* Perform the insertion, and copy the resulting ItemPointer */
275  heap_insert(relation, tuple, cid, options, bistate);
276  ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
277 
278  if (shouldFree)
279  pfree(tuple);
280 }
#define HEAP_INSERT_SPECULATIVE
Definition: heapam.h:37
#define HeapTupleHeaderSetSpeculativeToken(tup, token)
Definition: htup_details.h:439

References ExecFetchSlotHeapTuple(), heap_insert(), HEAP_INSERT_SPECULATIVE, HeapTupleHeaderSetSpeculativeToken, ItemPointerCopy(), pfree(), RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_self, HeapTupleData::t_tableOid, TupleTableSlot::tts_tableOid, and TupleTableSlot::tts_tid.
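
The speculative token stamped above is one half of the INSERT ... ON CONFLICT protocol. A hedged sketch of the full dance, heavily simplified from nodeModifyTable.c (check_unique_indexes is an invented stand-in for ExecInsertIndexTuples plus its conflict recheck; error handling omitted):

#include "postgres.h"

#include "access/tableam.h"
#include "access/xact.h"
#include "storage/lmgr.h"

static bool check_unique_indexes(Relation rel, TupleTableSlot *slot); /* stand-in */

static void
speculative_insert(Relation rel, TupleTableSlot *slot)
{
    uint32      specToken;
    bool        conflicted;

    /* take the speculative-insertion lock and stamp the tuple with it */
    specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
    table_tuple_insert_speculative(rel, slot, GetCurrentCommandId(true),
                                   0, NULL, specToken);

    /* probe the unique indexes; on conflict the insertion is aborted */
    conflicted = check_unique_indexes(rel, slot);
    table_tuple_complete_speculative(rel, slot, specToken, !conflicted);

    SpeculativeInsertionLockRelease(GetCurrentTransactionId());
}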

◆ heapam_tuple_lock()

static TM_Result heapam_tuple_lock ( Relation  relation,
ItemPointer  tid,
Snapshot  snapshot,
TupleTableSlot slot,
CommandId  cid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
uint8  flags,
TM_FailureData tmfd 
)
static

Definition at line 359 of file heapam_handler.c.

363 {
364  BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
365  TM_Result result;
366  Buffer buffer;
367  HeapTuple tuple = &bslot->base.tupdata;
368  bool follow_updates;
369 
370  follow_updates = (flags & TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS) != 0;
371  tmfd->traversed = false;
372 
373  Assert(TTS_IS_BUFFERTUPLE(slot));
374 
375 tuple_lock_retry:
376  tuple->t_self = *tid;
377  result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
378  follow_updates, &buffer, tmfd);
379 
380  if (result == TM_Updated &&
381  (flags & TUPLE_LOCK_FLAG_FIND_LAST_VERSION))
382  {
383  /* Should not encounter speculative tuple on recheck */
384  Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
385 
386  ReleaseBuffer(buffer);
387 
388  if (!ItemPointerEquals(&tmfd->ctid, &tuple->t_self))
389  {
390  SnapshotData SnapshotDirty;
391  TransactionId priorXmax;
392 
393  /* it was updated, so look at the updated version */
394  *tid = tmfd->ctid;
395  /* updated row should have xmin matching this xmax */
396  priorXmax = tmfd->xmax;
397 
398  /* signal that a tuple later in the chain is getting locked */
399  tmfd->traversed = true;
400 
401  /*
402  * fetch target tuple
403  *
404  * Loop here to deal with updated or busy tuples
405  */
406  InitDirtySnapshot(SnapshotDirty);
407  for (;;)
408  {
409  if (ItemPointerIndicatesMovedPartitions(tid))
410  ereport(ERROR,
411  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
412  errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
413 
414  tuple->t_self = *tid;
415  if (heap_fetch(relation, &SnapshotDirty, tuple, &buffer, true))
416  {
417  /*
418  * If xmin isn't what we're expecting, the slot must have
419  * been recycled and reused for an unrelated tuple. This
420  * implies that the latest version of the row was deleted,
421  * so we need do nothing. (Should be safe to examine xmin
422  * without getting buffer's content lock. We assume
423  * reading a TransactionId to be atomic, and Xmin never
424  * changes in an existing tuple, except to invalid or
425  * frozen, and neither of those can match priorXmax.)
426  */
427  if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple->t_data),
428  priorXmax))
429  {
430  ReleaseBuffer(buffer);
431  return TM_Deleted;
432  }
433 
434  /* otherwise xmin should not be dirty... */
435  if (TransactionIdIsValid(SnapshotDirty.xmin))
436  ereport(ERROR,
437  (errcode(ERRCODE_DATA_CORRUPTED),
438  errmsg_internal("t_xmin %u is uncommitted in tuple (%u,%u) to be updated in table \"%s\"",
439  SnapshotDirty.xmin,
440  ItemPointerGetBlockNumber(&tuple->t_self),
441  ItemPointerGetOffsetNumber(&tuple->t_self),
442  RelationGetRelationName(relation))));
443 
444  /*
445  * If tuple is being updated by other transaction then we
446  * have to wait for its commit/abort, or die trying.
447  */
448  if (TransactionIdIsValid(SnapshotDirty.xmax))
449  {
450  ReleaseBuffer(buffer);
451  switch (wait_policy)
452  {
453  case LockWaitBlock:
454  XactLockTableWait(SnapshotDirty.xmax,
455  relation, &tuple->t_self,
456  XLTW_FetchUpdated);
457  break;
458  case LockWaitSkip:
459  if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
460  /* skip instead of waiting */
461  return TM_WouldBlock;
462  break;
463  case LockWaitError:
464  if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
465  ereport(ERROR,
466  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
467  errmsg("could not obtain lock on row in relation \"%s\"",
468  RelationGetRelationName(relation))));
469  break;
470  }
471  continue; /* loop back to repeat heap_fetch */
472  }
473 
474  /*
475  * If tuple was inserted by our own transaction, we have
476  * to check cmin against cid: cmin >= current CID means
477  * our command cannot see the tuple, so we should ignore
478  * it. Otherwise heap_lock_tuple() will throw an error,
479  * and so would any later attempt to update or delete the
480  * tuple. (We need not check cmax because
481  * HeapTupleSatisfiesDirty will consider a tuple deleted
482  * by our transaction dead, regardless of cmax.) We just
483  * checked that priorXmax == xmin, so we can test that
484  * variable instead of doing HeapTupleHeaderGetXmin again.
485  */
486  if (TransactionIdIsCurrentTransactionId(priorXmax) &&
487  HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
488  {
489  tmfd->xmax = priorXmax;
490 
491  /*
492  * Cmin is the problematic value, so store that. See
493  * above.
494  */
495  tmfd->cmax = HeapTupleHeaderGetCmin(tuple->t_data);
496  ReleaseBuffer(buffer);
497  return TM_SelfModified;
498  }
499 
500  /*
501  * This is a live tuple, so try to lock it again.
502  */
503  ReleaseBuffer(buffer);
504  goto tuple_lock_retry;
505  }
506 
507  /*
508  * If the referenced slot was actually empty, the latest
509  * version of the row must have been deleted, so we need do
510  * nothing.
511  */
512  if (tuple->t_data == NULL)
513  {
514  Assert(!BufferIsValid(buffer));
515  return TM_Deleted;
516  }
517 
518  /*
519  * As above, if xmin isn't what we're expecting, do nothing.
520  */
521  if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple->t_data),
522  priorXmax))
523  {
524  ReleaseBuffer(buffer);
525  return TM_Deleted;
526  }
527 
528  /*
529  * If we get here, the tuple was found but failed
530  * SnapshotDirty. Assuming the xmin is either a committed xact
531  * or our own xact (as it certainly should be if we're trying
532  * to modify the tuple), this must mean that the row was
533  * updated or deleted by either a committed xact or our own
534  * xact. If it was deleted, we can ignore it; if it was
535  * updated then chain up to the next version and repeat the
536  * whole process.
537  *
538  * As above, it should be safe to examine xmax and t_ctid
539  * without the buffer content lock, because they can't be
540  * changing. We'd better hold a buffer pin though.
541  */
542  if (ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
543  {
544  /* deleted, so forget about it */
545  ReleaseBuffer(buffer);
546  return TM_Deleted;
547  }
548 
549  /* updated, so look at the updated row */
550  *tid = tuple->t_data->t_ctid;
551  /* updated row should have xmin matching this xmax */
552  priorXmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
553  ReleaseBuffer(buffer);
554  /* loop back to fetch next in chain */
555  }
556  }
557  else
558  {
559  /* tuple was deleted, so give up */
560  return TM_Deleted;
561  }
562  }
563 
564  slot->tts_tableOid = RelationGetRelid(relation);
565  tuple->t_tableOid = slot->tts_tableOid;
566 
567  /* store in slot, transferring existing pin */
568  ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);
569 
570  return result;
571 }
CommandId HeapTupleHeaderGetCmin(HeapTupleHeader tup)
Definition: combocid.c:104
int errmsg(const char *fmt,...)
Definition: elog.c:1069
TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, TM_FailureData *tmfd)
Definition: heapam.c:4126
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:428
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:35
static bool ItemPointerIndicatesMovedPartitions(const ItemPointerData *pointer)
Definition: itemptr.h:197
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:741
@ XLTW_FetchUpdated
Definition: lmgr.h:33
@ LockWaitSkip
Definition: lockoptions.h:41
@ LockWaitBlock
Definition: lockoptions.h:39
@ LockWaitError
Definition: lockoptions.h:43
#define InitDirtySnapshot(snapshotdata)
Definition: snapmgr.h:40
ItemPointerData t_ctid
Definition: htup_details.h:161
TransactionId xmin
Definition: snapshot.h:157
TransactionId xmax
Definition: snapshot.h:158
bool traversed
Definition: tableam.h:145
TransactionId xmax
Definition: tableam.h:143
CommandId cmax
Definition: tableam.h:144
ItemPointerData ctid
Definition: tableam.h:142
TM_Result
Definition: tableam.h:72
@ TM_Deleted
Definition: tableam.h:92
@ TM_WouldBlock
Definition: tableam.h:102
@ TM_Updated
Definition: tableam.h:89
@ TM_SelfModified
Definition: tableam.h:83
#define TUPLE_LOCK_FLAG_FIND_LAST_VERSION
Definition: tableam.h:260
#define TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS
Definition: tableam.h:258
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43

References Assert(), BufferIsValid(), TM_FailureData::cmax, ConditionalXactLockTableWait(), TM_FailureData::ctid, ereport, errcode(), ERRCODE_DATA_CORRUPTED, ERRCODE_T_R_SERIALIZATION_FAILURE, errmsg(), errmsg_internal(), ERROR, ExecStorePinnedBufferHeapTuple(), heap_fetch(), heap_lock_tuple(), HeapTupleHeaderGetCmin(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIsSpeculative, InitDirtySnapshot, ItemPointerEquals(), ItemPointerGetBlockNumber(), ItemPointerGetOffsetNumber(), ItemPointerIndicatesMovedPartitions(), LockWaitBlock, LockWaitError, LockWaitSkip, mode, RelationGetRelationName, RelationGetRelid, ReleaseBuffer(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_self, HeapTupleData::t_tableOid, TM_Deleted, TM_SelfModified, TM_Updated, TM_WouldBlock, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), TransactionIdIsValid, TM_FailureData::traversed, TTS_IS_BUFFERTUPLE, TupleTableSlot::tts_tableOid, TUPLE_LOCK_FLAG_FIND_LAST_VERSION, TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS, XactLockTableWait(), XLTW_FetchUpdated, TM_FailureData::xmax, SnapshotData::xmax, and SnapshotData::xmin.
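
A hedged caller-side sketch: locking the newest version of a row through the generic wrapper table_tuple_lock() (tableam.h), the way EvalPlanQual-style callers do. lock_latest_version is an invented name:

#include "postgres.h"

#include "access/tableam.h"
#include "access/xact.h"
#include "utils/snapmgr.h"

/* Invented helper: lock the latest version of the row originally at *tid. */
static TM_Result
lock_latest_version(Relation rel, ItemPointer tid, TupleTableSlot *slot)
{
    TM_FailureData tmfd;
    TM_Result   result;

    result = table_tuple_lock(rel, tid, GetActiveSnapshot(), slot,
                              GetCurrentCommandId(false),
                              LockTupleExclusive, LockWaitBlock,
                              TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
                              &tmfd);

    /* tmfd.traversed reports that a newer version than *tid was locked */
    return result;
}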

◆ heapam_tuple_satisfies_snapshot()

static bool heapam_tuple_satisfies_snapshot ( Relation  rel,
TupleTableSlot slot,
Snapshot  snapshot 
)
static

Definition at line 213 of file heapam_handler.c.

215 {
216  BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
217  bool res;
218 
219  Assert(TTS_IS_BUFFERTUPLE(slot));
220  Assert(BufferIsValid(bslot->buffer));
221 
222  /*
223  * We need buffer pin and lock to call HeapTupleSatisfiesVisibility.
224  * Caller should be holding pin, but not lock.
225  */
226  LockBuffer(bslot->buffer, BUFFER_LOCK_SHARE);
227  res = HeapTupleSatisfiesVisibility(bslot->base.tuple, snapshot,
228  bslot->buffer);
229  LockBuffer(bslot->buffer, BUFFER_LOCK_UNLOCK);
230 
231  return res;
232 }

References Assert(), BufferHeapTupleTableSlot::buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferIsValid(), HeapTupleSatisfiesVisibility(), LockBuffer(), res, and TTS_IS_BUFFERTUPLE.

◆ heapam_tuple_tid_valid()

static bool heapam_tuple_tid_valid ( TableScanDesc  scan,
ItemPointer  tid 
)
static

Definition at line 204 of file heapam_handler.c.

205 {
206  HeapScanDesc hscan = (HeapScanDesc) scan;
207 
208  return ItemPointerIsValid(tid) &&
209  ItemPointerGetBlockNumber(tid) < hscan->rs_nblocks;
210  }
static bool ItemPointerIsValid(const ItemPointerData *pointer)
Definition: itemptr.h:83

References ItemPointerGetBlockNumber(), ItemPointerIsValid(), and HeapScanDescData::rs_nblocks.

◆ heapam_tuple_update()

static TM_Result heapam_tuple_update ( Relation  relation,
ItemPointer  otid,
TupleTableSlot slot,
CommandId  cid,
Snapshot  snapshot,
Snapshot  crosscheck,
bool  wait,
TM_FailureData tmfd,
LockTupleMode lockmode,
TU_UpdateIndexes update_indexes 
)
static

Definition at line 314 of file heapam_handler.c.

318 {
319  bool shouldFree = true;
320  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
321  TM_Result result;
322 
323  /* Update the tuple with table oid */
324  slot->tts_tableOid = RelationGetRelid(relation);
325  tuple->t_tableOid = slot->tts_tableOid;
326 
327  result = heap_update(relation, otid, tuple, cid, crosscheck, wait,
328  tmfd, lockmode, update_indexes);
329  ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
330 
331  /*
332  * Decide whether new index entries are needed for the tuple
333  *
334  * Note: heap_update returns the tid (location) of the new tuple in the
335  * t_self field.
336  *
337  * If the update is not HOT, we must update all indexes. If the update is
338  * HOT, it could be that we updated summarized columns, so we either
339  * update only summarized indexes, or none at all.
340  */
341  if (result != TM_Ok)
342  {
343  Assert(*update_indexes == TU_None);
344  *update_indexes = TU_None;
345  }
346  else if (!HeapTupleIsHeapOnly(tuple))
347  Assert(*update_indexes == TU_All);
348  else
349  Assert((*update_indexes == TU_Summarizing) ||
350  (*update_indexes == TU_None));
351 
352  if (shouldFree)
353  pfree(tuple);
354 
355  return result;
356 }
TM_Result heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
Definition: heapam.c:2968
@ TU_Summarizing
Definition: tableam.h:118
@ TU_All
Definition: tableam.h:115
@ TU_None
Definition: tableam.h:112
@ TM_Ok
Definition: tableam.h:77

References Assert(), ExecFetchSlotHeapTuple(), heap_update(), HeapTupleIsHeapOnly, ItemPointerCopy(), pfree(), RelationGetRelid, HeapTupleData::t_self, HeapTupleData::t_tableOid, TM_Ok, TupleTableSlot::tts_tableOid, TupleTableSlot::tts_tid, TU_All, TU_None, and TU_Summarizing.
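
A hedged sketch of how a caller consumes update_indexes after a successful update, loosely modeled on ExecUpdate(); update_and_index and insert_index_entries are invented stand-ins (the latter for ExecInsertIndexTuples):

#include "postgres.h"

#include "access/tableam.h"

static void insert_index_entries(TupleTableSlot *slot, bool summarizing_only); /* stand-in */

static void
update_and_index(Relation rel, ItemPointer otid, TupleTableSlot *slot,
                 CommandId cid, Snapshot snapshot)
{
    TM_FailureData tmfd;
    LockTupleMode lockmode;
    TU_UpdateIndexes update_indexes;

    if (table_tuple_update(rel, otid, slot, cid, snapshot, InvalidSnapshot,
                           true, &tmfd, &lockmode, &update_indexes) != TM_Ok)
        return;                 /* concurrency failure: a real caller retries or errors */

    if (update_indexes == TU_All)
        insert_index_entries(slot, false);  /* non-HOT: update every index */
    else if (update_indexes == TU_Summarizing)
        insert_index_entries(slot, true);   /* HOT touching summarized cols: BRIN etc. */
    /* TU_None: plain HOT update, no index maintenance at all */
}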

◆ reform_and_rewrite_tuple()

static void reform_and_rewrite_tuple ( HeapTuple  tuple,
Relation  OldHeap,
Relation  NewHeap,
Datum values,
bool isnull,
RewriteState  rwstate 
)
static

Definition at line 2462 of file heapam_handler.c.

2465 {
2466  TupleDesc oldTupDesc = RelationGetDescr(OldHeap);
2467  TupleDesc newTupDesc = RelationGetDescr(NewHeap);
2468  HeapTuple copiedTuple;
2469  int i;
2470 
2471  heap_deform_tuple(tuple, oldTupDesc, values, isnull);
2472 
2473  /* Be sure to null out any dropped columns */
2474  for (i = 0; i < newTupDesc->natts; i++)
2475  {
2476  if (TupleDescAttr(newTupDesc, i)->attisdropped)
2477  isnull[i] = true;
2478  }
2479 
2480  copiedTuple = heap_form_tuple(newTupDesc, values, isnull);
2481 
2482  /* The heap rewrite module does the rest */
2483  rewrite_heap_tuple(rwstate, tuple, copiedTuple);
2484 
2485  heap_freetuple(copiedTuple);
2486 }
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:1108
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition: heaptuple.c:1337
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1426
void rewrite_heap_tuple(RewriteState state, HeapTuple old_tuple, HeapTuple new_tuple)
Definition: rewriteheap.c:360

References heap_deform_tuple(), heap_form_tuple(), heap_freetuple(), i, TupleDescData::natts, RelationGetDescr, rewrite_heap_tuple(), TupleDescAttr, and values.

Referenced by heapam_relation_copy_for_cluster().

◆ SampleHeapTupleVisible()

static bool SampleHeapTupleVisible ( TableScanDesc  scan,
Buffer  buffer,
HeapTuple  tuple,
OffsetNumber  tupoffset 
)
static

Definition at line 2492 of file heapam_handler.c.

2495 {
2496  HeapScanDesc hscan = (HeapScanDesc) scan;
2497 
2498  if (scan->rs_flags & SO_ALLOW_PAGEMODE)
2499  {
2500  /*
2501  * In pageatatime mode, heapgetpage() already did visibility checks,
2502  * so just look at the info it left in rs_vistuples[].
2503  *
2504  * We use a binary search over the known-sorted array. Note: we could
2505  * save some effort if we insisted that NextSampleTuple select tuples
2506  * in increasing order, but it's not clear that there would be enough
2507  * gain to justify the restriction.
2508  */
2509  int start = 0,
2510  end = hscan->rs_ntuples - 1;
2511 
2512  while (start <= end)
2513  {
2514  int mid = (start + end) / 2;
2515  OffsetNumber curoffset = hscan->rs_vistuples[mid];
2516 
2517  if (tupoffset == curoffset)
2518  return true;
2519  else if (tupoffset < curoffset)
2520  end = mid - 1;
2521  else
2522  start = mid + 1;
2523  }
2524 
2525  return false;
2526  }
2527  else
2528  {
2529  /* Otherwise, we have to check the tuple individually. */
2530  return HeapTupleSatisfiesVisibility(tuple, scan->rs_snapshot,
2531  buffer);
2532  }
2533 }

References HeapTupleSatisfiesVisibility(), TableScanDescData::rs_flags, HeapScanDescData::rs_ntuples, TableScanDescData::rs_snapshot, HeapScanDescData::rs_vistuples, and SO_ALLOW_PAGEMODE.

Referenced by heapam_scan_sample_next_tuple().
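
The binary search above is self-contained enough to lift into a standalone test; this sketch merely renames it (offset_is_visible is an invented name) and runs it over a sample of the sorted rs_vistuples[] array:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

typedef uint16_t OffsetNumber;

/* standalone copy of the binary search over a sorted offset array */
static bool
offset_is_visible(const OffsetNumber *vistuples, int ntuples,
                  OffsetNumber tupoffset)
{
    int         start = 0,
                end = ntuples - 1;

    while (start <= end)
    {
        int         mid = (start + end) / 2;
        OffsetNumber curoffset = vistuples[mid];

        if (tupoffset == curoffset)
            return true;
        else if (tupoffset < curoffset)
            end = mid - 1;
        else
            start = mid + 1;
    }
    return false;
}

int
main(void)
{
    OffsetNumber vis[] = {2, 5, 9, 14};

    printf("%d %d\n",
           offset_is_visible(vis, 4, 9),
           offset_is_visible(vis, 4, 10));  /* prints 1 0 */
    return 0;
}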

Variable Documentation

◆ heapam_methods

static const TableAmRoutine heapam_methods
static

Definition at line 58 of file heapam_handler.c.

Referenced by GetHeapamTableAmRoutine(), and heap_tableam_handler().