PostgreSQL Source Code  git master
tableam.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/syncscan.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "optimizer/plancat.h"
#include "port/pg_bitutils.h"
#include "storage/bufmgr.h"
#include "storage/shmem.h"
#include "storage/smgr.h"
Include dependency graph for tableam.c:

Go to the source code of this file.

Macros

#define PARALLEL_SEQSCAN_NCHUNKS   2048
 
#define PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS   64
 
#define PARALLEL_SEQSCAN_MAX_CHUNK_SIZE   8192
 

Functions

const TupleTableSlotOps * table_slot_callbacks (Relation relation)
 
TupleTableSlot * table_slot_create (Relation relation, List **reglist)
 
TableScanDesc table_beginscan_catalog (Relation relation, int nkeys, struct ScanKeyData *key)
 
void table_scan_update_snapshot (TableScanDesc scan, Snapshot snapshot)
 
Size table_parallelscan_estimate (Relation rel, Snapshot snapshot)
 
void table_parallelscan_initialize (Relation rel, ParallelTableScanDesc pscan, Snapshot snapshot)
 
TableScanDesc table_beginscan_parallel (Relation relation, ParallelTableScanDesc parallel_scan)
 
bool table_index_fetch_tuple_check (Relation rel, ItemPointer tid, Snapshot snapshot, bool *all_dead)
 
void table_tuple_get_latest_tid (TableScanDesc scan, ItemPointer tid)
 
void simple_table_tuple_insert (Relation rel, TupleTableSlot *slot)
 
void simple_table_tuple_delete (Relation rel, ItemPointer tid, Snapshot snapshot)
 
void simple_table_tuple_update (Relation rel, ItemPointer otid, TupleTableSlot *slot, Snapshot snapshot, bool *update_indexes)
 
Size table_block_parallelscan_estimate (Relation rel)
 
Size table_block_parallelscan_initialize (Relation rel, ParallelTableScanDesc pscan)
 
void table_block_parallelscan_reinitialize (Relation rel, ParallelTableScanDesc pscan)
 
void table_block_parallelscan_startblock_init (Relation rel, ParallelBlockTableScanWorker pbscanwork, ParallelBlockTableScanDesc pbscan)
 
BlockNumber table_block_parallelscan_nextpage (Relation rel, ParallelBlockTableScanWorker pbscanwork, ParallelBlockTableScanDesc pbscan)
 
uint64 table_block_relation_size (Relation rel, ForkNumber forkNumber)
 
void table_block_relation_estimate_size (Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac, Size overhead_bytes_per_tuple, Size usable_bytes_per_page)
 

Variables

char * default_table_access_method = DEFAULT_TABLE_ACCESS_METHOD
 
bool synchronize_seqscans = true
 

Macro Definition Documentation

◆ PARALLEL_SEQSCAN_MAX_CHUNK_SIZE

#define PARALLEL_SEQSCAN_MAX_CHUNK_SIZE   8192

Definition at line 45 of file tableam.c.

Referenced by table_block_parallelscan_startblock_init().

◆ PARALLEL_SEQSCAN_NCHUNKS

#define PARALLEL_SEQSCAN_NCHUNKS   2048

Definition at line 41 of file tableam.c.

Referenced by table_block_parallelscan_startblock_init().

◆ PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS

#define PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS   64

Definition at line 43 of file tableam.c.

Referenced by table_block_parallelscan_nextpage().

Function Documentation

◆ simple_table_tuple_delete()

void simple_table_tuple_delete ( Relation  rel,
ItemPointer  tid,
Snapshot  snapshot 
)

Definition at line 301 of file tableam.c.

References elog, ERROR, GetCurrentCommandId(), InvalidSnapshot, table_tuple_delete(), TM_Deleted, TM_Ok, TM_SelfModified, and TM_Updated.

Referenced by ExecSimpleRelationDelete(), and table_scan_sample_next_tuple().

302 {
303  TM_Result result;
304  TM_FailureData tmfd;
305 
306  result = table_tuple_delete(rel, tid,
307  GetCurrentCommandId(true),
308  snapshot, InvalidSnapshot,
309  true /* wait for commit */ ,
310  &tmfd, false /* changingPart */ );
311 
312  switch (result)
313  {
314  case TM_SelfModified:
315  /* Tuple was already updated in current command? */
316  elog(ERROR, "tuple already updated by self");
317  break;
318 
319  case TM_Ok:
320  /* done successfully */
321  break;
322 
323  case TM_Updated:
324  elog(ERROR, "tuple concurrently updated");
325  break;
326 
327  case TM_Deleted:
328  elog(ERROR, "tuple concurrently deleted");
329  break;
330 
331  default:
332  elog(ERROR, "unrecognized table_tuple_delete status: %u", result);
333  break;
334  }
335 }

◆ simple_table_tuple_insert()

void simple_table_tuple_insert ( Relation  rel,
TupleTableSlot *  slot 
)

Definition at line 287 of file tableam.c.

References GetCurrentCommandId(), and table_tuple_insert().

Referenced by ExecSimpleRelationInsert(), and table_scan_sample_next_tuple().

288 {
289  table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);
290 }
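As a rough usage sketch (not from the PostgreSQL sources): assuming an open transaction, a relation already locked by the caller, and a single int4 column, a caller could drive this function as follows. Error handling is omitted.

#include "postgres.h"
#include "access/tableam.h"
#include "executor/tuptable.h"
#include "utils/rel.h"

/* Hypothetical helper: insert one row through simple_table_tuple_insert. */
static void
insert_one_int(Relation rel, int32 value)
{
    TupleTableSlot *slot;

    /* Build a virtual slot matching the relation's tuple descriptor. */
    slot = MakeSingleTupleTableSlot(RelationGetDescr(rel), &TTSOpsVirtual);
    slot->tts_values[0] = Int32GetDatum(value);
    slot->tts_isnull[0] = false;
    ExecStoreVirtualTuple(slot);

    simple_table_tuple_insert(rel, slot);

    ExecDropSingleTupleTableSlot(slot);
}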

◆ simple_table_tuple_update()

void simple_table_tuple_update ( Relation  rel,
ItemPointer  otid,
TupleTableSlot *  slot,
Snapshot  snapshot,
bool *  update_indexes 
)

Definition at line 346 of file tableam.c.

References elog, ERROR, GetCurrentCommandId(), InvalidSnapshot, table_tuple_update(), TM_Deleted, TM_Ok, TM_SelfModified, and TM_Updated.

Referenced by ExecSimpleRelationUpdate(), and table_scan_sample_next_tuple().

350 {
351  TM_Result result;
352  TM_FailureData tmfd;
353  LockTupleMode lockmode;
354 
355  result = table_tuple_update(rel, otid, slot,
356  GetCurrentCommandId(true),
357  snapshot, InvalidSnapshot,
358  true /* wait for commit */ ,
359  &tmfd, &lockmode, update_indexes);
360 
361  switch (result)
362  {
363  case TM_SelfModified:
364  /* Tuple was already updated in current command? */
365  elog(ERROR, "tuple already updated by self");
366  break;
367 
368  case TM_Ok:
369  /* done successfully */
370  break;
371 
372  case TM_Updated:
373  elog(ERROR, "tuple concurrently updated");
374  break;
375 
376  case TM_Deleted:
377  elog(ERROR, "tuple concurrently deleted");
378  break;
379 
380  default:
381  elog(ERROR, "unrecognized table_tuple_update status: %u", result);
382  break;
383  }
384 
385 }

◆ table_beginscan_catalog()

TableScanDesc table_beginscan_catalog ( Relation  relation,
int  nkeys,
struct ScanKeyData *  key 
)

Definition at line 112 of file tableam.c.

References GetCatalogSnapshot(), RelationData::rd_tableam, RegisterSnapshot(), RelationGetRelid, TableAmRoutine::scan_begin, SO_ALLOW_PAGEMODE, SO_ALLOW_STRAT, SO_ALLOW_SYNC, SO_TEMP_SNAPSHOT, and SO_TYPE_SEQSCAN.

Referenced by AlterTableMoveAll(), AlterTableSpaceOptions(), check_db_file_conflict(), createdb(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), index_update_stats(), objectsInSchemaToOids(), populate_typ_array(), ReindexMultipleTables(), remove_dbtablespaces(), RemoveSubscriptionRel(), RenameTableSpace(), table_beginscan(), ThereIsAtLeastOneRole(), and vac_truncate_clog().

113 {
114  uint32 flags = SO_TYPE_SEQSCAN |
115  SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
116  Oid relid = RelationGetRelid(relation);
117  Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
118 
119  return relation->rd_tableam->scan_begin(relation, snapshot, nkeys, key,
120  NULL, flags);
121 }
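For orientation, a minimal sketch of the usual calling pattern (illustrative only; pg_database is just an example catalog, and error handling is omitted):

#include "postgres.h"
#include "access/table.h"
#include "access/tableam.h"
#include "catalog/pg_database.h"
#include "executor/tuptable.h"
#include "utils/rel.h"

/* Walk a catalog under its own registered catalog snapshot, much as the
 * callers listed above do. */
static void
walk_pg_database(void)
{
    Relation        rel = table_open(DatabaseRelationId, AccessShareLock);
    TableScanDesc   scan = table_beginscan_catalog(rel, 0, NULL);
    TupleTableSlot *slot = table_slot_create(rel, NULL);

    while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
    {
        /* examine one pg_database row through the slot */
    }

    ExecDropSingleTupleTableSlot(slot);
    table_endscan(scan);
    table_close(rel, AccessShareLock);
}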

◆ table_beginscan_parallel()

TableScanDesc table_beginscan_parallel ( Relation  relation,
ParallelTableScanDesc  parallel_scan 
)

Definition at line 175 of file tableam.c.

References Assert, ParallelTableScanDescData::phs_relid, ParallelTableScanDescData::phs_snapshot_any, ParallelTableScanDescData::phs_snapshot_off, RelationData::rd_tableam, RegisterSnapshot(), RelationGetRelid, RestoreSnapshot(), TableAmRoutine::scan_begin, SnapshotAny, SO_ALLOW_PAGEMODE, SO_ALLOW_STRAT, SO_ALLOW_SYNC, SO_TEMP_SNAPSHOT, and SO_TYPE_SEQSCAN.

Referenced by _bt_parallel_scan_and_sort(), ExecSeqScanInitializeDSM(), ExecSeqScanInitializeWorker(), and table_scan_getnextslot().

176 {
177  Snapshot snapshot;
178  uint32 flags = SO_TYPE_SEQSCAN |
179  SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
180 
181  Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
182 
183  if (!parallel_scan->phs_snapshot_any)
184  {
185  /* Snapshot was serialized -- restore it */
186  snapshot = RestoreSnapshot((char *) parallel_scan +
187  parallel_scan->phs_snapshot_off);
188  RegisterSnapshot(snapshot);
189  flags |= SO_TEMP_SNAPSHOT;
190  }
191  else
192  {
193  /* SnapshotAny passed by caller (not serialized) */
194  snapshot = SnapshotAny;
195  }
196 
197  return relation->rd_tableam->scan_begin(relation, snapshot, 0, NULL,
198  parallel_scan, flags);
199 }
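On the worker side this is reached from the parallel executor machinery; a compressed sketch, loosely following ExecSeqScanInitializeWorker() listed above (the shared-memory TOC and key parameters are assumptions of this example):

#include "postgres.h"
#include "access/tableam.h"
#include "storage/shm_toc.h"

/* Look up the ParallelTableScanDesc the leader placed in dynamic shared
 * memory and attach this worker's scan to it. */
static TableScanDesc
attach_to_parallel_scan(Relation rel, shm_toc *toc, uint64 key)
{
    ParallelTableScanDesc pscan = shm_toc_lookup(toc, key, false);

    return table_beginscan_parallel(rel, pscan);
}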

◆ table_block_parallelscan_estimate()

Size table_block_parallelscan_estimate ( Relation  rel)

Definition at line 394 of file tableam.c.

Referenced by SampleHeapTupleVisible(), and table_scan_sample_next_tuple().

395 {
396  return sizeof(ParallelBlockTableScanDescData);
397 }

◆ table_block_parallelscan_initialize()

Size table_block_parallelscan_initialize ( Relation  rel,
ParallelTableScanDesc  pscan 
)

Definition at line 400 of file tableam.c.

References ParallelBlockTableScanDescData::base, InvalidBlockNumber, NBuffers, pg_atomic_init_u64(), ParallelBlockTableScanDescData::phs_mutex, ParallelBlockTableScanDescData::phs_nallocated, ParallelBlockTableScanDescData::phs_nblocks, ParallelTableScanDescData::phs_relid, ParallelBlockTableScanDescData::phs_startblock, ParallelTableScanDescData::phs_syncscan, RelationGetNumberOfBlocks, RelationGetRelid, RelationUsesLocalBuffers, SpinLockInit, and synchronize_seqscans.

Referenced by SampleHeapTupleVisible(), and table_scan_sample_next_tuple().

401 {
402  ParallelBlockTableScanDesc bpscan = (ParallelBlockTableScanDesc) pscan;
403 
404  bpscan->base.phs_relid = RelationGetRelid(rel);
405  bpscan->phs_nblocks = RelationGetNumberOfBlocks(rel);
406  /* compare phs_syncscan initialization to similar logic in initscan */
407  bpscan->base.phs_syncscan = synchronize_seqscans &&
408  !RelationUsesLocalBuffers(rel) &&
409  bpscan->phs_nblocks > NBuffers / 4;
410  SpinLockInit(&bpscan->phs_mutex);
411  bpscan->phs_startblock = InvalidBlockNumber;
412  pg_atomic_init_u64(&bpscan->phs_nallocated, 0);
413 
414  return sizeof(ParallelBlockTableScanDescData);
415 }
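These table_block_* helpers exist so that block-based AMs need not reimplement the shared parallel-scan bookkeeping; heapam points its callbacks at them. A trimmed sketch of such wiring (field subset only; a real TableAmRoutine must fill in all required callbacks):

#include "postgres.h"
#include "access/tableam.h"

static const TableAmRoutine my_block_am_methods = {
    .type = T_TableAmRoutine,

    /* reuse the generic block-oriented parallel scan support */
    .parallelscan_estimate = table_block_parallelscan_estimate,
    .parallelscan_initialize = table_block_parallelscan_initialize,
    .parallelscan_reinitialize = table_block_parallelscan_reinitialize,

    /* and the generic block-count-based size callback */
    .relation_size = table_block_relation_size,

    /* ... all other required callbacks omitted in this sketch ... */
};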

◆ table_block_parallelscan_nextpage()

BlockNumber table_block_parallelscan_nextpage ( Relation  rel,
ParallelBlockTableScanWorker  pbscanwork,
ParallelBlockTableScanDesc  pbscan 
)

Definition at line 503 of file tableam.c.

References ParallelBlockTableScanDescData::base, InvalidBlockNumber, PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS, pg_atomic_fetch_add_u64(), ParallelBlockTableScanDescData::phs_nallocated, ParallelBlockTableScanDescData::phs_nblocks, ParallelBlockTableScanDescData::phs_startblock, ParallelTableScanDescData::phs_syncscan, ParallelBlockTableScanWorkerData::phsw_chunk_remaining, ParallelBlockTableScanWorkerData::phsw_chunk_size, ParallelBlockTableScanWorkerData::phsw_nallocated, and ss_report_location().

Referenced by heapgettup(), heapgettup_pagemode(), and table_scan_sample_next_tuple().

506 {
507  BlockNumber page;
508  uint64 nallocated;
509 
510  /*
511  * The logic below allocates block numbers out to parallel workers in a
512  * way that each worker will receive a set of consecutive block numbers to
513  * scan. Earlier versions of this would allocate the next highest block
514  * number to the next worker to call this function. This would generally
515  * result in workers never receiving consecutive block numbers. Some
516  * operating systems would not detect the sequential I/O pattern due to
517  * each backend being a different process which could result in poor
518  * performance due to inefficient or no readahead. To work around this
519  * issue, we now allocate a range of block numbers for each worker and
520  * when they come back for another block, we give them the next one in
521  * that range until the range is complete. When the worker completes the
522  * range of blocks we then allocate another range for it and return the
523  * first block number from that range.
524  *
525  * Here we name these ranges of blocks "chunks". The initial size of
526  * these chunks is determined in table_block_parallelscan_startblock_init
527  * based on the size of the relation. Towards the end of the scan, we
528  * start making reductions in the size of the chunks in order to attempt
529  * to divide the remaining work over all the workers as evenly as
530  * possible.
531  *
532  * Here pbscanwork is local worker memory. phsw_chunk_remaining tracks
533  * the number of blocks remaining in the chunk. When that reaches 0 then
534  * we must allocate a new chunk for the worker.
535  *
536  * phs_nallocated tracks how many blocks have been allocated to workers
537  * already. When phs_nallocated >= rs_nblocks, all blocks have been
538  * allocated.
539  *
540  * Because we use an atomic fetch-and-add to fetch the current value, the
541  * phs_nallocated counter will exceed rs_nblocks, because workers will
542  * still increment the value, when they try to allocate the next block but
543  * all blocks have been allocated already. The counter must be 64 bits
544  * wide because of that, to avoid wrapping around when rs_nblocks is close
545  * to 2^32.
546  *
547  * The actual block to return is calculated by adding the counter to the
548  * starting block number, modulo nblocks.
549  */
550 
551  /*
552  * First check if we have any remaining blocks in a previous chunk for
553  * this worker. We must consume all of the blocks from that before we
554  * allocate a new chunk to the worker.
555  */
556  if (pbscanwork->phsw_chunk_remaining > 0)
557  {
558  /*
559  * Give them the next block in the range and update the remaining
560  * number of blocks.
561  */
562  nallocated = ++pbscanwork->phsw_nallocated;
563  pbscanwork->phsw_chunk_remaining--;
564  }
565  else
566  {
567  /*
568  * When we've only got PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS chunks
569  * remaining in the scan, we half the chunk size. Since we reduce the
570  * chunk size here, we'll hit this again after doing
571  * PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS at the new size. After a few
572  * iterations of this, we'll end up doing the last few blocks with the
573  * chunk size set to 1.
574  */
575  if (pbscanwork->phsw_chunk_size > 1 &&
576  pbscanwork->phsw_nallocated > pbscan->phs_nblocks -
577  (pbscanwork->phsw_chunk_size * PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS))
578  pbscanwork->phsw_chunk_size >>= 1;
579 
580  nallocated = pbscanwork->phsw_nallocated =
581  pg_atomic_fetch_add_u64(&pbscan->phs_nallocated,
582  pbscanwork->phsw_chunk_size);
583 
584  /*
585  * Set the remaining number of blocks in this chunk so that subsequent
586  * calls from this worker continue on with this chunk until it's done.
587  */
588  pbscanwork->phsw_chunk_remaining = pbscanwork->phsw_chunk_size - 1;
589  }
590 
591  if (nallocated >= pbscan->phs_nblocks)
592  page = InvalidBlockNumber; /* all blocks have been allocated */
593  else
594  page = (nallocated + pbscan->phs_startblock) % pbscan->phs_nblocks;
595 
596  /*
597  * Report scan location. Normally, we report the current page number.
598  * When we reach the end of the scan, though, we report the starting page,
599  * not the ending page, just so the starting positions for later scans
600  * doesn't slew backwards. We only report the position at the end of the
601  * scan once, though: subsequent callers will report nothing.
602  */
603  if (pbscan->base.phs_syncscan)
604  {
605  if (page != InvalidBlockNumber)
606  ss_report_location(rel, page);
607  else if (nallocated == pbscan->phs_nblocks)
608  ss_report_location(rel, pbscan->phs_startblock);
609  }
610 
611  return page;
612 }
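A hedged sketch of how an AM's scan code consumes this allocator (heapgettup() does essentially this with its own scan-state fields; the per-block work is left as a comment):

#include "postgres.h"
#include "access/relscan.h"
#include "access/tableam.h"
#include "storage/block.h"

/* Each worker initializes its local chunk state once, then pulls block
 * numbers until the shared counter is exhausted. */
static void
scan_all_blocks(Relation rel, ParallelBlockTableScanDesc pbscan)
{
    ParallelBlockTableScanWorkerData pbscanwork;
    BlockNumber blkno;

    table_block_parallelscan_startblock_init(rel, &pbscanwork, pbscan);

    while ((blkno = table_block_parallelscan_nextpage(rel, &pbscanwork,
                                                      pbscan)) != InvalidBlockNumber)
    {
        /* read and process block "blkno" here */
    }
}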

◆ table_block_parallelscan_reinitialize()

void table_block_parallelscan_reinitialize ( Relation  rel,
ParallelTableScanDesc  pscan 
)

Definition at line 418 of file tableam.c.

References pg_atomic_write_u64(), and ParallelBlockTableScanDescData::phs_nallocated.

Referenced by SampleHeapTupleVisible(), and table_scan_sample_next_tuple().

419 {
420  ParallelBlockTableScanDesc bpscan = (ParallelBlockTableScanDesc) pscan;
421 
422  pg_atomic_write_u64(&bpscan->phs_nallocated, 0);
423 }

◆ table_block_parallelscan_startblock_init()

void table_block_parallelscan_startblock_init ( Relation  rel,
ParallelBlockTableScanWorker  pbscanwork,
ParallelBlockTableScanDesc  pbscan 
)

Definition at line 433 of file tableam.c.

References ParallelBlockTableScanDescData::base, InvalidBlockNumber, Max, MaxBlockNumber, Min, PARALLEL_SEQSCAN_MAX_CHUNK_SIZE, PARALLEL_SEQSCAN_NCHUNKS, pg_nextpower2_32(), ParallelBlockTableScanDescData::phs_mutex, ParallelBlockTableScanDescData::phs_nblocks, ParallelBlockTableScanDescData::phs_startblock, ParallelTableScanDescData::phs_syncscan, ParallelBlockTableScanWorkerData::phsw_chunk_size, SpinLockAcquire, SpinLockRelease, ss_get_location(), and StaticAssertStmt.

Referenced by heapgettup(), heapgettup_pagemode(), and table_scan_sample_next_tuple().

436 {
437  BlockNumber sync_startpage = InvalidBlockNumber;
438 
439  /* Reset the state we use for controlling allocation size. */
440  memset(pbscanwork, 0, sizeof(*pbscanwork));
441 
442  StaticAssertStmt(MaxBlockNumber <= 0xFFFFFFFE,
443  "pg_nextpower2_32 may be too small for non-standard BlockNumber width");
444 
445  /*
446  * We determine the chunk size based on the size of the relation. First we
447  * split the relation into PARALLEL_SEQSCAN_NCHUNKS chunks but we then
448  * take the next highest power of 2 number of the chunk size. This means
449  * we split the relation into somewhere between PARALLEL_SEQSCAN_NCHUNKS
450  * and PARALLEL_SEQSCAN_NCHUNKS / 2 chunks.
451  */
452  pbscanwork->phsw_chunk_size = pg_nextpower2_32(Max(pbscan->phs_nblocks /
453  PARALLEL_SEQSCAN_NCHUNKS, 1));
454 
455  /*
456  * Ensure we don't go over the maximum chunk size with larger tables. This
457  * means we may get much more than PARALLEL_SEQSCAN_NCHUNKS for larger
458  * tables. Too large a chunk size has been shown to be detrimental to
459  * synchronous scan performance.
460  */
461  pbscanwork->phsw_chunk_size = Min(pbscanwork->phsw_chunk_size,
462  PARALLEL_SEQSCAN_MAX_CHUNK_SIZE);
463 
464 retry:
465  /* Grab the spinlock. */
466  SpinLockAcquire(&pbscan->phs_mutex);
467 
468  /*
469  * If the scan's startblock has not yet been initialized, we must do so
470  * now. If this is not a synchronized scan, we just start at block 0, but
471  * if it is a synchronized scan, we must get the starting position from
472  * the synchronized scan machinery. We can't hold the spinlock while
473  * doing that, though, so release the spinlock, get the information we
474  * need, and retry. If nobody else has initialized the scan in the
475  * meantime, we'll fill in the value we fetched on the second time
476  * through.
477  */
478  if (pbscan->phs_startblock == InvalidBlockNumber)
479  {
480  if (!pbscan->base.phs_syncscan)
481  pbscan->phs_startblock = 0;
482  else if (sync_startpage != InvalidBlockNumber)
483  pbscan->phs_startblock = sync_startpage;
484  else
485  {
486  SpinLockRelease(&pbscan->phs_mutex);
487  sync_startpage = ss_get_location(rel, pbscan->phs_nblocks);
488  goto retry;
489  }
490  }
491  SpinLockRelease(&pbscan->phs_mutex);
492 }
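As a worked example of the arithmetic above (not from the source): a 1,000,000-block relation gives phs_nblocks / PARALLEL_SEQSCAN_NCHUNKS = 488, which pg_nextpower2_32() rounds up to a 512-block chunk; the PARALLEL_SEQSCAN_MAX_CHUNK_SIZE cap of 8192 only starts to matter for relations beyond roughly 16.8 million blocks (about 128 GB with the default 8 kB block size).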

◆ table_block_relation_estimate_size()

void table_block_relation_estimate_size ( Relation  rel,
int32 *  attr_widths,
BlockNumber *  pages,
double *  tuples,
double *  allvisfrac,
Size  overhead_bytes_per_tuple,
Size  usable_bytes_per_page 
)

Definition at line 668 of file tableam.c.

References get_rel_data_width(), RelationData::rd_rel, and RelationGetNumberOfBlocks.

Referenced by heapam_estimate_rel_size(), and table_scan_sample_next_tuple().

673 {
674  BlockNumber curpages;
675  BlockNumber relpages;
676  double reltuples;
677  BlockNumber relallvisible;
678  double density;
679 
680  /* it should have storage, so we can call the smgr */
681  curpages = RelationGetNumberOfBlocks(rel);
682 
683  /* coerce values in pg_class to more desirable types */
684  relpages = (BlockNumber) rel->rd_rel->relpages;
685  reltuples = (double) rel->rd_rel->reltuples;
686  relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
687 
688  /*
689  * HACK: if the relation has never yet been vacuumed, use a minimum size
690  * estimate of 10 pages. The idea here is to avoid assuming a
691  * newly-created table is really small, even if it currently is, because
692  * that may not be true once some data gets loaded into it. Once a vacuum
693  * or analyze cycle has been done on it, it's more reasonable to believe
694  * the size is somewhat stable.
695  *
696  * (Note that this is only an issue if the plan gets cached and used again
697  * after the table has been filled. What we're trying to avoid is using a
698  * nestloop-type plan on a table that has grown substantially since the
699  * plan was made. Normally, autovacuum/autoanalyze will occur once enough
700  * inserts have happened and cause cached-plan invalidation; but that
701  * doesn't happen instantaneously, and it won't happen at all for cases
702  * such as temporary tables.)
703  *
704  * We test "never vacuumed" by seeing whether reltuples < 0.
705  *
706  * If the table has inheritance children, we don't apply this heuristic.
707  * Totally empty parent tables are quite common, so we should be willing
708  * to believe that they are empty.
709  */
710  if (curpages < 10 &&
711  reltuples < 0 &&
712  !rel->rd_rel->relhassubclass)
713  curpages = 10;
714 
715  /* report estimated # pages */
716  *pages = curpages;
717  /* quick exit if rel is clearly empty */
718  if (curpages == 0)
719  {
720  *tuples = 0;
721  *allvisfrac = 0;
722  return;
723  }
724 
725  /* estimate number of tuples from previous tuple density */
726  if (reltuples >= 0 && relpages > 0)
727  density = reltuples / (double) relpages;
728  else
729  {
730  /*
731  * When we have no data because the relation was never yet vacuumed,
732  * estimate tuple width from attribute datatypes. We assume here that
733  * the pages are completely full, which is OK for tables but is
734  * probably an overestimate for indexes. Fortunately
735  * get_relation_info() can clamp the overestimate to the parent
736  * table's size.
737  *
738  * Note: this code intentionally disregards alignment considerations,
739  * because (a) that would be gilding the lily considering how crude
740  * the estimate is, (b) it creates platform dependencies in the
741  * default plans which are kind of a headache for regression testing,
742  * and (c) different table AMs might use different padding schemes.
743  */
744  int32 tuple_width;
745 
746  tuple_width = get_rel_data_width(rel, attr_widths);
747  tuple_width += overhead_bytes_per_tuple;
748  /* note: integer division is intentional here */
749  density = usable_bytes_per_page / tuple_width;
750  }
751  *tuples = rint(density * (double) curpages);
752 
753  /*
754  * We use relallvisible as-is, rather than scaling it up like we do for
755  * the pages and tuples counts, on the theory that any pages added since
756  * the last VACUUM are most likely not marked all-visible. But costsize.c
757  * wants it converted to a fraction.
758  */
759  if (relallvisible == 0 || curpages <= 0)
760  *allvisfrac = 0;
761  else if ((double) relallvisible >= curpages)
762  *allvisfrac = 1;
763  else
764  *allvisfrac = (double) relallvisible / curpages;
765 }
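As a worked example of the arithmetic (not from the source): if pg_class records relpages = 100 and reltuples = 10000, the previous density is 100 tuples per page; if the relation now measures curpages = 120 blocks, the function reports *pages = 120 and *tuples = rint(100 * 120) = 12000, and with relallvisible = 90 the visible fraction becomes 90 / 120 = 0.75.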

◆ table_block_relation_size()

uint64 table_block_relation_size ( Relation  rel,
ForkNumber  forkNumber 
)

Definition at line 628 of file tableam.c.

References i, InvalidForkNumber, MAX_FORKNUM, RelationData::rd_smgr, RelationOpenSmgr, and smgrnblocks().

Referenced by SampleHeapTupleVisible(), and table_scan_sample_next_tuple().

629 {
630  uint64 nblocks = 0;
631 
632  /* Open it at the smgr level if not already done */
633  RelationOpenSmgr(rel);
634 
635  /* InvalidForkNumber indicates returning the size for all forks */
636  if (forkNumber == InvalidForkNumber)
637  {
638  for (int i = 0; i < MAX_FORKNUM; i++)
639  nblocks += smgrnblocks(rel->rd_smgr, i);
640  }
641  else
642  nblocks = smgrnblocks(rel->rd_smgr, forkNumber);
643 
644  return nblocks * BLCKSZ;
645 }

◆ table_index_fetch_tuple_check()

bool table_index_fetch_tuple_check ( Relation  rel,
ItemPointer  tid,
Snapshot  snapshot,
bool *  all_dead 
)

Definition at line 219 of file tableam.c.

References ExecDropSingleTupleTableSlot(), table_index_fetch_begin(), table_index_fetch_end(), table_index_fetch_tuple(), and table_slot_create().

Referenced by _bt_check_unique(), and table_index_fetch_tuple().

223 {
224  IndexFetchTableData *scan;
225  TupleTableSlot *slot;
226  bool call_again = false;
227  bool found;
228 
229  slot = table_slot_create(rel, NULL);
230  scan = table_index_fetch_begin(rel);
231  found = table_index_fetch_tuple(scan, tid, snapshot, slot, &call_again,
232  all_dead);
233  table_index_fetch_end(scan);
234  ExecDropSingleTupleTableSlot(slot);
235 
236  return found;
237 }
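A hedged sketch of the calling pattern, modeled on the uniqueness check in _bt_check_unique() listed above (the helper name is hypothetical):

#include "postgres.h"
#include "access/itup.h"
#include "access/tableam.h"
#include "utils/snapmgr.h"

/* Return true if the table row referenced by an index tuple is visible
 * under SnapshotSelf; *all_dead reports whether every version of the row
 * is dead, in which case the caller may mark the index entry killed. */
static bool
index_entry_points_to_live_row(Relation heapRel, IndexTuple itup,
                               bool *all_dead)
{
    return table_index_fetch_tuple_check(heapRel, &itup->t_tid,
                                         SnapshotSelf, all_dead);
}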

◆ table_parallelscan_estimate()

Size table_parallelscan_estimate ( Relation  rel,
Snapshot  snapshot 
)

Definition at line 140 of file tableam.c.

References add_size(), Assert, EstimateSnapshotSpace(), IsMVCCSnapshot, TableAmRoutine::parallelscan_estimate, RelationData::rd_tableam, and SnapshotAny.

Referenced by _bt_parallel_estimate_shared(), ExecSeqScanEstimate(), and table_scan_getnextslot().

141 {
142  Size sz = 0;
143 
144  if (IsMVCCSnapshot(snapshot))
145  sz = add_size(sz, EstimateSnapshotSpace(snapshot));
146  else
147  Assert(snapshot == SnapshotAny);
148 
149  sz = add_size(sz, rel->rd_tableam->parallelscan_estimate(rel));
150 
151  return sz;
152 }

◆ table_parallelscan_initialize()

void table_parallelscan_initialize ( Relation  rel,
ParallelTableScanDesc  pscan,
Snapshot  snapshot 
)

Definition at line 155 of file tableam.c.

References Assert, IsMVCCSnapshot, TableAmRoutine::parallelscan_initialize, ParallelTableScanDescData::phs_snapshot_any, ParallelTableScanDescData::phs_snapshot_off, RelationData::rd_tableam, SerializeSnapshot(), and SnapshotAny.

Referenced by _bt_begin_parallel(), ExecSeqScanInitializeDSM(), and table_scan_getnextslot().

157 {
158  Size snapshot_off = rel->rd_tableam->parallelscan_initialize(rel, pscan);
159 
160  pscan->phs_snapshot_off = snapshot_off;
161 
162  if (IsMVCCSnapshot(snapshot))
163  {
164  SerializeSnapshot(snapshot, (char *) pscan + pscan->phs_snapshot_off);
165  pscan->phs_snapshot_any = false;
166  }
167  else
168  {
169  Assert(snapshot == SnapshotAny);
170  pscan->phs_snapshot_any = true;
171  }
172 }
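Together with table_parallelscan_estimate() and table_beginscan_parallel(), the leader-side sequence looks roughly like this (a sketch only: palloc0 stands in for placing the structure in dynamic shared memory, which is what real callers do so workers can attach):

#include "postgres.h"
#include "access/tableam.h"

static TableScanDesc
start_shared_scan(Relation rel, Snapshot snapshot)
{
    /* size, allocate, and fill the shared per-scan state */
    Size                  sz = table_parallelscan_estimate(rel, snapshot);
    ParallelTableScanDesc pscan = (ParallelTableScanDesc) palloc0(sz);

    table_parallelscan_initialize(rel, pscan, snapshot);

    /* the leader (and each worker) then attaches to the shared state */
    return table_beginscan_parallel(rel, pscan);
}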

◆ table_scan_update_snapshot()

void table_scan_update_snapshot ( TableScanDesc  scan,
Snapshot  snapshot 
)

Definition at line 124 of file tableam.c.

References Assert, IsMVCCSnapshot, RegisterSnapshot(), TableScanDescData::rs_flags, TableScanDescData::rs_snapshot, and SO_TEMP_SNAPSHOT.

Referenced by ExecBitmapHeapInitializeWorker(), and table_rescan_set_params().

125 {
126  Assert(IsMVCCSnapshot(snapshot));
127 
128  RegisterSnapshot(snapshot);
129  scan->rs_snapshot = snapshot;
130  scan->rs_flags |= SO_TEMP_SNAPSHOT;
131 }

◆ table_slot_callbacks()

const TupleTableSlotOps* table_slot_callbacks ( Relation  relation)

Definition at line 58 of file tableam.c.

References Assert, RelationData::rd_rel, RelationData::rd_tableam, TableAmRoutine::slot_callbacks, TTSOpsHeapTuple, and TTSOpsVirtual.

Referenced by ATRewriteTable(), ExecGetReturningSlot(), ExecGetTriggerNewSlot(), ExecGetTriggerOldSlot(), ExecInitBitmapHeapScan(), ExecInitIndexOnlyScan(), ExecInitIndexScan(), ExecInitModifyTable(), ExecInitSampleScan(), ExecInitSeqScan(), ExecInitTidScan(), and table_slot_create().

59 {
60  const TupleTableSlotOps *tts_cb;
61 
62  if (relation->rd_tableam)
63  tts_cb = relation->rd_tableam->slot_callbacks(relation);
64  else if (relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
65  {
66  /*
67  * Historically FDWs expect to store heap tuples in slots. Continue
68  * handing them one, to make it less painful to adapt FDWs to new
69  * versions. The cost of a heap slot over a virtual slot is pretty
70  * small.
71  */
72  tts_cb = &TTSOpsHeapTuple;
73  }
74  else
75  {
76  /*
77  * These need to be supported, as some parts of the code (like COPY)
78  * need to create slots for such relations too. It seems better to
79  * centralize the knowledge that a heap slot is the right thing in
80  * that case here.
81  */
82  Assert(relation->rd_rel->relkind == RELKIND_VIEW ||
83  relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE);
84  tts_cb = &TTSOpsVirtual;
85  }
86 
87  return tts_cb;
88 }

◆ table_slot_create()

TupleTableSlot* table_slot_create ( Relation  relation,
List **  reglist 
)

Definition at line 91 of file tableam.c.

References lappend(), MakeSingleTupleTableSlot(), RelationGetDescr, and table_slot_callbacks().

Referenced by acquire_sample_rows(), AlterDomainNotNull(), apply_handle_tuple_routing(), check_default_partition_contents(), check_exclusion_or_unique_constraint(), CopyFrom(), CopyMultiInsertInfoNextFreeSlot(), CopyTo(), DefineQueryRewrite(), EvalPlanQualSlot(), ExecInitModifyTable(), ExecInitPartitionInfo(), ExecInitRoutingInfo(), FindReplTupleInLocalRel(), get_actual_variable_range(), heapam_index_build_range_scan(), heapam_relation_copy_for_cluster(), IndexCheckExclusion(), RelationFindReplTupleSeq(), systable_beginscan(), systable_beginscan_ordered(), table_index_fetch_tuple_check(), unique_key_recheck(), validateDomainConstraint(), and validateForeignKeyConstraint().

92 {
93  const TupleTableSlotOps *tts_cb;
94  TupleTableSlot *slot;
95 
96  tts_cb = table_slot_callbacks(relation);
97  slot = MakeSingleTupleTableSlot(RelationGetDescr(relation), tts_cb);
98 
99  if (reglist)
100  *reglist = lappend(*reglist, slot);
101 
102  return slot;
103 }
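A brief note on reglist: executor callers typically pass a pointer to a slot list such as an EState's es_tupleTable, so the slot is released with that list rather than by an explicit ExecDropSingleTupleTableSlot() call. A hedged sketch:

#include "postgres.h"
#include "access/tableam.h"
#include "nodes/execnodes.h"

/* Hypothetical helper: the slot is appended to estate->es_tupleTable and
 * is freed when that tuple table is reset, so the caller need not drop it
 * explicitly. */
static TupleTableSlot *
make_registered_slot(Relation rel, EState *estate)
{
    return table_slot_create(rel, &estate->es_tupleTable);
}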

◆ table_tuple_get_latest_tid()

void table_tuple_get_latest_tid ( TableScanDesc  scan,
ItemPointer  tid 
)

Definition at line 246 of file tableam.c.

References bsysscan, CheckXidAlive, elog, ereport, errcode(), errmsg(), ERROR, ItemPointerGetBlockNumberNoCheck, ItemPointerGetOffsetNumberNoCheck, RelationData::rd_tableam, RelationGetRelationName, TableScanDescData::rs_rd, TransactionIdIsValid, TableAmRoutine::tuple_get_latest_tid, TableAmRoutine::tuple_tid_valid, and unlikely.

Referenced by currtid_byrelname(), currtid_byreloid(), table_tuple_tid_valid(), and TidNext().

247 {
248  Relation rel = scan->rs_rd;
249  const TableAmRoutine *tableam = rel->rd_tableam;
250 
251  /*
252  * We don't expect direct calls to table_tuple_get_latest_tid with valid
253  * CheckXidAlive for catalog or regular tables. See detailed comments in
254  * xact.c where these variables are declared.
255  */
256  if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
257  elog(ERROR, "unexpected table_tuple_get_latest_tid call during logical decoding");
258 
259  /*
260  * Since this can be called with user-supplied TID, don't trust the input
261  * too much.
262  */
263  if (!tableam->tuple_tid_valid(scan, tid))
264  ereport(ERROR,
265  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
266  errmsg("tid (%u, %u) is not valid for relation \"%s\"",
267  ItemPointerGetBlockNumberNoCheck(tid),
268  ItemPointerGetOffsetNumberNoCheck(tid),
269  RelationGetRelationName(rel))));
270 
271  tableam->tuple_get_latest_tid(scan, tid);
272 }

Variable Documentation

◆ default_table_access_method

char* default_table_access_method = DEFAULT_TABLE_ACCESS_METHOD

Definition at line 48 of file tableam.c.

Referenced by DefineRelation().

◆ synchronize_seqscans

bool synchronize_seqscans = true

Definition at line 49 of file tableam.c.

Referenced by initscan(), and table_block_parallelscan_initialize().