PostgreSQL Source Code  git master
tableam.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/heapam.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "optimizer/plancat.h"
#include "storage/bufmgr.h"
#include "storage/shmem.h"
#include "storage/smgr.h"
Include dependency graph for tableam.c:

Go to the source code of this file.

Functions

const TupleTableSlotOps * table_slot_callbacks (Relation relation)
 
TupleTableSlot * table_slot_create (Relation relation, List **reglist)
 
TableScanDesc table_beginscan_catalog (Relation relation, int nkeys, struct ScanKeyData *key)
 
void table_scan_update_snapshot (TableScanDesc scan, Snapshot snapshot)
 
Size table_parallelscan_estimate (Relation rel, Snapshot snapshot)
 
void table_parallelscan_initialize (Relation rel, ParallelTableScanDesc pscan, Snapshot snapshot)
 
TableScanDesc table_beginscan_parallel (Relation relation, ParallelTableScanDesc parallel_scan)
 
bool table_index_fetch_tuple_check (Relation rel, ItemPointer tid, Snapshot snapshot, bool *all_dead)
 
void table_tuple_get_latest_tid (TableScanDesc scan, ItemPointer tid)
 
void simple_table_tuple_insert (Relation rel, TupleTableSlot *slot)
 
void simple_table_tuple_delete (Relation rel, ItemPointer tid, Snapshot snapshot)
 
void simple_table_tuple_update (Relation rel, ItemPointer otid, TupleTableSlot *slot, Snapshot snapshot, bool *update_indexes)
 
Size table_block_parallelscan_estimate (Relation rel)
 
Size table_block_parallelscan_initialize (Relation rel, ParallelTableScanDesc pscan)
 
void table_block_parallelscan_reinitialize (Relation rel, ParallelTableScanDesc pscan)
 
void table_block_parallelscan_startblock_init (Relation rel, ParallelBlockTableScanDesc pbscan)
 
BlockNumber table_block_parallelscan_nextpage (Relation rel, ParallelBlockTableScanDesc pbscan)
 
uint64 table_block_relation_size (Relation rel, ForkNumber forkNumber)
 
void table_block_relation_estimate_size (Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac, Size overhead_bytes_per_tuple, Size usable_bytes_per_page)
 

Variables

char * default_table_access_method = DEFAULT_TABLE_ACCESS_METHOD
 
bool synchronize_seqscans = true
 

Function Documentation

◆ simple_table_tuple_delete()

void simple_table_tuple_delete ( Relation  rel,
ItemPointer  tid,
Snapshot  snapshot 
)

Definition at line 279 of file tableam.c.

References elog, ERROR, GetCurrentCommandId(), InvalidSnapshot, table_tuple_delete(), TM_Deleted, TM_Ok, TM_SelfModified, and TM_Updated.

Referenced by ExecSimpleRelationDelete(), and table_scan_sample_next_tuple().

280 {
281  TM_Result result;
282  TM_FailureData tmfd;
283 
284  result = table_tuple_delete(rel, tid,
285  GetCurrentCommandId(true),
286  snapshot, InvalidSnapshot,
287  true /* wait for commit */ ,
288  &tmfd, false /* changingPart */ );
289 
290  switch (result)
291  {
292  case TM_SelfModified:
293  /* Tuple was already updated in current command? */
294  elog(ERROR, "tuple already updated by self");
295  break;
296 
297  case TM_Ok:
298  /* done successfully */
299  break;
300 
301  case TM_Updated:
302  elog(ERROR, "tuple concurrently updated");
303  break;
304 
305  case TM_Deleted:
306  elog(ERROR, "tuple concurrently deleted");
307  break;
308 
309  default:
310  elog(ERROR, "unrecognized table_tuple_delete status: %u", result);
311  break;
312  }
313 }
#define ERROR
Definition: elog.h:43
#define InvalidSnapshot
Definition: snapshot.h:123
TM_Result
Definition: tableam.h:69
static TM_Result table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
Definition: tableam.h:1241
Definition: tableam.h:75
#define elog(elevel,...)
Definition: elog.h:214
CommandId GetCurrentCommandId(bool used)
Definition: xact.c:746

◆ simple_table_tuple_insert()

void simple_table_tuple_insert ( Relation  rel,
TupleTableSlot *  slot 
)

Definition at line 265 of file tableam.c.

References GetCurrentCommandId(), and table_tuple_insert().

Referenced by ExecSimpleRelationInsert(), and table_scan_sample_next_tuple().

266 {
267  table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);
268 }
static void table_tuple_insert(Relation rel, TupleTableSlot *slot, CommandId cid, int options, struct BulkInsertStateData *bistate)
Definition: tableam.h:1152
CommandId GetCurrentCommandId(bool used)
Definition: xact.c:746

◆ simple_table_tuple_update()

void simple_table_tuple_update ( Relation  rel,
ItemPointer  otid,
TupleTableSlot *  slot,
Snapshot  snapshot,
bool *  update_indexes 
)

Definition at line 324 of file tableam.c.

References elog, ERROR, GetCurrentCommandId(), InvalidSnapshot, table_tuple_update(), TM_Deleted, TM_Ok, TM_SelfModified, and TM_Updated.

Referenced by ExecSimpleRelationUpdate(), and table_scan_sample_next_tuple().

328 {
329  TM_Result result;
330  TM_FailureData tmfd;
331  LockTupleMode lockmode;
332 
333  result = table_tuple_update(rel, otid, slot,
334  GetCurrentCommandId(true),
335  snapshot, InvalidSnapshot,
336  true /* wait for commit */ ,
337  &tmfd, &lockmode, update_indexes);
338 
339  switch (result)
340  {
341  case TM_SelfModified:
342  /* Tuple was already updated in current command? */
343  elog(ERROR, "tuple already updated by self");
344  break;
345 
346  case TM_Ok:
347  /* done successfully */
348  break;
349 
350  case TM_Updated:
351  elog(ERROR, "tuple concurrently updated");
352  break;
353 
354  case TM_Deleted:
355  elog(ERROR, "tuple concurrently deleted");
356  break;
357 
358  default:
359  elog(ERROR, "unrecognized table_tuple_update status: %u", result);
360  break;
361  }
362 
363 }
LockTupleMode
Definition: lockoptions.h:49
#define ERROR
Definition: elog.h:43
#define InvalidSnapshot
Definition: snapshot.h:123
TM_Result
Definition: tableam.h:69
Definition: tableam.h:75
#define elog(elevel,...)
Definition: elog.h:214
CommandId GetCurrentCommandId(bool used)
Definition: xact.c:746
static TM_Result table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, bool *update_indexes)
Definition: tableam.h:1285

◆ table_beginscan_catalog()

TableScanDesc table_beginscan_catalog ( Relation  relation,
int  nkeys,
struct ScanKeyData *  key 
)

Definition at line 98 of file tableam.c.

References GetCatalogSnapshot(), RelationData::rd_tableam, RegisterSnapshot(), RelationGetRelid, TableAmRoutine::scan_begin, SO_ALLOW_PAGEMODE, SO_ALLOW_STRAT, SO_ALLOW_SYNC, SO_TEMP_SNAPSHOT, and SO_TYPE_SEQSCAN.

Referenced by AlterTableMoveAll(), AlterTableSpaceOptions(), boot_openrel(), check_db_file_conflict(), createdb(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), objectsInSchemaToOids(), ReindexMultipleTables(), remove_dbtablespaces(), RemoveSubscriptionRel(), RenameTableSpace(), table_beginscan(), ThereIsAtLeastOneRole(), and vac_truncate_clog().

99 {
 100  uint32 flags = SO_TYPE_SEQSCAN |
 101  SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
102  Oid relid = RelationGetRelid(relation);
103  Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
104 
105  return relation->rd_tableam->scan_begin(relation, snapshot, nkeys, key,
106  NULL, flags);
107 }
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:865
Snapshot GetCatalogSnapshot(Oid relid)
Definition: snapmgr.c:442
unsigned int Oid
Definition: postgres_ext.h:31
TableScanDesc(* scan_begin)(Relation rel, Snapshot snapshot, int nkeys, struct ScanKeyData *key, ParallelTableScanDesc pscan, uint32 flags)
Definition: tableam.h:200
unsigned int uint32
Definition: c.h:367
const struct TableAmRoutine * rd_tableam
Definition: rel.h:171
#define RelationGetRelid(relation)
Definition: rel.h:456

◆ table_beginscan_parallel()

TableScanDesc table_beginscan_parallel ( Relation  relation,
ParallelTableScanDesc  parallel_scan 
)

Definition at line 161 of file tableam.c.

References Assert, ParallelTableScanDescData::phs_relid, ParallelTableScanDescData::phs_snapshot_any, ParallelTableScanDescData::phs_snapshot_off, RelationData::rd_tableam, RegisterSnapshot(), RelationGetRelid, RestoreSnapshot(), TableAmRoutine::scan_begin, SnapshotAny, SO_ALLOW_PAGEMODE, SO_ALLOW_STRAT, SO_ALLOW_SYNC, SO_TEMP_SNAPSHOT, and SO_TYPE_SEQSCAN.

Referenced by _bt_parallel_scan_and_sort(), ExecSeqScanInitializeDSM(), ExecSeqScanInitializeWorker(), and table_scan_getnextslot().

162 {
163  Snapshot snapshot;
 164  uint32 flags = SO_TYPE_SEQSCAN |
 165  SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
166 
167  Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
168 
169  if (!parallel_scan->phs_snapshot_any)
170  {
171  /* Snapshot was serialized -- restore it */
172  snapshot = RestoreSnapshot((char *) parallel_scan +
173  parallel_scan->phs_snapshot_off);
174  RegisterSnapshot(snapshot);
175  flags |= SO_TEMP_SNAPSHOT;
176  }
177  else
178  {
179  /* SnapshotAny passed by caller (not serialized) */
180  snapshot = SnapshotAny;
181  }
182 
183  return relation->rd_tableam->scan_begin(relation, snapshot, 0, NULL,
184  parallel_scan, flags);
185 }
Snapshot RestoreSnapshot(char *start_address)
Definition: snapmgr.c:2161
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:865
TableScanDesc(* scan_begin)(Relation rel, Snapshot snapshot, int nkeys, struct ScanKeyData *key, ParallelTableScanDesc pscan, uint32 flags)
Definition: tableam.h:200
unsigned int uint32
Definition: c.h:367
const struct TableAmRoutine * rd_tableam
Definition: rel.h:171
#define Assert(condition)
Definition: c.h:738
#define SnapshotAny
Definition: snapmgr.h:69
#define RelationGetRelid(relation)
Definition: rel.h:456

◆ table_block_parallelscan_estimate()

Size table_block_parallelscan_estimate ( Relation  rel)

Definition at line 372 of file tableam.c.

Referenced by SampleHeapTupleVisible(), and table_scan_sample_next_tuple().

373 {
374  return sizeof(ParallelBlockTableScanDescData);
375 }
struct ParallelBlockTableScanDescData ParallelBlockTableScanDescData

◆ table_block_parallelscan_initialize()

Size table_block_parallelscan_initialize ( Relation  rel,
ParallelTableScanDesc  pscan 
)

Definition at line 378 of file tableam.c.

References ParallelBlockTableScanDescData::base, InvalidBlockNumber, NBuffers, pg_atomic_init_u64(), ParallelBlockTableScanDescData::phs_mutex, ParallelBlockTableScanDescData::phs_nallocated, ParallelBlockTableScanDescData::phs_nblocks, ParallelTableScanDescData::phs_relid, ParallelBlockTableScanDescData::phs_startblock, ParallelTableScanDescData::phs_syncscan, RelationGetNumberOfBlocks, RelationGetRelid, RelationUsesLocalBuffers, SpinLockInit, and synchronize_seqscans.

Referenced by SampleHeapTupleVisible(), and table_scan_sample_next_tuple().

379 {
 380  ParallelBlockTableScanDesc bpscan = (ParallelBlockTableScanDesc) pscan;
 381 
382  bpscan->base.phs_relid = RelationGetRelid(rel);
383  bpscan->phs_nblocks = RelationGetNumberOfBlocks(rel);
 384  /* compare phs_syncscan initialization to similar logic in initscan */
 385  bpscan->base.phs_syncscan = synchronize_seqscans &&
386  !RelationUsesLocalBuffers(rel) &&
387  bpscan->phs_nblocks > NBuffers / 4;
 388  SpinLockInit(&bpscan->phs_mutex);
 389  bpscan->phs_startblock = InvalidBlockNumber;
390  pg_atomic_init_u64(&bpscan->phs_nallocated, 0);
391 
392  return sizeof(ParallelBlockTableScanDescData);
393 }
pg_atomic_uint64 phs_nallocated
Definition: relscan.h:79
bool synchronize_seqscans
Definition: tableam.c:35
ParallelTableScanDescData base
Definition: relscan.h:74
struct ParallelBlockTableScanDescData * ParallelBlockTableScanDesc
Definition: relscan.h:82
struct ParallelBlockTableScanDescData ParallelBlockTableScanDescData
#define SpinLockInit(lock)
Definition: spin.h:60
static void pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: atomics.h:415
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:211
#define InvalidBlockNumber
Definition: block.h:33
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:572
int NBuffers
Definition: globals.c:131
#define RelationGetRelid(relation)
Definition: rel.h:456

◆ table_block_parallelscan_nextpage()

BlockNumber table_block_parallelscan_nextpage ( Relation  rel,
ParallelBlockTableScanDesc  pbscan 
)

Definition at line 454 of file tableam.c.

References ParallelBlockTableScanDescData::base, InvalidBlockNumber, pg_atomic_fetch_add_u64(), ParallelBlockTableScanDescData::phs_nallocated, ParallelBlockTableScanDescData::phs_nblocks, ParallelBlockTableScanDescData::phs_startblock, ParallelTableScanDescData::phs_syncscan, and ss_report_location().

Referenced by heapgettup(), heapgettup_pagemode(), and table_scan_sample_next_tuple().

455 {
456  BlockNumber page;
457  uint64 nallocated;
458 
459  /*
460  * phs_nallocated tracks how many pages have been allocated to workers
461  * already. When phs_nallocated >= rs_nblocks, all blocks have been
462  * allocated.
463  *
464  * Because we use an atomic fetch-and-add to fetch the current value, the
465  * phs_nallocated counter will exceed rs_nblocks, because workers will
466  * still increment the value, when they try to allocate the next block but
467  * all blocks have been allocated already. The counter must be 64 bits
468  * wide because of that, to avoid wrapping around when rs_nblocks is close
469  * to 2^32.
470  *
471  * The actual page to return is calculated by adding the counter to the
472  * starting block number, modulo nblocks.
473  */
474  nallocated = pg_atomic_fetch_add_u64(&pbscan->phs_nallocated, 1);
475  if (nallocated >= pbscan->phs_nblocks)
476  page = InvalidBlockNumber; /* all blocks have been allocated */
477  else
478  page = (nallocated + pbscan->phs_startblock) % pbscan->phs_nblocks;
479 
480  /*
481  * Report scan location. Normally, we report the current page number.
482  * When we reach the end of the scan, though, we report the starting page,
483  * not the ending page, just so the starting positions for later scans
484  * doesn't slew backwards. We only report the position at the end of the
485  * scan once, though: subsequent callers will report nothing.
486  */
487  if (pbscan->base.phs_syncscan)
488  {
489  if (page != InvalidBlockNumber)
490  ss_report_location(rel, page);
491  else if (nallocated == pbscan->phs_nblocks)
492  ss_report_location(rel, pbscan->phs_startblock);
493  }
494 
495  return page;
496 }
pg_atomic_uint64 phs_nallocated
Definition: relscan.h:79
static uint64 pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
Definition: atomics.h:467
ParallelTableScanDescData base
Definition: relscan.h:74
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
void ss_report_location(Relation rel, BlockNumber location)
Definition: syncscan.c:288

◆ table_block_parallelscan_reinitialize()

void table_block_parallelscan_reinitialize ( Relation  rel,
ParallelTableScanDesc  pscan 
)

Definition at line 396 of file tableam.c.

References pg_atomic_write_u64(), and ParallelBlockTableScanDescData::phs_nallocated.

Referenced by SampleHeapTupleVisible(), and table_scan_sample_next_tuple().

397 {
 398  ParallelBlockTableScanDesc bpscan = (ParallelBlockTableScanDesc) pscan;
 399 
400  pg_atomic_write_u64(&bpscan->phs_nallocated, 0);
401 }
pg_atomic_uint64 phs_nallocated
Definition: relscan.h:79
struct ParallelBlockTableScanDescData * ParallelBlockTableScanDesc
Definition: relscan.h:82
static void pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: atomics.h:438

◆ table_block_parallelscan_startblock_init()

void table_block_parallelscan_startblock_init ( Relation  rel,
ParallelBlockTableScanDesc  pbscan 
)

Definition at line 411 of file tableam.c.

References ParallelBlockTableScanDescData::base, InvalidBlockNumber, ParallelBlockTableScanDescData::phs_mutex, ParallelBlockTableScanDescData::phs_nblocks, ParallelBlockTableScanDescData::phs_startblock, ParallelTableScanDescData::phs_syncscan, SpinLockAcquire, SpinLockRelease, and ss_get_location().

Referenced by heapgettup(), heapgettup_pagemode(), and table_scan_sample_next_tuple().

412 {
413  BlockNumber sync_startpage = InvalidBlockNumber;
414 
415 retry:
416  /* Grab the spinlock. */
417  SpinLockAcquire(&pbscan->phs_mutex);
418 
419  /*
420  * If the scan's startblock has not yet been initialized, we must do so
421  * now. If this is not a synchronized scan, we just start at block 0, but
422  * if it is a synchronized scan, we must get the starting position from
423  * the synchronized scan machinery. We can't hold the spinlock while
424  * doing that, though, so release the spinlock, get the information we
425  * need, and retry. If nobody else has initialized the scan in the
426  * meantime, we'll fill in the value we fetched on the second time
427  * through.
428  */
429  if (pbscan->phs_startblock == InvalidBlockNumber)
430  {
431  if (!pbscan->base.phs_syncscan)
432  pbscan->phs_startblock = 0;
433  else if (sync_startpage != InvalidBlockNumber)
434  pbscan->phs_startblock = sync_startpage;
435  else
436  {
437  SpinLockRelease(&pbscan->phs_mutex);
438  sync_startpage = ss_get_location(rel, pbscan->phs_nblocks);
439  goto retry;
440  }
441  }
442  SpinLockRelease(&pbscan->phs_mutex);
443 }
ParallelTableScanDescData base
Definition: relscan.h:74
uint32 BlockNumber
Definition: block.h:31
#define SpinLockAcquire(lock)
Definition: spin.h:62
BlockNumber ss_get_location(Relation rel, BlockNumber relnblocks)
Definition: syncscan.c:253
#define SpinLockRelease(lock)
Definition: spin.h:64
#define InvalidBlockNumber
Definition: block.h:33

◆ table_block_relation_estimate_size()

void table_block_relation_estimate_size ( Relation  rel,
int32 *  attr_widths,
BlockNumber *  pages,
double *  tuples,
double *  allvisfrac,
Size  overhead_bytes_per_tuple,
Size  usable_bytes_per_page 
)

Definition at line 552 of file tableam.c.

References get_rel_data_width(), RelationData::rd_rel, and RelationGetNumberOfBlocks.

Referenced by heapam_estimate_rel_size(), and table_scan_sample_next_tuple().

557 {
558  BlockNumber curpages;
559  BlockNumber relpages;
560  double reltuples;
561  BlockNumber relallvisible;
562  double density;
563 
564  /* it should have storage, so we can call the smgr */
565  curpages = RelationGetNumberOfBlocks(rel);
566 
567  /* coerce values in pg_class to more desirable types */
568  relpages = (BlockNumber) rel->rd_rel->relpages;
569  reltuples = (double) rel->rd_rel->reltuples;
570  relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
571 
572  /*
573  * HACK: if the relation has never yet been vacuumed, use a minimum size
574  * estimate of 10 pages. The idea here is to avoid assuming a
575  * newly-created table is really small, even if it currently is, because
576  * that may not be true once some data gets loaded into it. Once a vacuum
577  * or analyze cycle has been done on it, it's more reasonable to believe
578  * the size is somewhat stable.
579  *
580  * (Note that this is only an issue if the plan gets cached and used again
581  * after the table has been filled. What we're trying to avoid is using a
582  * nestloop-type plan on a table that has grown substantially since the
583  * plan was made. Normally, autovacuum/autoanalyze will occur once enough
584  * inserts have happened and cause cached-plan invalidation; but that
585  * doesn't happen instantaneously, and it won't happen at all for cases
586  * such as temporary tables.)
587  *
588  * We approximate "never vacuumed" by "has relpages = 0", which means this
589  * will also fire on genuinely empty relations. Not great, but
590  * fortunately that's a seldom-seen case in the real world, and it
591  * shouldn't degrade the quality of the plan too much anyway to err in
592  * this direction.
593  *
594  * If the table has inheritance children, we don't apply this heuristic.
595  * Totally empty parent tables are quite common, so we should be willing
596  * to believe that they are empty.
597  */
598  if (curpages < 10 &&
599  relpages == 0 &&
600  !rel->rd_rel->relhassubclass)
601  curpages = 10;
602 
603  /* report estimated # pages */
604  *pages = curpages;
605  /* quick exit if rel is clearly empty */
606  if (curpages == 0)
607  {
608  *tuples = 0;
609  *allvisfrac = 0;
610  return;
611  }
612 
613  /* estimate number of tuples from previous tuple density */
614  if (relpages > 0)
615  density = reltuples / (double) relpages;
616  else
617  {
618  /*
619  * When we have no data because the relation was truncated, estimate
620  * tuple width from attribute datatypes. We assume here that the
621  * pages are completely full, which is OK for tables (since they've
622  * presumably not been VACUUMed yet) but is probably an overestimate
623  * for indexes. Fortunately get_relation_info() can clamp the
624  * overestimate to the parent table's size.
625  *
626  * Note: this code intentionally disregards alignment considerations,
627  * because (a) that would be gilding the lily considering how crude
628  * the estimate is, (b) it creates platform dependencies in the
629  * default plans which are kind of a headache for regression testing,
630  * and (c) different table AMs might use different padding schemes.
631  */
632  int32 tuple_width;
633 
634  tuple_width = get_rel_data_width(rel, attr_widths);
635  tuple_width += overhead_bytes_per_tuple;
636  /* note: integer division is intentional here */
637  density = usable_bytes_per_page / tuple_width;
638  }
639  *tuples = rint(density * (double) curpages);
640 
641  /*
642  * We use relallvisible as-is, rather than scaling it up like we do for
643  * the pages and tuples counts, on the theory that any pages added since
644  * the last VACUUM are most likely not marked all-visible. But costsize.c
645  * wants it converted to a fraction.
646  */
647  if (relallvisible == 0 || curpages <= 0)
648  *allvisfrac = 0;
649  else if ((double) relallvisible >= curpages)
650  *allvisfrac = 1;
651  else
652  *allvisfrac = (double) relallvisible / curpages;
653 }
uint32 BlockNumber
Definition: block.h:31
Form_pg_class rd_rel
Definition: rel.h:109
signed int int32
Definition: c.h:355
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:211
int32 get_rel_data_width(Relation rel, int32 *attr_widths)
Definition: plancat.c:1090

◆ table_block_relation_size()

uint64 table_block_relation_size ( Relation  rel,
ForkNumber  forkNumber 
)

Definition at line 512 of file tableam.c.

References i, InvalidForkNumber, MAX_FORKNUM, RelationData::rd_smgr, RelationOpenSmgr, and smgrnblocks().

Referenced by SampleHeapTupleVisible(), and table_scan_sample_next_tuple().

513 {
514  uint64 nblocks = 0;
515 
516  /* Open it at the smgr level if not already done */
517  RelationOpenSmgr(rel);
518 
519  /* InvalidForkNumber indicates returning the size for all forks */
520  if (forkNumber == InvalidForkNumber)
521  {
522  for (int i = 0; i < MAX_FORKNUM; i++)
523  nblocks += smgrnblocks(rel->rd_smgr, i);
524  }
525  else
526  nblocks = smgrnblocks(rel->rd_smgr, forkNumber);
527 
528  return nblocks * BLCKSZ;
529 }
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
#define RelationOpenSmgr(relation)
Definition: rel.h:513
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:538
#define MAX_FORKNUM
Definition: relpath.h:55
int i

◆ table_index_fetch_tuple_check()

bool table_index_fetch_tuple_check ( Relation  rel,
ItemPointer  tid,
Snapshot  snapshot,
bool *  all_dead 
)

Definition at line 205 of file tableam.c.

References ExecDropSingleTupleTableSlot(), table_index_fetch_begin(), table_index_fetch_end(), table_index_fetch_tuple(), and table_slot_create().

Referenced by _bt_check_unique(), and table_index_fetch_tuple().

209 {
210  IndexFetchTableData *scan;
211  TupleTableSlot *slot;
212  bool call_again = false;
213  bool found;
214 
215  slot = table_slot_create(rel, NULL);
216  scan = table_index_fetch_begin(rel);
217  found = table_index_fetch_tuple(scan, tid, snapshot, slot, &call_again,
218  all_dead);
219  table_index_fetch_end(scan);
 220  ExecDropSingleTupleTableSlot(slot);
 221 
222  return found;
223 }
TupleTableSlot * table_slot_create(Relation relation, List **reglist)
Definition: tableam.c:77
static IndexFetchTableData * table_index_fetch_begin(Relation rel)
Definition: tableam.h:965
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1224
static bool table_index_fetch_tuple(struct IndexFetchTableData *scan, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, bool *call_again, bool *all_dead)
Definition: tableam.h:1014
static void table_index_fetch_end(struct IndexFetchTableData *scan)
Definition: tableam.h:984

◆ table_parallelscan_estimate()

Size table_parallelscan_estimate ( Relation  rel,
Snapshot  snapshot 
)

Definition at line 126 of file tableam.c.

References add_size(), Assert, EstimateSnapshotSpace(), IsMVCCSnapshot, TableAmRoutine::parallelscan_estimate, RelationData::rd_tableam, and SnapshotAny.

Referenced by _bt_parallel_estimate_shared(), ExecSeqScanEstimate(), and table_scan_getnextslot().

127 {
128  Size sz = 0;
129 
130  if (IsMVCCSnapshot(snapshot))
131  sz = add_size(sz, EstimateSnapshotSpace(snapshot));
132  else
133  Assert(snapshot == SnapshotAny);
134 
135  sz = add_size(sz, rel->rd_tableam->parallelscan_estimate(rel));
136 
137  return sz;
138 }
Size(* parallelscan_estimate)(Relation rel)
Definition: tableam.h:237
Size EstimateSnapshotSpace(Snapshot snap)
Definition: snapmgr.c:2078
Size add_size(Size s1, Size s2)
Definition: shmem.c:498
const struct TableAmRoutine * rd_tableam
Definition: rel.h:171
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:97
#define Assert(condition)
Definition: c.h:738
size_t Size
Definition: c.h:466
#define SnapshotAny
Definition: snapmgr.h:69

◆ table_parallelscan_initialize()

void table_parallelscan_initialize ( Relation  rel,
ParallelTableScanDesc  pscan,
Snapshot  snapshot 
)

Definition at line 141 of file tableam.c.

References Assert, IsMVCCSnapshot, TableAmRoutine::parallelscan_initialize, ParallelTableScanDescData::phs_snapshot_any, ParallelTableScanDescData::phs_snapshot_off, RelationData::rd_tableam, SerializeSnapshot(), and SnapshotAny.

Referenced by _bt_begin_parallel(), ExecSeqScanInitializeDSM(), and table_scan_getnextslot().

143 {
144  Size snapshot_off = rel->rd_tableam->parallelscan_initialize(rel, pscan);
145 
146  pscan->phs_snapshot_off = snapshot_off;
147 
148  if (IsMVCCSnapshot(snapshot))
149  {
150  SerializeSnapshot(snapshot, (char *) pscan + pscan->phs_snapshot_off);
151  pscan->phs_snapshot_any = false;
152  }
153  else
154  {
155  Assert(snapshot == SnapshotAny);
156  pscan->phs_snapshot_any = true;
157  }
158 }
Size(* parallelscan_initialize)(Relation rel, ParallelTableScanDesc pscan)
Definition: tableam.h:244
void SerializeSnapshot(Snapshot snapshot, char *start_address)
Definition: snapmgr.c:2102
const struct TableAmRoutine * rd_tableam
Definition: rel.h:171
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:97
#define Assert(condition)
Definition: c.h:738
size_t Size
Definition: c.h:466
#define SnapshotAny
Definition: snapmgr.h:69

◆ table_scan_update_snapshot()

void table_scan_update_snapshot ( TableScanDesc  scan,
Snapshot  snapshot 
)

Definition at line 110 of file tableam.c.

References Assert, IsMVCCSnapshot, RegisterSnapshot(), TableScanDescData::rs_flags, TableScanDescData::rs_snapshot, and SO_TEMP_SNAPSHOT.

Referenced by ExecBitmapHeapInitializeWorker(), and table_rescan_set_params().

111 {
112  Assert(IsMVCCSnapshot(snapshot));
113 
114  RegisterSnapshot(snapshot);
115  scan->rs_snapshot = snapshot;
116  scan->rs_flags |= SO_TEMP_SNAPSHOT;
117 }
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:865
uint32 rs_flags
Definition: relscan.h:43
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:97
#define Assert(condition)
Definition: c.h:738
struct SnapshotData * rs_snapshot
Definition: relscan.h:35

◆ table_slot_callbacks()

const TupleTableSlotOps* table_slot_callbacks ( Relation  relation)

Definition at line 44 of file tableam.c.

References Assert, RelationData::rd_rel, RelationData::rd_tableam, TableAmRoutine::slot_callbacks, TTSOpsHeapTuple, and TTSOpsVirtual.

Referenced by ATRewriteTable(), ExecGetReturningSlot(), ExecGetTriggerNewSlot(), ExecGetTriggerOldSlot(), ExecInitBitmapHeapScan(), ExecInitIndexOnlyScan(), ExecInitIndexScan(), ExecInitModifyTable(), ExecInitSampleScan(), ExecInitSeqScan(), ExecInitTidScan(), and table_slot_create().

45 {
46  const TupleTableSlotOps *tts_cb;
47 
48  if (relation->rd_tableam)
49  tts_cb = relation->rd_tableam->slot_callbacks(relation);
50  else if (relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
51  {
52  /*
53  * Historically FDWs expect to store heap tuples in slots. Continue
54  * handing them one, to make it less painful to adapt FDWs to new
55  * versions. The cost of a heap slot over a virtual slot is pretty
56  * small.
57  */
58  tts_cb = &TTSOpsHeapTuple;
59  }
60  else
61  {
62  /*
63  * These need to be supported, as some parts of the code (like COPY)
64  * need to create slots for such relations too. It seems better to
65  * centralize the knowledge that a heap slot is the right thing in
66  * that case here.
67  */
68  Assert(relation->rd_rel->relkind == RELKIND_VIEW ||
69  relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE);
70  tts_cb = &TTSOpsVirtual;
71  }
72 
73  return tts_cb;
74 }
const TupleTableSlotOps TTSOpsVirtual
Definition: execTuples.c:83
Form_pg_class rd_rel
Definition: rel.h:109
const TupleTableSlotOps *(* slot_callbacks)(Relation rel)
Definition: tableam.h:176
const struct TableAmRoutine * rd_tableam
Definition: rel.h:171
#define Assert(condition)
Definition: c.h:738
const TupleTableSlotOps TTSOpsHeapTuple
Definition: execTuples.c:84

◆ table_slot_create()

TupleTableSlot* table_slot_create ( Relation  relation,
List **  reglist 
)

Definition at line 77 of file tableam.c.

References lappend(), MakeSingleTupleTableSlot(), RelationGetDescr, and table_slot_callbacks().

Referenced by acquire_sample_rows(), AlterDomainNotNull(), apply_handle_tuple_routing(), check_default_partition_contents(), check_exclusion_or_unique_constraint(), CopyFrom(), CopyMultiInsertInfoNextFreeSlot(), CopyTo(), DefineQueryRewrite(), EvalPlanQualSlot(), ExecInitModifyTable(), ExecInitPartitionInfo(), ExecInitRoutingInfo(), FindReplTupleInLocalRel(), get_actual_variable_range(), heapam_index_build_range_scan(), heapam_relation_copy_for_cluster(), IndexCheckExclusion(), RelationFindReplTupleSeq(), systable_beginscan(), systable_beginscan_ordered(), table_index_fetch_tuple_check(), unique_key_recheck(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

78 {
79  const TupleTableSlotOps *tts_cb;
80  TupleTableSlot *slot;
81 
82  tts_cb = table_slot_callbacks(relation);
83  slot = MakeSingleTupleTableSlot(RelationGetDescr(relation), tts_cb);
84 
85  if (reglist)
86  *reglist = lappend(*reglist, slot);
87 
88  return slot;
89 }
#define RelationGetDescr(relation)
Definition: rel.h:482
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1208
const TupleTableSlotOps * table_slot_callbacks(Relation relation)
Definition: tableam.c:44
List * lappend(List *list, void *datum)
Definition: list.c:321

◆ table_tuple_get_latest_tid()

void table_tuple_get_latest_tid ( TableScanDesc  scan,
ItemPointer  tid 
)

Definition at line 232 of file tableam.c.

References ereport, errcode(), errmsg(), ERROR, ItemPointerGetBlockNumberNoCheck, ItemPointerGetOffsetNumberNoCheck, RelationData::rd_tableam, RelationGetRelationName, TableScanDescData::rs_rd, TableAmRoutine::tuple_get_latest_tid, and TableAmRoutine::tuple_tid_valid.

Referenced by currtid_byrelname(), currtid_byreloid(), table_tuple_tid_valid(), and TidNext().

233 {
234  Relation rel = scan->rs_rd;
235  const TableAmRoutine *tableam = rel->rd_tableam;
236 
237  /*
238  * Since this can be called with user-supplied TID, don't trust the input
239  * too much.
240  */
241  if (!tableam->tuple_tid_valid(scan, tid))
242  ereport(ERROR,
243  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
244  errmsg("tid (%u, %u) is not valid for relation \"%s\"",
 245  ItemPointerGetBlockNumberNoCheck(tid),
 246  ItemPointerGetOffsetNumberNoCheck(tid),
 247  RelationGetRelationName(rel))));
248 
249  tableam->tuple_get_latest_tid(scan, tid);
250 }
#define ItemPointerGetOffsetNumberNoCheck(pointer)
Definition: itemptr.h:108
int errcode(int sqlerrcode)
Definition: elog.c:610
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:490
const struct TableAmRoutine * rd_tableam
Definition: rel.h:171
#define ereport(elevel,...)
Definition: elog.h:144
void(* tuple_get_latest_tid)(TableScanDesc scan, ItemPointer tid)
Definition: tableam.h:333
Relation rs_rd
Definition: relscan.h:34
bool(* tuple_tid_valid)(TableScanDesc scan, ItemPointer tid)
Definition: tableam.h:326
int errmsg(const char *fmt,...)
Definition: elog.c:824
#define ItemPointerGetBlockNumberNoCheck(pointer)
Definition: itemptr.h:89

Variable Documentation

◆ default_table_access_method

char* default_table_access_method = DEFAULT_TABLE_ACCESS_METHOD

Definition at line 34 of file tableam.c.

Referenced by DefineRelation().

◆ synchronize_seqscans

bool synchronize_seqscans = true

Definition at line 35 of file tableam.c.

Referenced by initscan(), and table_block_parallelscan_initialize().