PostgreSQL Source Code git master
tableam.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/syncscan.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "optimizer/optimizer.h"
#include "optimizer/plancat.h"
#include "port/pg_bitutils.h"
#include "storage/bufmgr.h"
#include "storage/shmem.h"
#include "storage/smgr.h"
Include dependency graph for tableam.c:


Macros

#define PARALLEL_SEQSCAN_NCHUNKS   2048
 
#define PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS   64
 
#define PARALLEL_SEQSCAN_MAX_CHUNK_SIZE   8192
 

Functions

const TupleTableSlotOps * table_slot_callbacks (Relation relation)
 
TupleTableSlot * table_slot_create (Relation relation, List **reglist)
 
TableScanDesc table_beginscan_catalog (Relation relation, int nkeys, ScanKeyData *key)
 
Size table_parallelscan_estimate (Relation rel, Snapshot snapshot)
 
void table_parallelscan_initialize (Relation rel, ParallelTableScanDesc pscan, Snapshot snapshot)
 
TableScanDesc table_beginscan_parallel (Relation relation, ParallelTableScanDesc pscan)
 
TableScanDesc table_beginscan_parallel_tidrange (Relation relation, ParallelTableScanDesc pscan)
 
bool table_index_fetch_tuple_check (Relation rel, ItemPointer tid, Snapshot snapshot, bool *all_dead)
 
void table_tuple_get_latest_tid (TableScanDesc scan, ItemPointer tid)
 
void simple_table_tuple_insert (Relation rel, TupleTableSlot *slot)
 
void simple_table_tuple_delete (Relation rel, ItemPointer tid, Snapshot snapshot)
 
void simple_table_tuple_update (Relation rel, ItemPointer otid, TupleTableSlot *slot, Snapshot snapshot, TU_UpdateIndexes *update_indexes)
 
Size table_block_parallelscan_estimate (Relation rel)
 
Size table_block_parallelscan_initialize (Relation rel, ParallelTableScanDesc pscan)
 
void table_block_parallelscan_reinitialize (Relation rel, ParallelTableScanDesc pscan)
 
void table_block_parallelscan_startblock_init (Relation rel, ParallelBlockTableScanWorker pbscanwork, ParallelBlockTableScanDesc pbscan, BlockNumber startblock, BlockNumber numblocks)
 
BlockNumber table_block_parallelscan_nextpage (Relation rel, ParallelBlockTableScanWorker pbscanwork, ParallelBlockTableScanDesc pbscan)
 
uint64 table_block_relation_size (Relation rel, ForkNumber forkNumber)
 
void table_block_relation_estimate_size (Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac, Size overhead_bytes_per_tuple, Size usable_bytes_per_page)
 

Variables

char * default_table_access_method = DEFAULT_TABLE_ACCESS_METHOD
 
bool synchronize_seqscans = true
 

Macro Definition Documentation

◆ PARALLEL_SEQSCAN_MAX_CHUNK_SIZE

#define PARALLEL_SEQSCAN_MAX_CHUNK_SIZE   8192

Definition at line 46 of file tableam.c.

◆ PARALLEL_SEQSCAN_NCHUNKS

#define PARALLEL_SEQSCAN_NCHUNKS   2048

Definition at line 42 of file tableam.c.

◆ PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS

#define PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS   64

Definition at line 44 of file tableam.c.

Function Documentation

◆ simple_table_tuple_delete()

void simple_table_tuple_delete ( Relation  rel,
ItemPointer  tid,
Snapshot  snapshot 
)

Definition at line 322 of file tableam.c.

323{
324 TM_Result result;
325 TM_FailureData tmfd;
326
327 result = table_tuple_delete(rel, tid,
328 GetCurrentCommandId(true),
329 snapshot, InvalidSnapshot,
330 true /* wait for commit */ ,
331 &tmfd, false /* changingPart */ );
332
333 switch (result)
334 {
335 case TM_SelfModified:
336 /* Tuple was already updated in current command? */
337 elog(ERROR, "tuple already updated by self");
338 break;
339
340 case TM_Ok:
341 /* done successfully */
342 break;
343
344 case TM_Updated:
345 elog(ERROR, "tuple concurrently updated");
346 break;
347
348 case TM_Deleted:
349 elog(ERROR, "tuple concurrently deleted");
350 break;
351
352 default:
353 elog(ERROR, "unrecognized table_tuple_delete status: %u", result);
354 break;
355 }
356}
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
#define InvalidSnapshot
Definition: snapshot.h:119
TM_Result
Definition: tableam.h:73
@ TM_Ok
Definition: tableam.h:78
@ TM_Deleted
Definition: tableam.h:93
@ TM_Updated
Definition: tableam.h:90
@ TM_SelfModified
Definition: tableam.h:84
static TM_Result table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
Definition: tableam.h:1467
CommandId GetCurrentCommandId(bool used)
Definition: xact.c:830

References elog, ERROR, GetCurrentCommandId(), InvalidSnapshot, table_tuple_delete(), TM_Deleted, TM_Ok, TM_SelfModified, and TM_Updated.

Referenced by ExecSimpleRelationDelete().

◆ simple_table_tuple_insert()

void simple_table_tuple_insert ( Relation  rel,
TupleTableSlot *  slot 
)

Definition at line 308 of file tableam.c.

309{
310 table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);
311}
static void table_tuple_insert(Relation rel, TupleTableSlot *slot, CommandId cid, int options, BulkInsertStateData *bistate)
Definition: tableam.h:1377

References GetCurrentCommandId(), and table_tuple_insert().

Referenced by ExecSimpleRelationInsert().
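
As a usage illustration only (not code from this file), the sketch below fills a slot and inserts it through this wrapper, mirroring the pattern the logical-replication apply path uses before calling ExecSimpleRelationInsert(); the relation, its single int4 column, and the value are assumptions, and index maintenance, triggers, and constraint checks are omitted.

/* Assumed context: a backend with "rel" already opened and locked, and a
 * single-column (int4) table.  Illustrative sketch only. */
TupleTableSlot *slot = table_slot_create(rel, NULL);

ExecClearTuple(slot);
slot->tts_values[0] = Int32GetDatum(42);
slot->tts_isnull[0] = false;
ExecStoreVirtualTuple(slot);

simple_table_tuple_insert(rel, slot);

ExecDropSingleTupleTableSlot(slot);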

◆ simple_table_tuple_update()

void simple_table_tuple_update ( Relation  rel,
ItemPointer  otid,
TupleTableSlot *  slot,
Snapshot  snapshot,
TU_UpdateIndexes *  update_indexes 
)

Definition at line 367 of file tableam.c.

371{
372 TM_Result result;
373 TM_FailureData tmfd;
374 LockTupleMode lockmode;
375
376 result = table_tuple_update(rel, otid, slot,
377 GetCurrentCommandId(true),
378 snapshot, InvalidSnapshot,
379 true /* wait for commit */ ,
380 &tmfd, &lockmode, update_indexes);
381
382 switch (result)
383 {
384 case TM_SelfModified:
385 /* Tuple was already updated in current command? */
386 elog(ERROR, "tuple already updated by self");
387 break;
388
389 case TM_Ok:
390 /* done successfully */
391 break;
392
393 case TM_Updated:
394 elog(ERROR, "tuple concurrently updated");
395 break;
396
397 case TM_Deleted:
398 elog(ERROR, "tuple concurrently deleted");
399 break;
400
401 default:
402 elog(ERROR, "unrecognized table_tuple_update status: %u", result);
403 break;
404 }
405}
LockTupleMode
Definition: lockoptions.h:50
static TM_Result table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
Definition: tableam.h:1512

References elog, ERROR, GetCurrentCommandId(), InvalidSnapshot, table_tuple_update(), TM_Deleted, TM_Ok, TM_SelfModified, and TM_Updated.

Referenced by ExecSimpleRelationUpdate().

◆ table_beginscan_catalog()

TableScanDesc table_beginscan_catalog ( Relation  relation,
int  nkeys,
ScanKeyData *  key 
)

Definition at line 113 of file tableam.c.

114{
115 uint32 flags = SO_TYPE_SEQSCAN |
116 SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
117 Oid relid = RelationGetRelid(relation);
118 Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
119
120 return relation->rd_tableam->scan_begin(relation, snapshot, nkeys, key,
121 NULL, flags);
122}
uint32_t uint32
Definition: c.h:541
unsigned int Oid
Definition: postgres_ext.h:32
#define RelationGetRelid(relation)
Definition: rel.h:515
Snapshot GetCatalogSnapshot(Oid relid)
Definition: snapmgr.c:385
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:824
const struct TableAmRoutine * rd_tableam
Definition: rel.h:189
TableScanDesc(* scan_begin)(Relation rel, Snapshot snapshot, int nkeys, ScanKeyData *key, ParallelTableScanDesc pscan, uint32 flags)
Definition: tableam.h:327
@ SO_ALLOW_STRAT
Definition: tableam.h:58
@ SO_TEMP_SNAPSHOT
Definition: tableam.h:65
@ SO_ALLOW_PAGEMODE
Definition: tableam.h:62
@ SO_ALLOW_SYNC
Definition: tableam.h:60
@ SO_TYPE_SEQSCAN
Definition: tableam.h:49

References GetCatalogSnapshot(), sort-test::key, RelationData::rd_tableam, RegisterSnapshot(), RelationGetRelid, TableAmRoutine::scan_begin, SO_ALLOW_PAGEMODE, SO_ALLOW_STRAT, SO_ALLOW_SYNC, SO_TEMP_SNAPSHOT, and SO_TYPE_SEQSCAN.

Referenced by AlterTableMoveAll(), AlterTableSpaceOptions(), check_db_file_conflict(), CreateDatabaseUsingFileCopy(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllPublicationRelations(), getRelationsInNamespace(), GetSchemaPublicationRelations(), objectsInSchemaToOids(), populate_typ_list(), ReindexMultipleTables(), remove_dbtablespaces(), RemoveSubscriptionRel(), RenameTableSpace(), ThereIsAtLeastOneRole(), and vac_truncate_clog().
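
As a usage illustration (not code from this file), the fragment below counts the rows of a shared catalog the way callers such as get_database_list() typically drive this function; the choice of pg_database and the surrounding helper function are assumptions.

#include "postgres.h"
#include "access/table.h"
#include "access/tableam.h"
#include "catalog/pg_database.h"
#include "executor/tuptable.h"

/* Hypothetical helper: count the rows of pg_database with a catalog snapshot. */
static int
count_databases(void)
{
    Relation        rel = table_open(DatabaseRelationId, AccessShareLock);
    TableScanDesc   scan = table_beginscan_catalog(rel, 0, NULL);
    TupleTableSlot *slot = table_slot_create(rel, NULL);
    int             ntuples = 0;

    while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
        ntuples++;

    ExecDropSingleTupleTableSlot(slot);
    table_endscan(scan);        /* the AM's scan_end releases the SO_TEMP_SNAPSHOT snapshot */
    table_close(rel, AccessShareLock);

    return ntuples;
}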

◆ table_beginscan_parallel()

TableScanDesc table_beginscan_parallel ( Relation  relation,
ParallelTableScanDesc  pscan 
)

Definition at line 166 of file tableam.c.

167{
168 Snapshot snapshot;
169 uint32 flags = SO_TYPE_SEQSCAN |
170 SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
171
172 Assert(RelFileLocatorEquals(relation->rd_locator, pscan->phs_locator));
173
174 if (!pscan->phs_snapshot_any)
175 {
176 /* Snapshot was serialized -- restore it */
177 snapshot = RestoreSnapshot((char *) pscan + pscan->phs_snapshot_off);
178 RegisterSnapshot(snapshot);
179 flags |= SO_TEMP_SNAPSHOT;
180 }
181 else
182 {
183 /* SnapshotAny passed by caller (not serialized) */
184 snapshot = SnapshotAny;
185 }
186
187 return relation->rd_tableam->scan_begin(relation, snapshot, 0, NULL,
188 pscan, flags);
189}
Assert(PointerIsAligned(start, uint64))
#define RelFileLocatorEquals(locator1, locator2)
Snapshot RestoreSnapshot(char *start_address)
Definition: snapmgr.c:1793
#define SnapshotAny
Definition: snapmgr.h:33
RelFileLocator phs_locator
Definition: relscan.h:82
RelFileLocator rd_locator
Definition: rel.h:57

References Assert(), ParallelTableScanDescData::phs_locator, ParallelTableScanDescData::phs_snapshot_any, ParallelTableScanDescData::phs_snapshot_off, RelationData::rd_locator, RelationData::rd_tableam, RegisterSnapshot(), RelFileLocatorEquals, RestoreSnapshot(), TableAmRoutine::scan_begin, SnapshotAny, SO_ALLOW_PAGEMODE, SO_ALLOW_STRAT, SO_ALLOW_SYNC, SO_TEMP_SNAPSHOT, and SO_TYPE_SEQSCAN.

Referenced by _brin_parallel_scan_and_build(), _bt_parallel_scan_and_sort(), _gin_parallel_scan_and_build(), ExecSeqScanInitializeDSM(), and ExecSeqScanInitializeWorker().

◆ table_beginscan_parallel_tidrange()

TableScanDesc table_beginscan_parallel_tidrange ( Relation  relation,
ParallelTableScanDesc  pscan 
)

Definition at line 192 of file tableam.c.

194{
195 Snapshot snapshot;
196 uint32 flags = SO_TYPE_TIDRANGESCAN | SO_ALLOW_PAGEMODE;
197 TableScanDesc sscan;
198
199 Assert(RelFileLocatorEquals(relation->rd_locator, pscan->phs_locator));
200
201 /* disable syncscan in parallel tid range scan. */
202 pscan->phs_syncscan = false;
203
204 if (!pscan->phs_snapshot_any)
205 {
206 /* Snapshot was serialized -- restore it */
207 snapshot = RestoreSnapshot((char *) pscan + pscan->phs_snapshot_off);
208 RegisterSnapshot(snapshot);
209 flags |= SO_TEMP_SNAPSHOT;
210 }
211 else
212 {
213 /* SnapshotAny passed by caller (not serialized) */
214 snapshot = SnapshotAny;
215 }
216
217 sscan = relation->rd_tableam->scan_begin(relation, snapshot, 0, NULL,
218 pscan, flags);
219 return sscan;
220}
@ SO_TYPE_TIDRANGESCAN
Definition: tableam.h:53

References Assert(), ParallelTableScanDescData::phs_locator, ParallelTableScanDescData::phs_snapshot_any, ParallelTableScanDescData::phs_snapshot_off, ParallelTableScanDescData::phs_syncscan, RelationData::rd_locator, RelationData::rd_tableam, RegisterSnapshot(), RelFileLocatorEquals, RestoreSnapshot(), TableAmRoutine::scan_begin, SnapshotAny, SO_ALLOW_PAGEMODE, SO_TEMP_SNAPSHOT, and SO_TYPE_TIDRANGESCAN.

Referenced by ExecTidRangeScanInitializeDSM(), and ExecTidRangeScanInitializeWorker().

◆ table_block_parallelscan_estimate()

Size table_block_parallelscan_estimate ( Relation  rel)

Definition at line 414 of file tableam.c.

415{
416 return sizeof(ParallelBlockTableScanDescData);
417}
struct ParallelBlockTableScanDescData ParallelBlockTableScanDescData

◆ table_block_parallelscan_initialize()

Size table_block_parallelscan_initialize ( Relation  rel,
ParallelTableScanDesc  pscan 
)

Definition at line 420 of file tableam.c.

421{
422 ParallelBlockTableScanDesc bpscan = (ParallelBlockTableScanDesc) pscan;
423
424 bpscan->base.phs_locator = rel->rd_locator;
425 bpscan->phs_nblocks = RelationGetNumberOfBlocks(rel);
426 /* compare phs_syncscan initialization to similar logic in initscan */
427 bpscan->base.phs_syncscan = synchronize_seqscans &&
428 !RelationUsesLocalBuffers(rel) &&
429 bpscan->phs_nblocks > NBuffers / 4;
430 SpinLockInit(&bpscan->phs_mutex);
431 bpscan->phs_startblock = InvalidBlockNumber;
432 bpscan->phs_numblock = InvalidBlockNumber;
433 pg_atomic_init_u64(&bpscan->phs_nallocated, 0);
434
435 return sizeof(ParallelBlockTableScanDescData);
436}
static void pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: atomics.h:451
#define InvalidBlockNumber
Definition: block.h:33
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:294
int NBuffers
Definition: globals.c:142
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:647
struct ParallelBlockTableScanDescData * ParallelBlockTableScanDesc
Definition: relscan.h:104
#define SpinLockInit(lock)
Definition: spin.h:57
pg_atomic_uint64 phs_nallocated
Definition: relscan.h:101
ParallelTableScanDescData base
Definition: relscan.h:94
bool synchronize_seqscans
Definition: tableam.c:50

References ParallelBlockTableScanDescData::base, InvalidBlockNumber, NBuffers, pg_atomic_init_u64(), ParallelTableScanDescData::phs_locator, ParallelBlockTableScanDescData::phs_mutex, ParallelBlockTableScanDescData::phs_nallocated, ParallelBlockTableScanDescData::phs_nblocks, ParallelBlockTableScanDescData::phs_numblock, ParallelBlockTableScanDescData::phs_startblock, ParallelTableScanDescData::phs_syncscan, RelationData::rd_locator, RelationGetNumberOfBlocks, RelationUsesLocalBuffers, SpinLockInit, and synchronize_seqscans.

◆ table_block_parallelscan_nextpage()

BlockNumber table_block_parallelscan_nextpage ( Relation  rel,
ParallelBlockTableScanWorker  pbscanwork,
ParallelBlockTableScanDesc  pbscan 
)

Definition at line 554 of file tableam.c.

557{
558 BlockNumber scan_nblocks;
559 BlockNumber page;
560 uint64 nallocated;
561
562 /*
563 * The logic below allocates block numbers out to parallel workers in a
564 * way that each worker will receive a set of consecutive block numbers to
565 * scan. Earlier versions of this would allocate the next highest block
566 * number to the next worker to call this function. This would generally
567 * result in workers never receiving consecutive block numbers. Some
568 * operating systems would not detect the sequential I/O pattern due to
569 * each backend being a different process which could result in poor
570 * performance due to inefficient or no readahead. To work around this
571 * issue, we now allocate a range of block numbers for each worker and
572 * when they come back for another block, we give them the next one in
573 * that range until the range is complete. When the worker completes the
574 * range of blocks we then allocate another range for it and return the
575 * first block number from that range.
576 *
577 * Here we name these ranges of blocks "chunks". The initial size of
578 * these chunks is determined in table_block_parallelscan_startblock_init
579 * based on the number of blocks to scan. Towards the end of the scan, we
580 * start making reductions in the size of the chunks in order to attempt
581 * to divide the remaining work over all the workers as evenly as
582 * possible.
583 *
584 * Here pbscanwork is local worker memory. phsw_chunk_remaining tracks
585 * the number of blocks remaining in the chunk. When that reaches 0 then
586 * we must allocate a new chunk for the worker.
587 *
588 * phs_nallocated tracks how many blocks have been allocated to workers
589 * already. When phs_nallocated >= rs_nblocks, all blocks have been
590 * allocated.
591 *
592 * Because we use an atomic fetch-and-add to fetch the current value, the
593 * phs_nallocated counter will exceed rs_nblocks, because workers will
594 * still increment the value, when they try to allocate the next block but
595 * all blocks have been allocated already. The counter must be 64 bits
596 * wide because of that, to avoid wrapping around when scan_nblocks is
597 * close to 2^32.
598 *
599 * The actual block to return is calculated by adding the counter to the
600 * starting block number, modulo phs_nblocks.
601 */
602
603 /* First, figure out how many blocks we're planning on scanning */
604 if (pbscan->phs_numblock == InvalidBlockNumber)
605 scan_nblocks = pbscan->phs_nblocks;
606 else
607 scan_nblocks = pbscan->phs_numblock;
608
609 /*
610 * Now check if we have any remaining blocks in a previous chunk for this
611 * worker. We must consume all of the blocks from that before we allocate
612 * a new chunk to the worker.
613 */
614 if (pbscanwork->phsw_chunk_remaining > 0)
615 {
616 /*
617 * Give them the next block in the range and update the remaining
618 * number of blocks.
619 */
620 nallocated = ++pbscanwork->phsw_nallocated;
621 pbscanwork->phsw_chunk_remaining--;
622 }
623 else
624 {
625 /*
626 * When we've only got PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS chunks
627 * remaining in the scan, we half the chunk size. Since we reduce the
628 * chunk size here, we'll hit this again after doing
629 * PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS at the new size. After a few
630 * iterations of this, we'll end up doing the last few blocks with the
631 * chunk size set to 1.
632 */
633 if (pbscanwork->phsw_chunk_size > 1 &&
634 pbscanwork->phsw_nallocated > scan_nblocks -
635 (pbscanwork->phsw_chunk_size * PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS))
636 pbscanwork->phsw_chunk_size >>= 1;
637
638 nallocated = pbscanwork->phsw_nallocated =
639 pg_atomic_fetch_add_u64(&pbscan->phs_nallocated,
640 pbscanwork->phsw_chunk_size);
641
642 /*
643 * Set the remaining number of blocks in this chunk so that subsequent
644 * calls from this worker continue on with this chunk until it's done.
645 */
646 pbscanwork->phsw_chunk_remaining = pbscanwork->phsw_chunk_size - 1;
647 }
648
649 /* Check if we've run out of blocks to scan */
650 if (nallocated >= scan_nblocks)
651 page = InvalidBlockNumber; /* all blocks have been allocated */
652 else
653 page = (nallocated + pbscan->phs_startblock) % pbscan->phs_nblocks;
654
655 /*
656 * Report scan location. Normally, we report the current page number.
657 * When we reach the end of the scan, though, we report the starting page,
658 * not the ending page, just so the starting positions for later scans
659 * doesn't slew backwards. We only report the position at the end of the
660 * scan once, though: subsequent callers will report nothing.
661 */
662 if (pbscan->base.phs_syncscan)
663 {
664 if (page != InvalidBlockNumber)
665 ss_report_location(rel, page);
666 else if (nallocated == pbscan->phs_nblocks)
667 ss_report_location(rel, pbscan->phs_startblock);
668 }
669
670 return page;
671}
static uint64 pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
Definition: atomics.h:530
uint32 BlockNumber
Definition: block.h:31
uint64_t uint64
Definition: c.h:542
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:81
void ss_report_location(Relation rel, BlockNumber location)
Definition: syncscan.c:289
#define PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS
Definition: tableam.c:44

References ParallelBlockTableScanDescData::base, if(), InvalidBlockNumber, PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS, pg_atomic_fetch_add_u64(), ParallelBlockTableScanDescData::phs_nallocated, ParallelBlockTableScanDescData::phs_nblocks, ParallelBlockTableScanDescData::phs_numblock, ParallelBlockTableScanDescData::phs_startblock, ParallelTableScanDescData::phs_syncscan, ParallelBlockTableScanWorkerData::phsw_chunk_remaining, ParallelBlockTableScanWorkerData::phsw_chunk_size, ParallelBlockTableScanWorkerData::phsw_nallocated, and ss_report_location().

Referenced by heap_scan_stream_read_next_parallel().
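
A condensed sketch of how a table AM consumes this allocator, modeled loosely on heap_scan_stream_read_next_parallel() (the surrounding loop and variable names are assumptions): initialize the worker-local state once, then keep calling table_block_parallelscan_nextpage() until it reports InvalidBlockNumber.

/* One-time per-worker setup; no forced start block and no block limit. */
table_block_parallelscan_startblock_init(rel, pbscanwork, pbscan,
                                         InvalidBlockNumber,
                                         InvalidBlockNumber);

for (;;)
{
    BlockNumber blkno = table_block_parallelscan_nextpage(rel, pbscanwork, pbscan);

    if (blkno == InvalidBlockNumber)
        break;                  /* every block has been handed out */

    /* ... read and process block "blkno" ... */
}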

◆ table_block_parallelscan_reinitialize()

void table_block_parallelscan_reinitialize ( Relation  rel,
ParallelTableScanDesc  pscan 
)

Definition at line 439 of file tableam.c.

440{
441 ParallelBlockTableScanDesc bpscan = (ParallelBlockTableScanDesc) pscan;
442
443 pg_atomic_write_u64(&bpscan->phs_nallocated, 0);
444}
static void pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: atomics.h:483

References pg_atomic_write_u64(), and ParallelBlockTableScanDescData::phs_nallocated.

◆ table_block_parallelscan_startblock_init()

void table_block_parallelscan_startblock_init ( Relation  rel,
ParallelBlockTableScanWorker  pbscanwork,
ParallelBlockTableScanDesc  pbscan,
BlockNumber  startblock,
BlockNumber  numblocks 
)

Definition at line 459 of file tableam.c.

464{
465 BlockNumber sync_startpage = InvalidBlockNumber;
466 BlockNumber scan_nblocks;
467
468 /* Reset the state we use for controlling allocation size. */
469 memset(pbscanwork, 0, sizeof(*pbscanwork));
470
471 StaticAssertStmt(MaxBlockNumber <= 0xFFFFFFFE,
472 "pg_nextpower2_32 may be too small for non-standard BlockNumber width");
473
474retry:
475 /* Grab the spinlock. */
476 SpinLockAcquire(&pbscan->phs_mutex);
477
478 /*
479 * When the caller specified a limit on the number of blocks to scan, set
480 * that in the ParallelBlockTableScanDesc, if it's not been done by
481 * another worker already.
482 */
483 if (numblocks != InvalidBlockNumber &&
484 pbscan->phs_numblock == InvalidBlockNumber)
485 {
486 pbscan->phs_numblock = numblocks;
487 }
488
489 /*
490 * If the scan's phs_startblock has not yet been initialized, we must do
491 * so now. If a startblock was specified, start there, otherwise if this
492 * is not a synchronized scan, we just start at block 0, but if it is a
493 * synchronized scan, we must get the starting position from the
494 * synchronized scan machinery. We can't hold the spinlock while doing
495 * that, though, so release the spinlock, get the information we need, and
496 * retry. If nobody else has initialized the scan in the meantime, we'll
497 * fill in the value we fetched on the second time through.
498 */
499 if (pbscan->phs_startblock == InvalidBlockNumber)
500 {
501 if (startblock != InvalidBlockNumber)
502 pbscan->phs_startblock = startblock;
503 else if (!pbscan->base.phs_syncscan)
504 pbscan->phs_startblock = 0;
505 else if (sync_startpage != InvalidBlockNumber)
506 pbscan->phs_startblock = sync_startpage;
507 else
508 {
509 SpinLockRelease(&pbscan->phs_mutex);
510 sync_startpage = ss_get_location(rel, pbscan->phs_nblocks);
511 goto retry;
512 }
513 }
514 SpinLockRelease(&pbscan->phs_mutex);
515
516 /*
517 * Figure out how many blocks we're going to scan; either all of them, or
518 * just phs_numblock's worth, if a limit has been imposed.
519 */
520 if (pbscan->phs_numblock == InvalidBlockNumber)
521 scan_nblocks = pbscan->phs_nblocks;
522 else
523 scan_nblocks = pbscan->phs_numblock;
524
525 /*
526 * We determine the chunk size based on scan_nblocks. First we split
527 * scan_nblocks into PARALLEL_SEQSCAN_NCHUNKS chunks then we calculate the
528 * next highest power of 2 number of the result. This means we split the
529 * blocks we're scanning into somewhere between PARALLEL_SEQSCAN_NCHUNKS
530 * and PARALLEL_SEQSCAN_NCHUNKS / 2 chunks.
531 */
532 pbscanwork->phsw_chunk_size = pg_nextpower2_32(Max(scan_nblocks /
533 PARALLEL_SEQSCAN_NCHUNKS, 1));
534
535 /*
536 * Ensure we don't go over the maximum chunk size with larger tables. This
537 * means we may get much more than PARALLEL_SEQSCAN_NCHUNKS for larger
538 * tables. Too large a chunk size has been shown to be detrimental to
539 * sequential scan performance.
540 */
541 pbscanwork->phsw_chunk_size = Min(pbscanwork->phsw_chunk_size,
542 PARALLEL_SEQSCAN_MAX_CHUNK_SIZE);
543}
#define MaxBlockNumber
Definition: block.h:35
#define Min(x, y)
Definition: c.h:1006
#define Max(x, y)
Definition: c.h:1000
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:940
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:189
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59
BlockNumber ss_get_location(Relation rel, BlockNumber relnblocks)
Definition: syncscan.c:254
#define PARALLEL_SEQSCAN_MAX_CHUNK_SIZE
Definition: tableam.c:46
#define PARALLEL_SEQSCAN_NCHUNKS
Definition: tableam.c:42

References ParallelBlockTableScanDescData::base, InvalidBlockNumber, Max, MaxBlockNumber, Min, PARALLEL_SEQSCAN_MAX_CHUNK_SIZE, PARALLEL_SEQSCAN_NCHUNKS, pg_nextpower2_32(), ParallelBlockTableScanDescData::phs_mutex, ParallelBlockTableScanDescData::phs_nblocks, ParallelBlockTableScanDescData::phs_numblock, ParallelBlockTableScanDescData::phs_startblock, ParallelTableScanDescData::phs_syncscan, ParallelBlockTableScanWorkerData::phsw_chunk_size, SpinLockAcquire, SpinLockRelease, ss_get_location(), and StaticAssertStmt.

Referenced by heap_scan_stream_read_next_parallel().
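
To make the chunk-size arithmetic concrete, a few illustrative values (our own worked examples, using the macro values defined above, not output from the code):

/*
 * phsw_chunk_size = Min(pg_nextpower2_32(Max(scan_nblocks / 2048, 1)), 8192)
 *
 * scan_nblocks =       1,000:  1000/2048 = 0  -> Max gives 1    -> chunk = 1
 * scan_nblocks =   1,000,000:  488            -> next pow2 512  -> chunk = 512
 * scan_nblocks = 100,000,000:  48,828         -> next pow2 65,536,
 *                              clamped to PARALLEL_SEQSCAN_MAX_CHUNK_SIZE = 8192
 */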

◆ table_block_relation_estimate_size()

void table_block_relation_estimate_size ( Relation  rel,
int32 *  attr_widths,
BlockNumber *  pages,
double *  tuples,
double *  allvisfrac,
Size  overhead_bytes_per_tuple,
Size  usable_bytes_per_page 
)

Definition at line 724 of file tableam.c.

729{
730 BlockNumber curpages;
731 BlockNumber relpages;
732 double reltuples;
733 BlockNumber relallvisible;
734 double density;
735
736 /* it should have storage, so we can call the smgr */
737 curpages = RelationGetNumberOfBlocks(rel);
738
739 /* coerce values in pg_class to more desirable types */
740 relpages = (BlockNumber) rel->rd_rel->relpages;
741 reltuples = (double) rel->rd_rel->reltuples;
742 relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
743
744 /*
745 * HACK: if the relation has never yet been vacuumed, use a minimum size
746 * estimate of 10 pages. The idea here is to avoid assuming a
747 * newly-created table is really small, even if it currently is, because
748 * that may not be true once some data gets loaded into it. Once a vacuum
749 * or analyze cycle has been done on it, it's more reasonable to believe
750 * the size is somewhat stable.
751 *
752 * (Note that this is only an issue if the plan gets cached and used again
753 * after the table has been filled. What we're trying to avoid is using a
754 * nestloop-type plan on a table that has grown substantially since the
755 * plan was made. Normally, autovacuum/autoanalyze will occur once enough
756 * inserts have happened and cause cached-plan invalidation; but that
757 * doesn't happen instantaneously, and it won't happen at all for cases
758 * such as temporary tables.)
759 *
760 * We test "never vacuumed" by seeing whether reltuples < 0.
761 *
762 * If the table has inheritance children, we don't apply this heuristic.
763 * Totally empty parent tables are quite common, so we should be willing
764 * to believe that they are empty.
765 */
766 if (curpages < 10 &&
767 reltuples < 0 &&
768 !rel->rd_rel->relhassubclass)
769 curpages = 10;
770
771 /* report estimated # pages */
772 *pages = curpages;
773 /* quick exit if rel is clearly empty */
774 if (curpages == 0)
775 {
776 *tuples = 0;
777 *allvisfrac = 0;
778 return;
779 }
780
781 /* estimate number of tuples from previous tuple density */
782 if (reltuples >= 0 && relpages > 0)
783 density = reltuples / (double) relpages;
784 else
785 {
786 /*
787 * When we have no data because the relation was never yet vacuumed,
788 * estimate tuple width from attribute datatypes. We assume here that
789 * the pages are completely full, which is OK for tables but is
790 * probably an overestimate for indexes. Fortunately
791 * get_relation_info() can clamp the overestimate to the parent
792 * table's size.
793 *
794 * Note: this code intentionally disregards alignment considerations,
795 * because (a) that would be gilding the lily considering how crude
796 * the estimate is, (b) it creates platform dependencies in the
797 * default plans which are kind of a headache for regression testing,
798 * and (c) different table AMs might use different padding schemes.
799 */
800 int32 tuple_width;
801 int fillfactor;
802
803 /*
804 * Without reltuples/relpages, we also need to consider fillfactor.
805 * The other branch considers it implicitly by calculating density
806 * from actual relpages/reltuples statistics.
807 */
808 fillfactor = RelationGetFillFactor(rel, HEAP_DEFAULT_FILLFACTOR);
809
810 tuple_width = get_rel_data_width(rel, attr_widths);
811 tuple_width += overhead_bytes_per_tuple;
812 /* note: integer division is intentional here */
813 density = (usable_bytes_per_page * fillfactor / 100) / tuple_width;
814 /* There's at least one row on the page, even with low fillfactor. */
815 density = clamp_row_est(density);
816 }
817 *tuples = rint(density * (double) curpages);
818
819 /*
820 * We use relallvisible as-is, rather than scaling it up like we do for
821 * the pages and tuples counts, on the theory that any pages added since
822 * the last VACUUM are most likely not marked all-visible. But costsize.c
823 * wants it converted to a fraction.
824 */
825 if (relallvisible == 0 || curpages <= 0)
826 *allvisfrac = 0;
827 else if ((double) relallvisible >= curpages)
828 *allvisfrac = 1;
829 else
830 *allvisfrac = (double) relallvisible / curpages;
831}
int32_t int32
Definition: c.h:537
double clamp_row_est(double nrows)
Definition: costsize.c:213
static int fillfactor
Definition: pgbench.c:188
int32 get_rel_data_width(Relation rel, int32 *attr_widths)
Definition: plancat.c:1391
#define RelationGetFillFactor(relation, defaultff)
Definition: rel.h:375
#define HEAP_DEFAULT_FILLFACTOR
Definition: rel.h:361
Form_pg_class rd_rel
Definition: rel.h:111

References clamp_row_est(), fillfactor, get_rel_data_width(), HEAP_DEFAULT_FILLFACTOR, if(), RelationData::rd_rel, RelationGetFillFactor, and RelationGetNumberOfBlocks.

Referenced by heapam_estimate_rel_size().
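
For a sense of scale, a worked example with the values heapam_estimate_rel_size() passes on a typical 8 kB, 8-byte-MAXALIGN build (overhead_bytes_per_tuple = 24, usable_bytes_per_page = 8168); the table itself is hypothetical:

/*
 * Never-vacuumed table, get_rel_data_width() reports 50 bytes, fillfactor 100:
 *
 *   tuple_width = 50 + 24                 = 74
 *   density     = (8168 * 100 / 100) / 74 = 110   (integer division)
 *
 * With curpages = 1,000 the tuple estimate is rint(110 * 1000) = 110,000.
 */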

◆ table_block_relation_size()

uint64 table_block_relation_size ( Relation  rel,
ForkNumber  forkNumber 
)

Definition at line 687 of file tableam.c.

688{
689 uint64 nblocks = 0;
690
691 /* InvalidForkNumber indicates returning the size for all forks */
692 if (forkNumber == InvalidForkNumber)
693 {
694 for (int i = 0; i < MAX_FORKNUM; i++)
695 nblocks += smgrnblocks(RelationGetSmgr(rel), i);
696 }
697 else
698 nblocks = smgrnblocks(RelationGetSmgr(rel), forkNumber);
699
700 return nblocks * BLCKSZ;
701}
int i
Definition: isn.c:77
static SMgrRelation RelationGetSmgr(Relation rel)
Definition: rel.h:577
@ InvalidForkNumber
Definition: relpath.h:57
#define MAX_FORKNUM
Definition: relpath.h:70
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:819

References i, InvalidForkNumber, MAX_FORKNUM, RelationGetSmgr(), and smgrnblocks().

◆ table_index_fetch_tuple_check()

bool table_index_fetch_tuple_check ( Relation  rel,
ItemPointer  tid,
Snapshot  snapshot,
bool *  all_dead 
)

Definition at line 240 of file tableam.c.

244{
245 IndexFetchTableData *scan;
246 TupleTableSlot *slot;
247 bool call_again = false;
248 bool found;
249
250 slot = table_slot_create(rel, NULL);
251 scan = table_index_fetch_begin(rel);
252 found = table_index_fetch_tuple(scan, tid, snapshot, slot, &call_again,
253 all_dead);
254 table_index_fetch_end(scan);
255 ExecDropSingleTupleTableSlot(slot);
256
257 return found;
258}
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1443
TupleTableSlot * table_slot_create(Relation relation, List **reglist)
Definition: tableam.c:92
static IndexFetchTableData * table_index_fetch_begin(Relation rel)
Definition: tableam.h:1167
static void table_index_fetch_end(struct IndexFetchTableData *scan)
Definition: tableam.h:1186
static bool table_index_fetch_tuple(struct IndexFetchTableData *scan, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, bool *call_again, bool *all_dead)
Definition: tableam.h:1216

References ExecDropSingleTupleTableSlot(), table_index_fetch_begin(), table_index_fetch_end(), table_index_fetch_tuple(), and table_slot_create().

Referenced by _bt_check_unique().

◆ table_parallelscan_estimate()

Size table_parallelscan_estimate ( Relation  rel,
Snapshot  snapshot 
)

Definition at line 131 of file tableam.c.

132{
133 Size sz = 0;
134
135 if (IsMVCCSnapshot(snapshot))
136 sz = add_size(sz, EstimateSnapshotSpace(snapshot));
137 else
138 Assert(snapshot == SnapshotAny);
139
140 sz = add_size(sz, rel->rd_tableam->parallelscan_estimate(rel));
141
142 return sz;
143}
size_t Size
Definition: c.h:613
Size add_size(Size s1, Size s2)
Definition: shmem.c:495
Size EstimateSnapshotSpace(Snapshot snapshot)
Definition: snapmgr.c:1712
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:55
Size(* parallelscan_estimate)(Relation rel)
Definition: tableam.h:392

References add_size(), Assert(), EstimateSnapshotSpace(), IsMVCCSnapshot, TableAmRoutine::parallelscan_estimate, RelationData::rd_tableam, and SnapshotAny.

Referenced by _brin_parallel_estimate_shared(), _bt_parallel_estimate_shared(), _gin_parallel_estimate_shared(), ExecSeqScanEstimate(), and ExecTidRangeScanEstimate().

◆ table_parallelscan_initialize()

void table_parallelscan_initialize ( Relation  rel,
ParallelTableScanDesc  pscan,
Snapshot  snapshot 
)

Definition at line 146 of file tableam.c.

148{
149 Size snapshot_off = rel->rd_tableam->parallelscan_initialize(rel, pscan);
150
151 pscan->phs_snapshot_off = snapshot_off;
152
153 if (IsMVCCSnapshot(snapshot))
154 {
155 SerializeSnapshot(snapshot, (char *) pscan + pscan->phs_snapshot_off);
156 pscan->phs_snapshot_any = false;
157 }
158 else
159 {
160 Assert(snapshot == SnapshotAny);
161 pscan->phs_snapshot_any = true;
162 }
163}
void SerializeSnapshot(Snapshot snapshot, char *start_address)
Definition: snapmgr.c:1736
Size(* parallelscan_initialize)(Relation rel, ParallelTableScanDesc pscan)
Definition: tableam.h:399

References Assert(), IsMVCCSnapshot, TableAmRoutine::parallelscan_initialize, ParallelTableScanDescData::phs_snapshot_any, ParallelTableScanDescData::phs_snapshot_off, RelationData::rd_tableam, SerializeSnapshot(), and SnapshotAny.

Referenced by _brin_begin_parallel(), _bt_begin_parallel(), _gin_begin_parallel(), ExecSeqScanInitializeDSM(), and ExecTidRangeScanInitializeDSM().
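
Taken together with table_parallelscan_estimate() and table_beginscan_parallel(), the leader-side flow looks roughly like the sketch below. It is condensed from the pattern ExecSeqScanInitializeDSM() follows; the shm_toc handling is simplified and the variable names (estate, pcxt, plan_node_id, scandesc) are assumptions.

/* Planning phase: reserve space in the DSM segment. */
Size sz = table_parallelscan_estimate(rel, estate->es_snapshot);
shm_toc_estimate_chunk(&pcxt->estimator, sz);
shm_toc_estimate_keys(&pcxt->estimator, 1);

/* After the DSM is created: set up the shared scan state ... */
ParallelTableScanDesc pscan = shm_toc_allocate(pcxt->toc, sz);
table_parallelscan_initialize(rel, pscan, estate->es_snapshot);
shm_toc_insert(pcxt->toc, plan_node_id, pscan);

/* ... and begin the leader's own scan; workers later look up "pscan" in the
 * toc and call table_beginscan_parallel(rel, pscan) themselves. */
scandesc = table_beginscan_parallel(rel, pscan);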

◆ table_slot_callbacks()

const TupleTableSlotOps * table_slot_callbacks ( Relation  relation)

Definition at line 59 of file tableam.c.

60{
61 const TupleTableSlotOps *tts_cb;
62
63 if (relation->rd_tableam)
64 tts_cb = relation->rd_tableam->slot_callbacks(relation);
65 else if (relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
66 {
67 /*
68 * Historically FDWs expect to store heap tuples in slots. Continue
69 * handing them one, to make it less painful to adapt FDWs to new
70 * versions. The cost of a heap slot over a virtual slot is pretty
71 * small.
72 */
73 tts_cb = &TTSOpsHeapTuple;
74 }
75 else
76 {
77 /*
78 * These need to be supported, as some parts of the code (like COPY)
79 * need to create slots for such relations too. It seems better to
80 * centralize the knowledge that a heap slot is the right thing in
81 * that case here.
82 */
83 Assert(relation->rd_rel->relkind == RELKIND_VIEW ||
84 relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE);
85 tts_cb = &TTSOpsVirtual;
86 }
87
88 return tts_cb;
89}
const TupleTableSlotOps TTSOpsVirtual
Definition: execTuples.c:84
const TupleTableSlotOps TTSOpsHeapTuple
Definition: execTuples.c:85
const TupleTableSlotOps *(* slot_callbacks)(Relation rel)
Definition: tableam.h:303

References Assert(), RelationData::rd_rel, RelationData::rd_tableam, TableAmRoutine::slot_callbacks, TTSOpsHeapTuple, and TTSOpsVirtual.

Referenced by ATRewriteTable(), ExecGetAllNullSlot(), ExecGetReturningSlot(), ExecGetTriggerNewSlot(), ExecGetTriggerOldSlot(), ExecInitBitmapHeapScan(), ExecInitIndexOnlyScan(), ExecInitIndexScan(), ExecInitSampleScan(), ExecInitSeqScan(), ExecInitTidRangeScan(), ExecInitTidScan(), and table_slot_create().

◆ table_slot_create()

TupleTableSlot * table_slot_create ( Relation  relation,
List **  reglist 
)

Definition at line 92 of file tableam.c.

93{
94 const TupleTableSlotOps *tts_cb;
95 TupleTableSlot *slot;
96
97 tts_cb = table_slot_callbacks(relation);
98 slot = MakeSingleTupleTableSlot(RelationGetDescr(relation), tts_cb);
99
100 if (reglist)
101 *reglist = lappend(*reglist, slot);
102
103 return slot;
104}
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1427
List * lappend(List *list, void *datum)
Definition: list.c:339
#define RelationGetDescr(relation)
Definition: rel.h:541
const TupleTableSlotOps * table_slot_callbacks(Relation relation)
Definition: tableam.c:59

References lappend(), MakeSingleTupleTableSlot(), RelationGetDescr, and table_slot_callbacks().

Referenced by acquire_sample_rows(), apply_handle_tuple_routing(), apply_handle_update_internal(), build_index_value_desc(), check_default_partition_contents(), check_exclusion_or_unique_constraint(), CopyFrom(), CopyMultiInsertInfoNextFreeSlot(), CopyRelationTo(), EvalPlanQualSlot(), ExecCrossPartitionUpdate(), ExecInitInsertProjection(), ExecInitMerge(), ExecInitMergeTupleSlots(), ExecInitModifyTable(), ExecInitPartitionInfo(), ExecInitRoutingInfo(), ExecInitUpdateProjection(), FindConflictTuple(), FindReplTupleInLocalRel(), get_actual_variable_range(), heap_entry_is_visible(), heapam_index_build_range_scan(), heapam_relation_copy_for_cluster(), IndexCheckExclusion(), RelationFindDeletedTupleInfoByIndex(), RelationFindDeletedTupleInfoSeq(), RelationFindReplTupleSeq(), systable_beginscan(), systable_beginscan_ordered(), table_index_fetch_tuple_check(), validateDomainCheckConstraint(), validateDomainNotNullConstraint(), and validateForeignKeyConstraint().

◆ table_tuple_get_latest_tid()

void table_tuple_get_latest_tid ( TableScanDesc  scan,
ItemPointer  tid 
)

Definition at line 267 of file tableam.c.

268{
269 Relation rel = scan->rs_rd;
270 const TableAmRoutine *tableam = rel->rd_tableam;
271
272 /*
273 * We don't expect direct calls to table_tuple_get_latest_tid with valid
274 * CheckXidAlive for catalog or regular tables. See detailed comments in
275 * xact.c where these variables are declared.
276 */
277 if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
278 elog(ERROR, "unexpected table_tuple_get_latest_tid call during logical decoding");
279
280 /*
281 * Since this can be called with user-supplied TID, don't trust the input
282 * too much.
283 */
284 if (!tableam->tuple_tid_valid(scan, tid))
285 ereport(ERROR,
286 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
287 errmsg("tid (%u, %u) is not valid for relation \"%s\"",
288 ItemPointerGetBlockNumberNoCheck(tid),
289 ItemPointerGetOffsetNumberNoCheck(tid),
290 RelationGetRelationName(rel))));
291
292 tableam->tuple_get_latest_tid(scan, tid);
293}
#define unlikely(x)
Definition: c.h:407
int errcode(int sqlerrcode)
Definition: elog.c:863
int errmsg(const char *fmt,...)
Definition: elog.c:1080
#define ereport(elevel,...)
Definition: elog.h:150
static OffsetNumber ItemPointerGetOffsetNumberNoCheck(const ItemPointerData *pointer)
Definition: itemptr.h:114
static BlockNumber ItemPointerGetBlockNumberNoCheck(const ItemPointerData *pointer)
Definition: itemptr.h:93
#define RelationGetRelationName(relation)
Definition: rel.h:549
void(* tuple_get_latest_tid)(TableScanDesc scan, ItemPointer tid)
Definition: tableam.h:488
bool(* tuple_tid_valid)(TableScanDesc scan, ItemPointer tid)
Definition: tableam.h:481
Relation rs_rd
Definition: relscan.h:36
#define TransactionIdIsValid(xid)
Definition: transam.h:41
bool bsysscan
Definition: xact.c:101
TransactionId CheckXidAlive
Definition: xact.c:100

References bsysscan, CheckXidAlive, elog, ereport, errcode(), errmsg(), ERROR, ItemPointerGetBlockNumberNoCheck(), ItemPointerGetOffsetNumberNoCheck(), RelationData::rd_tableam, RelationGetRelationName, TableScanDescData::rs_rd, TransactionIdIsValid, TableAmRoutine::tuple_get_latest_tid, TableAmRoutine::tuple_tid_valid, and unlikely.

Referenced by currtid_internal(), and TidNext().
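
A minimal sketch of chasing a user-supplied TID to its latest version, roughly the pattern currtid_internal() follows; the snapshot choice, the "tid_in" input, and the surrounding setup are assumptions.

ItemPointerData tid = *tid_in;      /* hypothetical caller-supplied TID */
Snapshot    snapshot = RegisterSnapshot(GetLatestSnapshot());
TableScanDesc scan = table_beginscan_tid(rel, snapshot);

table_tuple_get_latest_tid(scan, &tid);     /* updates tid in place */

table_endscan(scan);
UnregisterSnapshot(snapshot);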

Variable Documentation

◆ default_table_access_method

char* default_table_access_method = DEFAULT_TABLE_ACCESS_METHOD

Definition at line 49 of file tableam.c.

Referenced by ATPrepSetAccessMethod(), and DefineRelation().

◆ synchronize_seqscans

bool synchronize_seqscans = true

Definition at line 50 of file tableam.c.

Referenced by initscan(), and table_block_parallelscan_initialize().