/* Constants controlling chunked block allocation for parallel sequential scans */
#define PARALLEL_SEQSCAN_NCHUNKS			2048
#define PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS	64
#define PARALLEL_SEQSCAN_MAX_CHUNK_SIZE		8192
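These constants drive the per-worker chunk sizing done in table_block_parallelscan_startblock_init(). Below is a minimal sketch of that sizing rule, simplified to the initial chunk-size choice only; the ramp-down near the end of the scan, governed by PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS, is omitted.

#include "postgres.h"
#include "port/pg_bitutils.h"	/* pg_nextpower2_32() */
#include "storage/block.h"		/* BlockNumber */

/*
 * Simplified sketch: pick the I/O chunk size for a parallel scan.
 * Aim for roughly PARALLEL_SEQSCAN_NCHUNKS chunks across the relation,
 * rounded up to a power of two and capped at PARALLEL_SEQSCAN_MAX_CHUNK_SIZE
 * blocks (constants as defined above).
 */
static uint32
sketch_chunk_size(BlockNumber nblocks)
{
	uint32		chunk_size;

	chunk_size = pg_nextpower2_32(Max(nblocks / PARALLEL_SEQSCAN_NCHUNKS, 1));

	if (chunk_size > PARALLEL_SEQSCAN_MAX_CHUNK_SIZE)
		chunk_size = PARALLEL_SEQSCAN_MAX_CHUNK_SIZE;

	return chunk_size;
}

For a table of 1,000,000 blocks this gives pg_nextpower2_32(Max(1000000 / 2048, 1)) = 512 blocks per chunk, well under the 8192-block cap.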
/* table_slot_callbacks(): FDWs historically expect heap tuples in their slots */
else if (relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
	tts_cb = &TTSOpsHeapTuple;

/* ... views and partitioned tables get virtual slots */
Assert(relation->rd_rel->relkind == RELKIND_VIEW ||
	   relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE);

/* table_slot_create(): optionally register the new slot for later cleanup */
if (reglist)
	*reglist = lappend(*reglist, slot);
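For orientation, a hedged sketch of how callers typically pair table_slot_create() with a scan. The table_* calls are the real tableam API; the wrapper function and the choice to pass a NULL reglist are illustrative assumptions, not code from this file.

#include "postgres.h"
#include "access/tableam.h"
#include "executor/tuptable.h"
#include "utils/snapmgr.h"

/* Illustrative helper: walk every visible tuple of a relation once. */
static void
scan_relation_once(Relation rel)
{
	/* NULL reglist: the caller drops the slot itself */
	TupleTableSlot *slot = table_slot_create(rel, NULL);
	TableScanDesc scan = table_beginscan(rel, GetActiveSnapshot(), 0, NULL);

	while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
	{
		/* ... inspect the tuple through the slot ... */
	}

	table_endscan(scan);
	ExecDropSingleTupleTableSlot(slot);
}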
/* table_index_fetch_tuple_check(): fetch the tuple an index entry points to */
bool		call_again = false;
/* table_tuple_get_latest_tid(): reject calls made during logical decoding */
if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
	elog(ERROR, "unexpected table_tuple_get_latest_tid call during logical decoding");

/* ... and don't trust a user-supplied TID */
if (!tableam->tuple_tid_valid(scan, tid))
	ereport(ERROR,
			(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
			 errmsg("tid (%u, %u) is not valid for relation \"%s\"",
					ItemPointerGetBlockNumberNoCheck(tid),
					ItemPointerGetOffsetNumberNoCheck(tid),
					RelationGetRelationName(rel))));
/* simple_table_tuple_delete(): any outcome other than TM_Ok is unexpected here */
switch (result)
{
	case TM_SelfModified:
		/* Tuple was already updated in current command? */
		elog(ERROR, "tuple already updated by self");
		break;
	case TM_Ok:
		/* done successfully */
		break;
	case TM_Updated:
		elog(ERROR, "tuple concurrently updated");
		break;
	case TM_Deleted:
		elog(ERROR, "tuple concurrently deleted");
		break;
	default:
		elog(ERROR, "unrecognized table_tuple_delete status: %u", result);
		break;
}
348 &tmfd, &lockmode, update_indexes);
354 elog(
ERROR,
"tuple already updated by self");
362 elog(
ERROR,
"tuple concurrently updated");
366 elog(
ERROR,
"tuple concurrently deleted");
370 elog(
ERROR,
"unrecognized table_tuple_update status: %u", result);
/* table_block_parallelscan_startblock_init(): reset per-worker chunk state */
memset(pbscanwork, 0, sizeof(*pbscanwork));
StaticAssertStmt(MaxBlockNumber <= 0xFFFFFFFE,
				 "pg_nextpower2_32 may be too small for non-standard BlockNumber width");
/* table_block_relation_size(): bytes = number of blocks times BLCKSZ */
return nblocks * BLCKSZ;
/* table_block_relation_estimate_size(): tail of the parameter list */
								   Size overhead_bytes_per_tuple,
								   Size usable_bytes_per_page)

/* Pull the planner's starting estimates from pg_class */
reltuples = (double) rel->rd_rel->reltuples;

/* Never-vacuumed heuristic: assume at least 10 pages, unless rel has children */
if (curpages < 10 &&
	relpages == 0 &&
	!rel->rd_rel->relhassubclass)
	curpages = 10;

/* Prefer the tuple density observed by the last VACUUM/ANALYZE */
if (reltuples >= 0 && relpages > 0)
	density = reltuples / (double) relpages;

/* Otherwise derive density from estimated tuple width and fillfactor */
tuple_width += overhead_bytes_per_tuple;
density = (usable_bytes_per_page * fillfactor / 100) / tuple_width;

*tuples = rint(density * (double) curpages);

/* Fraction of pages marked all-visible, clamped to [0, 1] */
if (relallvisible == 0 || curpages <= 0)
	*allvisfrac = 0;
else if ((double) relallvisible >= curpages)
	*allvisfrac = 1;
else
	*allvisfrac = (double) relallvisible / curpages;
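A worked example of the fallback density formula, using heap-like numbers that are assumptions for illustration (8192-byte blocks, 24-byte page header, 28 bytes of per-tuple overhead, fillfactor 100) rather than values taken from this file.

#include <math.h>
#include <stdio.h>

int
main(void)
{
	/* Assumed inputs, roughly matching a heap with 8 kB blocks */
	double	usable_bytes_per_page = 8192 - 24;	/* block minus page header */
	double	overhead_bytes_per_tuple = 28;		/* tuple header + line pointer */
	double	data_width = 32;					/* estimated user-data bytes per row */
	int		fillfactor = 100;
	double	curpages = 1000;					/* current size of the relation */

	double	tuple_width = data_width + overhead_bytes_per_tuple;
	double	density = (usable_bytes_per_page * fillfactor / 100) / tuple_width;

	/* ~136 tuples per page, so roughly 136,000 rows for 1000 pages */
	printf("density = %.1f tuples/page, estimate = %.0f tuples\n",
		   density, rint(density * curpages));
	return 0;
}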
static void pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
static uint64 pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
static void pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
#define InvalidBlockNumber
#define RelationGetNumberOfBlocks(reln)
#define Assert(condition)
#define StaticAssertStmt(condition, errmessage)
int errcode(int sqlerrcode)
int errmsg(const char *fmt,...)
#define ereport(elevel,...)
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
const TupleTableSlotOps TTSOpsVirtual
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
const TupleTableSlotOps TTSOpsHeapTuple
static OffsetNumber ItemPointerGetOffsetNumberNoCheck(const ItemPointerData *pointer)
static BlockNumber ItemPointerGetBlockNumberNoCheck(const ItemPointerData *pointer)
List * lappend(List *list, void *datum)
static uint32 pg_nextpower2_32(uint32 num)
int32 get_rel_data_width(Relation rel, int32 *attr_widths)
#define RelationGetRelid(relation)
static SMgrRelation RelationGetSmgr(Relation rel)
#define RelationGetDescr(relation)
#define RelationGetFillFactor(relation, defaultff)
#define RelationGetRelationName(relation)
#define RelationUsesLocalBuffers(relation)
#define HEAP_DEFAULT_FILLFACTOR
#define RelFileLocatorEquals(locator1, locator2)
struct ParallelBlockTableScanDescData * ParallelBlockTableScanDesc
struct ParallelBlockTableScanDescData ParallelBlockTableScanDescData
Size add_size(Size s1, Size s2)
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
void SerializeSnapshot(Snapshot snapshot, char *start_address)
Snapshot GetCatalogSnapshot(Oid relid)
Snapshot RestoreSnapshot(char *start_address)
Snapshot RegisterSnapshot(Snapshot snapshot)
Size EstimateSnapshotSpace(Snapshot snapshot)
#define IsMVCCSnapshot(snapshot)
#define SpinLockInit(lock)
#define SpinLockRelease(lock)
#define SpinLockAcquire(lock)
BlockNumber phs_startblock
pg_atomic_uint64 phs_nallocated
ParallelTableScanDescData base
uint32 phsw_chunk_remaining
RelFileLocator phs_locator
const struct TableAmRoutine * rd_tableam
RelFileLocator rd_locator
Size(* parallelscan_initialize)(Relation rel, ParallelTableScanDesc pscan)
void(* tuple_get_latest_tid)(TableScanDesc scan, ItemPointer tid)
const TupleTableSlotOps *(* slot_callbacks)(Relation rel)
TableScanDesc(* scan_begin)(Relation rel, Snapshot snapshot, int nkeys, struct ScanKeyData *key, ParallelTableScanDesc pscan, uint32 flags)
bool(* tuple_tid_valid)(TableScanDesc scan, ItemPointer tid)
Size(* parallelscan_estimate)(Relation rel)
void ss_report_location(Relation rel, BlockNumber location)
BlockNumber ss_get_location(Relation rel, BlockNumber relnblocks)
TupleTableSlot * table_slot_create(Relation relation, List **reglist)
#define PARALLEL_SEQSCAN_MAX_CHUNK_SIZE
void simple_table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot, Snapshot snapshot, TU_UpdateIndexes *update_indexes)
bool table_index_fetch_tuple_check(Relation rel, ItemPointer tid, Snapshot snapshot, bool *all_dead)
Size table_block_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan)
TableScanDesc table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan)
void simple_table_tuple_insert(Relation rel, TupleTableSlot *slot)
#define PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS
void table_block_parallelscan_startblock_init(Relation rel, ParallelBlockTableScanWorker pbscanwork, ParallelBlockTableScanDesc pbscan)
TableScanDesc table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
char * default_table_access_method
void table_tuple_get_latest_tid(TableScanDesc scan, ItemPointer tid)
void simple_table_tuple_delete(Relation rel, ItemPointer tid, Snapshot snapshot)
void table_block_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan)
uint64 table_block_relation_size(Relation rel, ForkNumber forkNumber)
Size table_parallelscan_estimate(Relation rel, Snapshot snapshot)
Size table_block_parallelscan_estimate(Relation rel)
#define PARALLEL_SEQSCAN_NCHUNKS
void table_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan, Snapshot snapshot)
const TupleTableSlotOps * table_slot_callbacks(Relation relation)
BlockNumber table_block_parallelscan_nextpage(Relation rel, ParallelBlockTableScanWorker pbscanwork, ParallelBlockTableScanDesc pbscan)
void table_block_relation_estimate_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac, Size overhead_bytes_per_tuple, Size usable_bytes_per_page)
bool synchronize_seqscans
#define DEFAULT_TABLE_ACCESS_METHOD
static IndexFetchTableData * table_index_fetch_begin(Relation rel)
static TM_Result table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
static void table_index_fetch_end(struct IndexFetchTableData *scan)
static TM_Result table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
static bool table_index_fetch_tuple(struct IndexFetchTableData *scan, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, bool *call_again, bool *all_dead)
static void table_tuple_insert(Relation rel, TupleTableSlot *slot, CommandId cid, int options, struct BulkInsertStateData *bistate)
#define TransactionIdIsValid(xid)
TransactionId CheckXidAlive
CommandId GetCurrentCommandId(bool used)
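Several of the symbols above make up the parallel-scan setup path. A hedged sketch of how a leader and a worker might wire them together through a shm_toc, loosely following what the executor's seq scan node does; the toc key and the two function names are invented for illustration, and error handling is omitted.

#include "postgres.h"
#include "access/tableam.h"
#include "storage/shm_toc.h"

#define SKETCH_PSCAN_KEY	1	/* illustrative toc key, not from the source */

/* Leader: size, allocate, and fill the shared parallel scan descriptor. */
static void
leader_setup(Relation rel, Snapshot snapshot, shm_toc *toc)
{
	Size		sz = table_parallelscan_estimate(rel, snapshot);
	ParallelTableScanDesc pscan = shm_toc_allocate(toc, sz);

	table_parallelscan_initialize(rel, pscan, snapshot);
	shm_toc_insert(toc, SKETCH_PSCAN_KEY, pscan);
}

/* Worker: attach to the shared descriptor and start scanning. */
static TableScanDesc
worker_attach(Relation rel, shm_toc *toc)
{
	ParallelTableScanDesc pscan = shm_toc_lookup(toc, SKETCH_PSCAN_KEY, false);

	return table_beginscan_parallel(rel, pscan);
}

In the real executor this work is split across the parallel Estimate, InitializeDSM, and InitializeWorker callbacks rather than two plain functions.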