PostgreSQL Source Code git master
tableam.c
1/*----------------------------------------------------------------------
2 *
3 * tableam.c
4 * Table access method routines too big to be inline functions.
5 *
6 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/access/table/tableam.c
12 *
13 * NOTES
14 * Note that most functions in here are documented in tableam.h, rather than
15 * here. That's because there are a lot of inline functions in tableam.h and
16 * it'd be harder to understand if one constantly had to switch between files.
17 *
18 *----------------------------------------------------------------------
19 */
20#include "postgres.h"
21
22#include <math.h>
23
24#include "access/syncscan.h"
25#include "access/tableam.h"
26#include "access/xact.h"
27#include "optimizer/plancat.h"
28#include "port/pg_bitutils.h"
29#include "storage/bufmgr.h"
30#include "storage/shmem.h"
31#include "storage/smgr.h"
32
33/*
34 * Constants to control the behavior of block allocation to parallel workers
35 * during a parallel seqscan. Technically these values do not need to be
36 * powers of 2, but having them as powers of 2 makes the math more optimal
37 * and makes the ramp-down stepping more even.
38 */
39
40/* The number of I/O chunks we try to break a parallel seqscan down into */
41#define PARALLEL_SEQSCAN_NCHUNKS 2048
42/* Ramp down size of allocations when we've only this number of chunks left */
43#define PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS 64
44/* Cap the size of parallel I/O chunks to this number of blocks */
45#define PARALLEL_SEQSCAN_MAX_CHUNK_SIZE 8192
46
47/* GUC variables */
48char *default_table_access_method = DEFAULT_TABLE_ACCESS_METHOD;
49bool synchronize_seqscans = true;
50
51
52/* ----------------------------------------------------------------------------
53 * Slot functions.
54 * ----------------------------------------------------------------------------
55 */
56
57const TupleTableSlotOps *
58table_slot_callbacks(Relation relation)
59{
60 const TupleTableSlotOps *tts_cb;
61
62 if (relation->rd_tableam)
63 tts_cb = relation->rd_tableam->slot_callbacks(relation);
64 else if (relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
65 {
66 /*
67 * Historically FDWs expect to store heap tuples in slots. Continue
68 * handing them one, to make it less painful to adapt FDWs to new
69 * versions. The cost of a heap slot over a virtual slot is pretty
70 * small.
71 */
72 tts_cb = &TTSOpsHeapTuple;
73 }
74 else
75 {
76 /*
77 * These need to be supported, as some parts of the code (like COPY)
78 * need to create slots for such relations too. It seems better to
79 * centralize the knowledge that a heap slot is the right thing in
80 * that case here.
81 */
82 Assert(relation->rd_rel->relkind == RELKIND_VIEW ||
83 relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE);
84 tts_cb = &TTSOpsVirtual;
85 }
86
87 return tts_cb;
88}
89
90TupleTableSlot *
91table_slot_create(Relation relation, List **reglist)
92{
93 const TupleTableSlotOps *tts_cb;
94 TupleTableSlot *slot;
95
96 tts_cb = table_slot_callbacks(relation);
97 slot = MakeSingleTupleTableSlot(RelationGetDescr(relation), tts_cb);
98
99 if (reglist)
100 *reglist = lappend(*reglist, slot);
101
102 return slot;
103}
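/*
 * Illustrative sketch (not part of tableam.c): typical lifecycle of a slot
 * obtained through table_slot_create() when no registration list is passed;
 * the caller must drop the slot itself.  "rel" is assumed to be a relation
 * already opened by the caller, and "slot_usage_sketch" is a hypothetical name.
 */
static void
slot_usage_sketch(Relation rel)
{
    TupleTableSlot *slot = table_slot_create(rel, NULL);

    /* ... materialize or inspect tuples through "slot" here ... */

    ExecDropSingleTupleTableSlot(slot);
}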
104
105
106/* ----------------------------------------------------------------------------
107 * Table scan functions.
108 * ----------------------------------------------------------------------------
109 */
110
111TableScanDesc
112table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
113{
114 uint32 flags = SO_TYPE_SEQSCAN |
115 SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
116 Oid relid = RelationGetRelid(relation);
117 Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
118
119 return relation->rd_tableam->scan_begin(relation, snapshot, nkeys, key,
120 NULL, flags);
121}
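/*
 * Illustrative sketch (not part of tableam.c): a straightforward loop over a
 * catalog relation using the function above.  "rel" is assumed to be a
 * catalog table opened by the caller; the snapshot registered above is
 * released again by table_endscan() because SO_TEMP_SNAPSHOT is set.
 */
static void
catalog_scan_sketch(Relation rel)
{
    TableScanDesc scan = table_beginscan_catalog(rel, 0, NULL);
    TupleTableSlot *slot = table_slot_create(rel, NULL);

    while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
    {
        /* ... examine the current catalog row in "slot" ... */
    }

    ExecDropSingleTupleTableSlot(slot);
    table_endscan(scan);
}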
122
123
124/* ----------------------------------------------------------------------------
125 * Parallel table scan related functions.
126 * ----------------------------------------------------------------------------
127 */
128
129Size
130table_parallelscan_estimate(Relation rel, Snapshot snapshot)
131{
132 Size sz = 0;
133
134 if (IsMVCCSnapshot(snapshot))
135 sz = add_size(sz, EstimateSnapshotSpace(snapshot));
136 else
137 Assert(snapshot == SnapshotAny);
138
139 sz = add_size(sz, rel->rd_tableam->parallelscan_estimate(rel));
140
141 return sz;
142}
143
144void
145table_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan,
146 Snapshot snapshot)
147{
148 Size snapshot_off = rel->rd_tableam->parallelscan_initialize(rel, pscan);
149
150 pscan->phs_snapshot_off = snapshot_off;
151
152 if (IsMVCCSnapshot(snapshot))
153 {
154 SerializeSnapshot(snapshot, (char *) pscan + pscan->phs_snapshot_off);
155 pscan->phs_snapshot_any = false;
156 }
157 else
158 {
159 Assert(snapshot == SnapshotAny);
160 pscan->phs_snapshot_any = true;
161 }
162}
163
164TableScanDesc
165table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan)
166{
167 Snapshot snapshot;
168 uint32 flags = SO_TYPE_SEQSCAN |
169 SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
170
171 Assert(RelFileLocatorEquals(relation->rd_locator, pscan->phs_locator));
172
173 if (!pscan->phs_snapshot_any)
174 {
175 /* Snapshot was serialized -- restore it */
176 snapshot = RestoreSnapshot((char *) pscan + pscan->phs_snapshot_off);
177 RegisterSnapshot(snapshot);
178 flags |= SO_TEMP_SNAPSHOT;
179 }
180 else
181 {
182 /* SnapshotAny passed by caller (not serialized) */
183 snapshot = SnapshotAny;
184 }
185
186 return relation->rd_tableam->scan_begin(relation, snapshot, 0, NULL,
187 pscan, flags);
188}
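/*
 * Illustrative sketch (not part of tableam.c): how the three functions above
 * fit together.  The parallel leader reserves table_parallelscan_estimate()
 * bytes of dynamic shared memory and fills them in; workers skip the
 * initialization and go straight to table_beginscan_parallel().  The shm_toc
 * plumbing is omitted, and "dsm_space" is a hypothetical pointer into that
 * shared memory.
 */
static TableScanDesc
parallel_scan_leader_sketch(Relation rel, Snapshot snapshot, void *dsm_space)
{
    ParallelTableScanDesc pscan = (ParallelTableScanDesc) dsm_space;

    /* the leader fills in the shared scan state exactly once ... */
    table_parallelscan_initialize(rel, pscan, snapshot);

    /* ... and then joins the scan just like any worker would */
    return table_beginscan_parallel(rel, pscan);
}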
189
190
191/* ----------------------------------------------------------------------------
192 * Index scan related functions.
193 * ----------------------------------------------------------------------------
194 */
195
196/*
197 * To perform that check simply start an index scan, create the necessary
198 * slot, do the heap lookup, and shut everything down again. This could be
199 * optimized, but is unlikely to matter from a performance POV. If there
200 * frequently are live index pointers also matching a unique index key, the
201 * CPU overhead of this routine is unlikely to matter.
202 *
203 * Note that *tid may be modified when we return true if the AM supports
204 * storing multiple row versions reachable via a single index entry (like
205 * heap's HOT).
206 */
207bool
208table_index_fetch_tuple_check(Relation rel,
209 ItemPointer tid,
210 Snapshot snapshot,
211 bool *all_dead)
212{
213 IndexFetchTableData *scan;
214 TupleTableSlot *slot;
215 bool call_again = false;
216 bool found;
217
218 slot = table_slot_create(rel, NULL);
219 scan = table_index_fetch_begin(rel);
220 found = table_index_fetch_tuple(scan, tid, snapshot, slot, &call_again,
221 all_dead);
222 table_index_fetch_end(scan);
223 ExecDropSingleTupleTableSlot(slot);
224
225 return found;
226}
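/*
 * Illustrative sketch (not part of tableam.c): a caller that only needs a
 * yes/no visibility answer for an index pointer can use the helper above
 * instead of managing an index-fetch descriptor and slot itself; "all_dead"
 * can additionally be used to mark index entries killed, as the nbtree
 * unique-check code does.  "tid_visible_sketch" is a hypothetical name.
 */
static bool
tid_visible_sketch(Relation rel, ItemPointer tid, Snapshot snapshot)
{
    bool    all_dead = false;

    return table_index_fetch_tuple_check(rel, tid, snapshot, &all_dead);
}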
227
228
229/* ------------------------------------------------------------------------
230 * Functions for non-modifying operations on individual tuples
231 * ------------------------------------------------------------------------
232 */
233
234void
235table_tuple_get_latest_tid(TableScanDesc scan, ItemPointer tid)
236{
237 Relation rel = scan->rs_rd;
238 const TableAmRoutine *tableam = rel->rd_tableam;
239
240 /*
241 * We don't expect direct calls to table_tuple_get_latest_tid with valid
242 * CheckXidAlive for catalog or regular tables. See detailed comments in
243 * xact.c where these variables are declared.
244 */
245 if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
246 elog(ERROR, "unexpected table_tuple_get_latest_tid call during logical decoding");
247
248 /*
249 * Since this can be called with user-supplied TID, don't trust the input
250 * too much.
251 */
252 if (!tableam->tuple_tid_valid(scan, tid))
253 ereport(ERROR,
254 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
255 errmsg("tid (%u, %u) is not valid for relation \"%s\"",
256 ItemPointerGetBlockNumberNoCheck(tid),
257 ItemPointerGetOffsetNumberNoCheck(tid),
258 RelationGetRelationName(rel))));
259
260 tableam->tuple_get_latest_tid(scan, tid);
261}
262
263
264/* ----------------------------------------------------------------------------
265 * Functions to make modifications a bit simpler.
266 * ----------------------------------------------------------------------------
267 */
268
269/*
270 * simple_table_tuple_insert - insert a tuple
271 *
272 * Currently, this routine differs from table_tuple_insert only in supplying a
273 * default command ID and not allowing access to the speedup options.
274 */
275void
276simple_table_tuple_insert(Relation rel, TupleTableSlot *slot)
277{
278 table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);
279}
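/*
 * Illustrative sketch (not part of tableam.c): a caller applying rows one at
 * a time can insert through the wrapper above; index maintenance is not done
 * here and typically the command counter is advanced afterwards so that later
 * processing in the same transaction sees the new row.  "simple_insert_sketch"
 * is a hypothetical name.
 */
static void
simple_insert_sketch(Relation rel, TupleTableSlot *slot)
{
    simple_table_tuple_insert(rel, slot);

    /* new index entries must be added separately, cf. ExecInsertIndexTuples() */

    CommandCounterIncrement();
}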
280
281/*
282 * simple_table_tuple_delete - delete a tuple
283 *
284 * This routine may be used to delete a tuple when concurrent updates of
285 * the target tuple are not expected (for example, because we have a lock
286 * on the relation associated with the tuple). Any failure is reported
287 * via ereport().
288 */
289void
290simple_table_tuple_delete(Relation rel, ItemPointer tid, Snapshot snapshot)
291{
292 TM_Result result;
293 TM_FailureData tmfd;
294
295 result = table_tuple_delete(rel, tid,
296 GetCurrentCommandId(true),
297 snapshot, InvalidSnapshot,
298 true /* wait for commit */ ,
299 &tmfd, false /* changingPart */ );
300
301 switch (result)
302 {
303 case TM_SelfModified:
304 /* Tuple was already updated in current command? */
305 elog(ERROR, "tuple already updated by self");
306 break;
307
308 case TM_Ok:
309 /* done successfully */
310 break;
311
312 case TM_Updated:
313 elog(ERROR, "tuple concurrently updated");
314 break;
315
316 case TM_Deleted:
317 elog(ERROR, "tuple concurrently deleted");
318 break;
319
320 default:
321 elog(ERROR, "unrecognized table_tuple_delete status: %u", result);
322 break;
323 }
324}
325
326/*
327 * simple_table_tuple_update - replace a tuple
328 *
329 * This routine may be used to update a tuple when concurrent updates of
330 * the target tuple are not expected (for example, because we have a lock
331 * on the relation associated with the tuple). Any failure is reported
332 * via ereport().
333 */
334void
335simple_table_tuple_update(Relation rel, ItemPointer otid,
336 TupleTableSlot *slot,
337 Snapshot snapshot,
338 TU_UpdateIndexes *update_indexes)
339{
340 TM_Result result;
341 TM_FailureData tmfd;
342 LockTupleMode lockmode;
343
344 result = table_tuple_update(rel, otid, slot,
345 GetCurrentCommandId(true),
346 snapshot, InvalidSnapshot,
347 true /* wait for commit */ ,
348 &tmfd, &lockmode, update_indexes);
349
350 switch (result)
351 {
352 case TM_SelfModified:
353 /* Tuple was already updated in current command? */
354 elog(ERROR, "tuple already updated by self");
355 break;
356
357 case TM_Ok:
358 /* done successfully */
359 break;
360
361 case TM_Updated:
362 elog(ERROR, "tuple concurrently updated");
363 break;
364
365 case TM_Deleted:
366 elog(ERROR, "tuple concurrently deleted");
367 break;
368
369 default:
370 elog(ERROR, "unrecognized table_tuple_update status: %u", result);
371 break;
372 }
373}
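/*
 * Illustrative sketch (not part of tableam.c): index maintenance is not done
 * by the wrapper above; the caller acts on the TU_UpdateIndexes value it
 * reports.  Names other than the tableam symbols are hypothetical.
 */
static void
simple_update_sketch(Relation rel, ItemPointer otid, TupleTableSlot *slot,
                     Snapshot snapshot)
{
    TU_UpdateIndexes update_indexes;

    simple_table_tuple_update(rel, otid, slot, snapshot, &update_indexes);

    if (update_indexes != TU_None)
    {
        /* ... insert new index entries here, cf. ExecInsertIndexTuples() ... */
    }
}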
374
375
376/* ----------------------------------------------------------------------------
377 * Helper functions to implement parallel scans for block oriented AMs.
378 * ----------------------------------------------------------------------------
379 */
380
381Size
382table_block_parallelscan_estimate(Relation rel)
383{
384 return sizeof(ParallelBlockTableScanDescData);
385}
386
387Size
388table_block_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan)
389{
390 ParallelBlockTableScanDesc bpscan = (ParallelBlockTableScanDesc) pscan;
391
392 bpscan->base.phs_locator = rel->rd_locator;
393 bpscan->phs_nblocks = RelationGetNumberOfBlocks(rel);
394 /* compare phs_syncscan initialization to similar logic in initscan */
395 bpscan->base.phs_syncscan = synchronize_seqscans &&
396 !RelationUsesLocalBuffers(rel) &&
397 bpscan->phs_nblocks > NBuffers / 4;
398 SpinLockInit(&bpscan->phs_mutex);
399 bpscan->phs_startblock = InvalidBlockNumber;
400 pg_atomic_init_u64(&bpscan->phs_nallocated, 0);
401
402 return sizeof(ParallelBlockTableScanDescData);
403}
404
405void
406table_block_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan)
407{
408 ParallelBlockTableScanDesc bpscan = (ParallelBlockTableScanDesc) pscan;
409
410 pg_atomic_write_u64(&bpscan->phs_nallocated, 0);
411}
412
413/*
414 * find and set the scan's startblock
415 *
416 * Determine where the parallel seq scan should start. This function may be
417 * called many times, once by each parallel worker. We must be careful only
418 * to set the startblock once.
419 */
420void
421table_block_parallelscan_startblock_init(Relation rel,
422 ParallelBlockTableScanWorker pbscanwork,
423 ParallelBlockTableScanDesc pbscan)
424{
425 BlockNumber sync_startpage = InvalidBlockNumber;
426
427 /* Reset the state we use for controlling allocation size. */
428 memset(pbscanwork, 0, sizeof(*pbscanwork));
429
430 StaticAssertStmt(MaxBlockNumber <= 0xFFFFFFFE,
431 "pg_nextpower2_32 may be too small for non-standard BlockNumber width");
432
433 /*
434 * We determine the chunk size based on the size of the relation. First we
435 * split the relation into PARALLEL_SEQSCAN_NCHUNKS chunks but we then
436 * take the next highest power of 2 number of the chunk size. This means
437 * we split the relation into somewhere between PARALLEL_SEQSCAN_NCHUNKS
438 * and PARALLEL_SEQSCAN_NCHUNKS / 2 chunks.
439 */
440 pbscanwork->phsw_chunk_size = pg_nextpower2_32(Max(pbscan->phs_nblocks /
441 PARALLEL_SEQSCAN_NCHUNKS, 1));
442
443 /*
444 * Ensure we don't go over the maximum chunk size with larger tables. This
445 * means we may get much more than PARALLEL_SEQSCAN_NCHUNKS for larger
446 * tables. Too large a chunk size has been shown to be detrimental to
447 * synchronous scan performance.
448 */
449 pbscanwork->phsw_chunk_size = Min(pbscanwork->phsw_chunk_size,
450 PARALLEL_SEQSCAN_MAX_CHUNK_SIZE);
451
452retry:
453 /* Grab the spinlock. */
454 SpinLockAcquire(&pbscan->phs_mutex);
455
456 /*
457 * If the scan's startblock has not yet been initialized, we must do so
458 * now. If this is not a synchronized scan, we just start at block 0, but
459 * if it is a synchronized scan, we must get the starting position from
460 * the synchronized scan machinery. We can't hold the spinlock while
461 * doing that, though, so release the spinlock, get the information we
462 * need, and retry. If nobody else has initialized the scan in the
463 * meantime, we'll fill in the value we fetched on the second time
464 * through.
465 */
466 if (pbscan->phs_startblock == InvalidBlockNumber)
467 {
468 if (!pbscan->base.phs_syncscan)
469 pbscan->phs_startblock = 0;
470 else if (sync_startpage != InvalidBlockNumber)
471 pbscan->phs_startblock = sync_startpage;
472 else
473 {
474 SpinLockRelease(&pbscan->phs_mutex);
475 sync_startpage = ss_get_location(rel, pbscan->phs_nblocks);
476 goto retry;
477 }
478 }
479 SpinLockRelease(&pbscan->phs_mutex);
480}
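/*
 * Worked example (not part of tableam.c): for a 1,000,000 block relation,
 * phs_nblocks / PARALLEL_SEQSCAN_NCHUNKS = 1000000 / 2048 = 488, and
 * pg_nextpower2_32(488) = 512, so each chunk covers 512 blocks (4 MB with
 * 8 kB blocks), well below PARALLEL_SEQSCAN_MAX_CHUNK_SIZE.  The relation is
 * thus split into about 1000000 / 512 = 1953 chunks, i.e. between
 * PARALLEL_SEQSCAN_NCHUNKS / 2 and PARALLEL_SEQSCAN_NCHUNKS, as described in
 * the comment above.
 */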
481
482/*
483 * get the next page to scan
484 *
485 * Get the next page to scan. Even if there are no pages left to scan,
486 * another backend could have grabbed a page to scan and not yet finished
487 * looking at it, so it doesn't follow that the scan is done when the first
488 * backend gets an InvalidBlockNumber return.
489 */
490BlockNumber
491table_block_parallelscan_nextpage(Relation rel,
492 ParallelBlockTableScanWorker pbscanwork,
493 ParallelBlockTableScanDesc pbscan)
494{
495 BlockNumber page;
496 uint64 nallocated;
497
498 /*
499 * The logic below allocates block numbers out to parallel workers in a
500 * way that each worker will receive a set of consecutive block numbers to
501 * scan. Earlier versions of this would allocate the next highest block
502 * number to the next worker to call this function. This would generally
503 * result in workers never receiving consecutive block numbers. Some
504 * operating systems would not detect the sequential I/O pattern due to
505 * each backend being a different process which could result in poor
506 * performance due to inefficient or no readahead. To work around this
507 * issue, we now allocate a range of block numbers for each worker and
508 * when they come back for another block, we give them the next one in
509 * that range until the range is complete. When the worker completes the
510 * range of blocks we then allocate another range for it and return the
511 * first block number from that range.
512 *
513 * Here we name these ranges of blocks "chunks". The initial size of
514 * these chunks is determined in table_block_parallelscan_startblock_init
515 * based on the size of the relation. Towards the end of the scan, we
516 * start making reductions in the size of the chunks in order to attempt
517 * to divide the remaining work over all the workers as evenly as
518 * possible.
519 *
520 * Here pbscanwork is local worker memory. phsw_chunk_remaining tracks
521 * the number of blocks remaining in the chunk. When that reaches 0 then
522 * we must allocate a new chunk for the worker.
523 *
524 * phs_nallocated tracks how many blocks have been allocated to workers
525 * already. When phs_nallocated >= rs_nblocks, all blocks have been
526 * allocated.
527 *
528 * Because we use an atomic fetch-and-add to fetch the current value, the
529 * phs_nallocated counter will exceed rs_nblocks, because workers will
530 * still increment the value, when they try to allocate the next block but
531 * all blocks have been allocated already. The counter must be 64 bits
532 * wide because of that, to avoid wrapping around when rs_nblocks is close
533 * to 2^32.
534 *
535 * The actual block to return is calculated by adding the counter to the
536 * starting block number, modulo nblocks.
537 */
538
539 /*
540 * First check if we have any remaining blocks in a previous chunk for
541 * this worker. We must consume all of the blocks from that before we
542 * allocate a new chunk to the worker.
543 */
544 if (pbscanwork->phsw_chunk_remaining > 0)
545 {
546 /*
547 * Give them the next block in the range and update the remaining
548 * number of blocks.
549 */
550 nallocated = ++pbscanwork->phsw_nallocated;
551 pbscanwork->phsw_chunk_remaining--;
552 }
553 else
554 {
555 /*
556 * When we've only got PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS chunks
557 * remaining in the scan, we halve the chunk size. Since we reduce the
558 * chunk size here, we'll hit this again after doing
559 * PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS at the new size. After a few
560 * iterations of this, we'll end up doing the last few blocks with the
561 * chunk size set to 1.
562 */
563 if (pbscanwork->phsw_chunk_size > 1 &&
564 pbscanwork->phsw_nallocated > pbscan->phs_nblocks -
565 (pbscanwork->phsw_chunk_size * PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS))
566 pbscanwork->phsw_chunk_size >>= 1;
567
568 nallocated = pbscanwork->phsw_nallocated =
569 pg_atomic_fetch_add_u64(&pbscan->phs_nallocated,
570 pbscanwork->phsw_chunk_size);
571
572 /*
573 * Set the remaining number of blocks in this chunk so that subsequent
574 * calls from this worker continue on with this chunk until it's done.
575 */
576 pbscanwork->phsw_chunk_remaining = pbscanwork->phsw_chunk_size - 1;
577 }
578
579 if (nallocated >= pbscan->phs_nblocks)
580 page = InvalidBlockNumber; /* all blocks have been allocated */
581 else
582 page = (nallocated + pbscan->phs_startblock) % pbscan->phs_nblocks;
583
584 /*
585 * Report scan location. Normally, we report the current page number.
586 * When we reach the end of the scan, though, we report the starting page,
587 * not the ending page, just so the starting positions for later scans
588 * don't slew backwards. We only report the position at the end of the
589 * scan once, though: subsequent callers will report nothing.
590 */
591 if (pbscan->base.phs_syncscan)
592 {
593 if (page != InvalidBlockNumber)
594 ss_report_location(rel, page);
595 else if (nallocated == pbscan->phs_nblocks)
596 ss_report_location(rel, pbscan->phs_startblock);
597 }
598
599 return page;
600}
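/*
 * Illustrative sketch (not part of tableam.c): the loop a block-oriented AM's
 * scan code typically runs in each parallel participant, driving the
 * allocator above until it reports InvalidBlockNumber.  "process_block" and
 * "parallel_worker_loop_sketch" are hypothetical names.
 */
static void
parallel_worker_loop_sketch(Relation rel,
                            ParallelBlockTableScanWorker pbscanwork,
                            ParallelBlockTableScanDesc pbscan,
                            void (*process_block) (Relation rel, BlockNumber blkno))
{
    BlockNumber blkno;

    table_block_parallelscan_startblock_init(rel, pbscanwork, pbscan);

    while ((blkno = table_block_parallelscan_nextpage(rel, pbscanwork,
                                                      pbscan)) != InvalidBlockNumber)
        process_block(rel, blkno);
}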
601
602/* ----------------------------------------------------------------------------
603 * Helper functions to implement relation sizing for block oriented AMs.
604 * ----------------------------------------------------------------------------
605 */
606
607/*
608 * table_block_relation_size
609 *
610 * If a table AM uses the various relation forks as the sole place where data
611 * is stored, and if it uses them in the expected manner (e.g. the actual data
612 * is in the main fork rather than some other), it can use this implementation
613 * of the relation_size callback rather than implementing its own.
614 */
615uint64
616table_block_relation_size(Relation rel, ForkNumber forkNumber)
617{
618 uint64 nblocks = 0;
619
620 /* InvalidForkNumber indicates returning the size for all forks */
621 if (forkNumber == InvalidForkNumber)
622 {
623 for (int i = 0; i < MAX_FORKNUM; i++)
624 nblocks += smgrnblocks(RelationGetSmgr(rel), i);
625 }
626 else
627 nblocks = smgrnblocks(RelationGetSmgr(rel), forkNumber);
628
629 return nblocks * BLCKSZ;
630}
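/*
 * Illustrative sketch (not part of tableam.c): a block-oriented table AM can
 * point its TableAmRoutine members straight at the helpers in this file, as
 * heapam does; only the relevant members of a hypothetical AM are shown.
 */
static const TableAmRoutine sketch_block_am_methods = {
    .type = T_TableAmRoutine,
    .parallelscan_estimate = table_block_parallelscan_estimate,
    .parallelscan_initialize = table_block_parallelscan_initialize,
    .parallelscan_reinitialize = table_block_parallelscan_reinitialize,
    .relation_size = table_block_relation_size,
    /* ... remaining callbacks omitted ... */
};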
631
632/*
633 * table_block_relation_estimate_size
634 *
635 * This function can't be directly used as the implementation of the
636 * relation_estimate_size callback, because it has a few additional parameters.
637 * Instead, it is intended to be used as a helper function; the caller can
638 * pass through the arguments to its relation_estimate_size function plus the
639 * additional values required here.
640 *
641 * overhead_bytes_per_tuple should contain the approximate number of bytes
642 * of storage required to store a tuple above and beyond what is required for
643 * the tuple data proper. Typically, this would include things like the
644 * size of the tuple header and item pointer. This is only used for query
645 * planning, so a table AM where the value is not constant could choose to
646 * pass a "best guess".
647 *
648 * usable_bytes_per_page should contain the approximate number of bytes per
649 * page usable for tuple data, excluding the page header and any anticipated
650 * special space.
651 */
652void
653table_block_relation_estimate_size(Relation rel, int32 *attr_widths,
654 BlockNumber *pages, double *tuples,
655 double *allvisfrac,
656 Size overhead_bytes_per_tuple,
657 Size usable_bytes_per_page)
658{
659 BlockNumber curpages;
660 BlockNumber relpages;
661 double reltuples;
662 BlockNumber relallvisible;
663 double density;
664
665 /* it should have storage, so we can call the smgr */
666 curpages = RelationGetNumberOfBlocks(rel);
667
668 /* coerce values in pg_class to more desirable types */
669 relpages = (BlockNumber) rel->rd_rel->relpages;
670 reltuples = (double) rel->rd_rel->reltuples;
671 relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
672
673 /*
674 * HACK: if the relation has never yet been vacuumed, use a minimum size
675 * estimate of 10 pages. The idea here is to avoid assuming a
676 * newly-created table is really small, even if it currently is, because
677 * that may not be true once some data gets loaded into it. Once a vacuum
678 * or analyze cycle has been done on it, it's more reasonable to believe
679 * the size is somewhat stable.
680 *
681 * (Note that this is only an issue if the plan gets cached and used again
682 * after the table has been filled. What we're trying to avoid is using a
683 * nestloop-type plan on a table that has grown substantially since the
684 * plan was made. Normally, autovacuum/autoanalyze will occur once enough
685 * inserts have happened and cause cached-plan invalidation; but that
686 * doesn't happen instantaneously, and it won't happen at all for cases
687 * such as temporary tables.)
688 *
689 * We test "never vacuumed" by seeing whether reltuples < 0.
690 *
691 * If the table has inheritance children, we don't apply this heuristic.
692 * Totally empty parent tables are quite common, so we should be willing
693 * to believe that they are empty.
694 */
695 if (curpages < 10 &&
696 reltuples < 0 &&
697 !rel->rd_rel->relhassubclass)
698 curpages = 10;
699
700 /* report estimated # pages */
701 *pages = curpages;
702 /* quick exit if rel is clearly empty */
703 if (curpages == 0)
704 {
705 *tuples = 0;
706 *allvisfrac = 0;
707 return;
708 }
709
710 /* estimate number of tuples from previous tuple density */
711 if (reltuples >= 0 && relpages > 0)
712 density = reltuples / (double) relpages;
713 else
714 {
715 /*
716 * When we have no data because the relation was never yet vacuumed,
717 * estimate tuple width from attribute datatypes. We assume here that
718 * the pages are completely full, which is OK for tables but is
719 * probably an overestimate for indexes. Fortunately
720 * get_relation_info() can clamp the overestimate to the parent
721 * table's size.
722 *
723 * Note: this code intentionally disregards alignment considerations,
724 * because (a) that would be gilding the lily considering how crude
725 * the estimate is, (b) it creates platform dependencies in the
726 * default plans which are kind of a headache for regression testing,
727 * and (c) different table AMs might use different padding schemes.
728 */
729 int32 tuple_width;
730 int fillfactor;
731
732 /*
733 * Without reltuples/relpages, we also need to consider fillfactor.
734 * The other branch considers it implicitly by calculating density
735 * from actual relpages/reltuples statistics.
736 */
737 fillfactor = RelationGetFillFactor(rel, HEAP_DEFAULT_FILLFACTOR);
738
739 tuple_width = get_rel_data_width(rel, attr_widths);
740 tuple_width += overhead_bytes_per_tuple;
741 /* note: integer division is intentional here */
742 density = (usable_bytes_per_page * fillfactor / 100) / tuple_width;
743 }
744 *tuples = rint(density * (double) curpages);
745
746 /*
747 * We use relallvisible as-is, rather than scaling it up like we do for
748 * the pages and tuples counts, on the theory that any pages added since
749 * the last VACUUM are most likely not marked all-visible. But costsize.c
750 * wants it converted to a fraction.
751 */
752 if (relallvisible == 0 || curpages <= 0)
753 *allvisfrac = 0;
754 else if ((double) relallvisible >= curpages)
755 *allvisfrac = 1;
756 else
757 *allvisfrac = (double) relallvisible / curpages;
758}
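/*
 * Worked example (not part of tableam.c): with pg_class reporting
 * relpages = 100, reltuples = 10000 and relallvisible = 80, a current size of
 * curpages = 120 gives density = 10000 / 100 = 100 tuples per page, hence
 * *tuples = rint(100 * 120) = 12000 and *allvisfrac = 80 / 120, about 0.67.
 * In the never-vacuumed branch, a heap-like AM passing
 * usable_bytes_per_page = 8168 and overhead_bytes_per_tuple = 28, with
 * fillfactor = 100 and a 44-byte data width, gets
 * density = (8168 * 100 / 100) / (44 + 28) = 113 tuples per page.
 */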
Definition: xact.c:828