/*-------------------------------------------------------------------------
 *
 * tableam.h
 *	  POSTGRES table access method definitions.
 *
 *
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/tableam.h
 *
 * NOTES
 *		See tableam.sgml for higher level documentation.
 *
 *-------------------------------------------------------------------------
 */
#ifndef TABLEAM_H
#define TABLEAM_H

#include "access/relscan.h"
#include "access/sdir.h"
#include "access/xact.h"
#include "utils/guc.h"
#include "utils/rel.h"
#include "utils/snapshot.h"


#define DEFAULT_TABLE_ACCESS_METHOD	"heap"

/* GUCs */
extern char *default_table_access_method;
extern bool synchronize_seqscans;


struct BulkInsertStateData;
struct IndexInfo;
struct SampleScanState;
struct TBMIterateResult;
struct VacuumParams;
struct ValidateIndexState;

/*
 * Bitmask values for the flags argument to the scan_begin callback.
 */
typedef enum ScanOptions
{
	/* one of SO_TYPE_* may be specified */
	SO_TYPE_SEQSCAN = 1 << 0,
	SO_TYPE_BITMAPSCAN = 1 << 1,
	SO_TYPE_SAMPLESCAN = 1 << 2,
	SO_TYPE_TIDSCAN = 1 << 3,
	SO_TYPE_TIDRANGESCAN = 1 << 4,
	SO_TYPE_ANALYZE = 1 << 5,

	/* several of SO_ALLOW_* may be specified */
	/* allow or disallow use of access strategy */
	SO_ALLOW_STRAT = 1 << 6,
	/* report location to syncscan logic? */
	SO_ALLOW_SYNC = 1 << 7,
	/* verify visibility page-at-a-time? */
	SO_ALLOW_PAGEMODE = 1 << 8,

	/* unregister snapshot at scan end? */
	SO_TEMP_SNAPSHOT = 1 << 9
} ScanOptions;

/*
 * Result codes for table_{update,delete,lock_tuple}, and for visibility
 * routines inside table AMs.
 */
typedef enum TM_Result
{
	/*
	 * Signals that the action succeeded (i.e. update/delete performed, lock
	 * was acquired)
	 */
	TM_Ok,

	/* The affected tuple wasn't visible to the relevant snapshot */
	TM_Invisible,

	/* The affected tuple was already modified by the calling backend */
	TM_SelfModified,

	/*
	 * The affected tuple was updated by another transaction. This includes
	 * the case where the tuple was moved to another partition.
	 */
	TM_Updated,

	/* The affected tuple was deleted by another transaction */
	TM_Deleted,

	/*
	 * The affected tuple is currently being modified by another session.
	 * This will only be returned if table_(update/delete/lock_tuple) are
	 * instructed not to wait.
	 */
	TM_BeingModified,

	/* lock couldn't be acquired, action skipped. Only used by lock_tuple */
	TM_WouldBlock
} TM_Result;

/*
 * When table_tuple_update, table_tuple_delete, or table_tuple_lock fail
 * because the target tuple is already outdated, they fill in this struct to
 * provide information to the caller about what happened.
 *
 * ctid is the target's ctid link: it is the same as the target's TID if the
 * target was deleted, or the location of the replacement tuple if the target
 * was updated.
 *
 * xmax is the outdating transaction's XID.  If the caller wants to visit the
 * replacement tuple, it must check that this matches before believing the
 * replacement is really a match.
 *
 * cmax is the outdating command's CID, but only when the failure code is
 * TM_SelfModified (i.e., something in the current transaction outdated the
 * tuple); otherwise cmax is zero.  (We make this restriction because
 * HeapTupleHeaderGetCmax doesn't work for tuples outdated in other
 * transactions.)
 */
typedef struct TM_FailureData
{
	ItemPointerData ctid;
	TransactionId xmax;
	CommandId	cmax;
	bool		traversed;
} TM_FailureData;

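/*
 * Usage sketch (illustrative only, not part of this header; variable names
 * are hypothetical): callers of table_tuple_delete() and friends dispatch on
 * the returned TM_Result and, in the failure cases, consult the
 * TM_FailureData the AM filled in:
 *
 *		TM_FailureData tmfd;
 *		TM_Result	result;
 *
 *		result = table_tuple_delete(rel, tid, cid, snapshot, InvalidSnapshot,
 *									true, &tmfd, false);
 *		if (result == TM_Updated)
 *			... follow tmfd.ctid to the replacement tuple, after checking
 *			... that its xmin matches tmfd.xmax;
 *		else if (result == TM_SelfModified)
 *			... inspect tmfd.cmax to see which command outdated the tuple;
 *
 * See ExecDelete() in src/backend/executor/nodeModifyTable.c for a complete
 * treatment of every TM_Result value.
 */
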
/*
 * State used when calling table_index_delete_tuples().
 *
 * Represents the status of table tuples, referenced by table TID and taken by
 * index AM from index tuples.  State consists of high level parameters of the
 * deletion operation, plus two mutable palloc()'d arrays for information
 * about the status of individual table tuples.  These are conceptually one
 * single array.  Using two arrays keeps the TM_IndexDelete struct small,
 * which makes sorting the first array (the deltids array) fast.
 *
 * Some index AM callers perform simple index tuple deletion (by specifying
 * bottomup = false), and include only known-dead deltids.  These known-dead
 * entries are all marked knowndeletable = true directly (typically these are
 * TIDs from LP_DEAD-marked index tuples), but that isn't strictly required.
 *
 * Callers that specify bottomup = true are "bottom-up index deletion"
 * callers.  The considerations for the tableam are more subtle with these
 * callers because they ask the tableam to perform highly speculative work,
 * and might only expect the tableam to check a small fraction of all entries.
 * Caller is not allowed to specify knowndeletable = true for any entry
 * because everything is highly speculative.  Bottom-up caller provides
 * context and hints to tableam -- see comments below for details on how index
 * AMs and tableams should coordinate during bottom-up index deletion.
 *
 * Simple index deletion callers may ask the tableam to perform speculative
 * work, too.  This is a little like bottom-up deletion, but not too much.
 * The tableam will only perform speculative work when it's practically free
 * to do so in passing for simple deletion caller (while always performing
 * whatever work is needed to enable knowndeletable/LP_DEAD index tuples to
 * be deleted within index AM).  This is the real reason why it's possible for
 * simple index deletion caller to specify knowndeletable = false up front
 * (this means "check if it's possible for me to delete corresponding index
 * tuple when it's cheap to do so in passing").  The index AM should only
 * include "extra" entries for index tuples whose TIDs point to a table block
 * that tableam is expected to have to visit anyway (in the event of a
 * block-oriented tableam).  The tableam isn't strictly obligated to check
 * these "extra" TIDs, but a block-based AM should always manage to do so in
 * practice.
 *
 * The final contents of the deltids/status arrays are interesting to callers
 * that ask tableam to perform speculative work (i.e. when _any_ items have
 * knowndeletable set to false up front).  These index AM callers will
 * naturally need to consult final state to determine which index tuples are
 * in fact deletable.
 *
 * The index AM can keep track of which index tuple relates to which deltid by
 * setting idxoffnum (and/or relying on each entry being uniquely identifiable
 * using tid), which is important when the final contents of the array will
 * need to be interpreted -- the array can shrink from initial size after
 * tableam processing and/or have entries in a new order (tableam may sort
 * deltids array for its own reasons).  Bottom-up callers may find that final
 * ndeltids is 0 on return from call to tableam, in which case no index tuple
 * deletions are possible.  Simple deletion callers can rely on any entries
 * they know to be deletable appearing in the final array as deletable.
 */
typedef struct TM_IndexDelete
{
	ItemPointerData tid;		/* table TID from index tuple */
	int16		id;				/* Offset into TM_IndexStatus array */
} TM_IndexDelete;

typedef struct TM_IndexStatus
{
	OffsetNumber idxoffnum;		/* Index AM page offset number */
	bool		knowndeletable; /* Currently known to be deletable? */

	/* Bottom-up index deletion specific fields follow */
	bool		promising;		/* Promising (duplicate) index tuple? */
	int16		freespace;		/* Space freed in index if deleted */
} TM_IndexStatus;

/*
 * Index AM/tableam coordination is central to the design of bottom-up index
 * deletion.  The index AM provides hints about where to look to the tableam
 * by marking some entries as "promising".  Index AM does this with duplicate
 * index tuples that are strongly suspected to be old versions left behind by
 * UPDATEs that did not logically modify indexed values.  Index AM may find it
 * helpful to only mark entries as promising when they're thought to have been
 * affected by such an UPDATE in the recent past.
 *
 * Bottom-up index deletion casts a wide net at first, usually by including
 * all TIDs on a target index page.  It is up to the tableam to worry about
 * the cost of checking transaction status information.  The tableam is in
 * control, but needs careful guidance from the index AM.  Index AM requests
 * that bottomupfreespace target be met, while tableam measures progress
 * towards that goal by tallying the per-entry freespace value for known
 * deletable entries.  (All !bottomup callers can just set these space related
 * fields to zero.)
 */
typedef struct TM_IndexDeleteOp
{
	bool		bottomup;		/* Bottom-up (not simple) deletion? */
	int			bottomupfreespace;	/* Bottom-up space target */

	/* Mutable per-TID information follows (index AM initializes entries) */
	int			ndeltids;		/* Current # of deltids/status elements */
	TM_IndexDelete *deltids;
	TM_IndexStatus *status;
} TM_IndexDeleteOp;

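/*
 * Illustrative sketch (not part of this header): a simple-deletion caller
 * fills one TM_IndexDelete/TM_IndexStatus pair per known-dead index tuple
 * and asks the tableam which entries are actually safe to delete.  The
 * `deadtids`, `deadoffnums` and `ndeadtids` inputs are hypothetical:
 *
 *		TM_IndexDeleteOp delstate;
 *		TransactionId latestRemovedXid;
 *
 *		delstate.bottomup = false;
 *		delstate.bottomupfreespace = 0;
 *		delstate.ndeltids = 0;
 *		delstate.deltids = palloc(ndeadtids * sizeof(TM_IndexDelete));
 *		delstate.status = palloc(ndeadtids * sizeof(TM_IndexStatus));
 *
 *		for (int i = 0; i < ndeadtids; i++)
 *		{
 *			delstate.deltids[i].tid = deadtids[i];
 *			delstate.deltids[i].id = i;
 *			delstate.status[i].idxoffnum = deadoffnums[i];
 *			delstate.status[i].knowndeletable = true;
 *			delstate.status[i].promising = false;
 *			delstate.status[i].freespace = 0;
 *			delstate.ndeltids++;
 *		}
 *		latestRemovedXid = table_index_delete_tuples(rel, &delstate);
 *
 * Compare _bt_delitems_delete_check() in src/backend/access/nbtree/nbtpage.c.
 */
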
/* "options" flag bits for table_tuple_insert */
/* TABLE_INSERT_SKIP_WAL was 0x0001; RelationNeedsWAL() now governs */
#define TABLE_INSERT_SKIP_FSM		0x0002
#define TABLE_INSERT_FROZEN			0x0004
#define TABLE_INSERT_NO_LOGICAL		0x0008

/* flag bits for table_tuple_lock */
/* Follow tuples whose update is in progress if lock modes don't conflict */
#define TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS	(1 << 0)
/* Follow update chain and lock latest version of tuple */
#define TUPLE_LOCK_FLAG_FIND_LAST_VERSION		(1 << 1)


/* Typedef for callback function for table_index_build_scan */
typedef void (*IndexBuildCallback) (Relation index,
									ItemPointer tid,
									Datum *values,
									bool *isnull,
									bool tupleIsAlive,
									void *state);

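/*
 * Illustrative sketch (not part of this header): table_index_build_scan()
 * invokes an IndexBuildCallback once per tuple that should be indexed.  An
 * index AM's callback typically accumulates or inserts one index entry per
 * call; the "myidx_" names below are hypothetical:
 *
 *		static void
 *		myidx_build_callback(Relation index, ItemPointer tid, Datum *values,
 *							 bool *isnull, bool tupleIsAlive, void *state)
 *		{
 *			MyIdxBuildState *buildstate = (MyIdxBuildState *) state;
 *
 *			myidx_spool_entry(buildstate, tid, values, isnull);
 *			buildstate->indtuples += 1;
 *		}
 *
 * Compare btbuildCallback in src/backend/access/nbtree/nbtsort.c.
 */
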
/*
 * API struct for a table AM.  Note this must be allocated in a
 * server-lifetime manner, typically as a static const struct, which then gets
 * returned by FormData_pg_am.amhandler.
 *
 * In most cases it's not appropriate to call the callbacks directly, use the
 * table_* wrapper functions instead.
 *
 * GetTableAmRoutine() asserts that required callbacks are filled in, remember
 * to update when adding a callback.
 */
typedef struct TableAmRoutine
{
	/* this must be set to T_TableAmRoutine */
	NodeTag		type;

	/* ------------------------------------------------------------------------
	 * Slot related callbacks.
	 * ------------------------------------------------------------------------
	 */

	/*
	 * Return slot implementation suitable for storing a tuple of this AM.
	 */
	const TupleTableSlotOps *(*slot_callbacks) (Relation rel);


	/* ------------------------------------------------------------------------
	 * Table scan callbacks.
	 * ------------------------------------------------------------------------
	 */

	/*
	 * Start a scan of `rel`.  The callback has to return a TableScanDesc,
	 * which will typically be embedded in a larger, AM specific, struct.
	 *
	 * If nkeys != 0, the results need to be filtered by those scan keys.
	 *
	 * pscan, if not NULL, will have already been initialized with
	 * parallelscan_initialize(), and has to be for the same relation.  Will
	 * only be set coming from table_beginscan_parallel().
	 *
	 * `flags` is a bitmask indicating the type of scan (ScanOptions's
	 * SO_TYPE_*, currently only one may be specified), options controlling
	 * the scan's behaviour (ScanOptions's SO_ALLOW_*, several may be
	 * specified, an AM may ignore unsupported ones) and whether the snapshot
	 * needs to be deallocated at scan_end (ScanOptions's SO_TEMP_SNAPSHOT).
	 */
	TableScanDesc (*scan_begin) (Relation rel,
								 Snapshot snapshot,
								 int nkeys, struct ScanKeyData *key,
								 ParallelTableScanDesc pscan,
								 uint32 flags);

	/*
	 * Release resources and deallocate scan.  If TableScanDesc.temp_snap,
	 * TableScanDesc.rs_snapshot needs to be unregistered.
	 */
	void		(*scan_end) (TableScanDesc scan);

	/*
	 * Restart relation scan.  If set_params is set to true, allow_{strat,
	 * sync, pagemode} (see scan_begin) changes should be taken into account.
	 */
	void		(*scan_rescan) (TableScanDesc scan, struct ScanKeyData *key,
								bool set_params, bool allow_strat,
								bool allow_sync, bool allow_pagemode);

	/*
	 * Return next tuple from `scan`, store in slot.
	 */
	bool		(*scan_getnextslot) (TableScanDesc scan,
									 ScanDirection direction,
									 TupleTableSlot *slot);

	/*-----------
	 * Optional functions to provide scanning for ranges of ItemPointers.
	 * Implementations must either provide both of these functions, or neither
	 * of them.
	 *
	 * Implementations of scan_set_tidrange must themselves handle
	 * ItemPointers of any value, i.e., they must handle each of the following:
	 *
	 * 1) mintid or maxtid is beyond the end of the table; and
	 * 2) mintid is above maxtid; and
	 * 3) item offset for mintid or maxtid is beyond the maximum offset
	 *    allowed by the AM.
	 *
	 * Implementations can assume that scan_set_tidrange is always called
	 * before scan_getnextslot_tidrange or after scan_rescan and before any
	 * further calls to scan_getnextslot_tidrange.
	 */
	void		(*scan_set_tidrange) (TableScanDesc scan,
									  ItemPointer mintid,
									  ItemPointer maxtid);

	/*
	 * Return next tuple from `scan` that's in the range of TIDs defined by
	 * scan_set_tidrange.
	 */
	bool		(*scan_getnextslot_tidrange) (TableScanDesc scan,
											  ScanDirection direction,
											  TupleTableSlot *slot);

	/* ------------------------------------------------------------------------
	 * Parallel table scan related functions.
	 * ------------------------------------------------------------------------
	 */

	/*
	 * Estimate the size of shared memory needed for a parallel scan of this
	 * relation.  The snapshot does not need to be accounted for.
	 */
	Size		(*parallelscan_estimate) (Relation rel);

	/*
	 * Initialize ParallelTableScanDesc for a parallel scan of this relation.
	 * `pscan` will be sized according to parallelscan_estimate() for the same
	 * relation.
	 */
	Size		(*parallelscan_initialize) (Relation rel,
											ParallelTableScanDesc pscan);

	/*
	 * Reinitialize `pscan` for a new scan.  `rel` will be the same relation
	 * as when `pscan` was initialized by parallelscan_initialize.
	 */
	void		(*parallelscan_reinitialize) (Relation rel,
											  ParallelTableScanDesc pscan);

	/* ------------------------------------------------------------------------
	 * Index Scan Callbacks
	 * ------------------------------------------------------------------------
	 */

	/*
	 * Prepare to fetch tuples from the relation, as needed when fetching
	 * tuples for an index scan.  The callback has to return an
	 * IndexFetchTableData, which the AM will typically embed in a larger
	 * structure with additional information.
	 *
	 * Tuples for an index scan can then be fetched via index_fetch_tuple.
	 */
	struct IndexFetchTableData *(*index_fetch_begin) (Relation rel);

	/*
	 * Reset index fetch.  Typically this will release cross index fetch
	 * resources held in IndexFetchTableData.
	 */
	void		(*index_fetch_reset) (struct IndexFetchTableData *data);

	/*
	 * Release resources and deallocate index fetch.
	 */
	void		(*index_fetch_end) (struct IndexFetchTableData *data);

	/*
	 * Fetch tuple at `tid` into `slot`, after doing a visibility test
	 * according to `snapshot`.  If a tuple was found and passed the
	 * visibility test, return true, false otherwise.
	 *
	 * Note that AMs that do not necessarily update indexes when indexed
	 * columns do not change, need to return the current/correct version of
	 * the tuple that is visible to the snapshot, even if the tid points to an
	 * older version of the tuple.
	 *
	 * *call_again is false on the first call to index_fetch_tuple for a tid.
	 * If there potentially is another tuple matching the tid, *call_again
	 * needs to be set to true by index_fetch_tuple, signaling to the caller
	 * that index_fetch_tuple should be called again for the same tid.
	 *
	 * *all_dead, if all_dead is not NULL, should be set to true by
	 * index_fetch_tuple iff it is guaranteed that no backend needs to see
	 * that tuple.  Index AMs can use that to avoid returning that tid in
	 * future searches.
	 */
	bool		(*index_fetch_tuple) (struct IndexFetchTableData *scan,
									  ItemPointer tid,
									  Snapshot snapshot,
									  TupleTableSlot *slot,
									  bool *call_again, bool *all_dead);


	/* ------------------------------------------------------------------------
	 * Callbacks for non-modifying operations on individual tuples
	 * ------------------------------------------------------------------------
	 */

	/*
	 * Fetch tuple at `tid` into `slot`, after doing a visibility test
	 * according to `snapshot`.  If a tuple was found and passed the
	 * visibility test, returns true, false otherwise.
	 */
	bool		(*tuple_fetch_row_version) (Relation rel,
											ItemPointer tid,
											Snapshot snapshot,
											TupleTableSlot *slot);

	/*
	 * Is tid valid for a scan of this relation.
	 */
	bool		(*tuple_tid_valid) (TableScanDesc scan,
									ItemPointer tid);

	/*
	 * Return the latest version of the tuple at `tid`, by updating `tid` to
	 * point at the newest version.
	 */
	void		(*tuple_get_latest_tid) (TableScanDesc scan,
										 ItemPointer tid);

	/*
	 * Does the tuple in `slot` satisfy `snapshot`?  The slot needs to be of
	 * the appropriate type for the AM.
	 */
	bool		(*tuple_satisfies_snapshot) (Relation rel,
											 TupleTableSlot *slot,
											 Snapshot snapshot);

	/* see table_index_delete_tuples() */
	TransactionId (*index_delete_tuples) (Relation rel,
										  TM_IndexDeleteOp *delstate);


	/* ------------------------------------------------------------------------
	 * Manipulations of physical tuples.
	 * ------------------------------------------------------------------------
	 */

	/* see table_tuple_insert() for reference about parameters */
	void		(*tuple_insert) (Relation rel, TupleTableSlot *slot,
								 CommandId cid, int options,
								 struct BulkInsertStateData *bistate);

	/* see table_tuple_insert_speculative() for reference about parameters */
	void		(*tuple_insert_speculative) (Relation rel,
											 TupleTableSlot *slot,
											 CommandId cid,
											 int options,
											 struct BulkInsertStateData *bistate,
											 uint32 specToken);

	/* see table_tuple_complete_speculative() for reference about parameters */
	void		(*tuple_complete_speculative) (Relation rel,
											   TupleTableSlot *slot,
											   uint32 specToken,
											   bool succeeded);

	/* see table_multi_insert() for reference about parameters */
	void		(*multi_insert) (Relation rel, TupleTableSlot **slots, int nslots,
								 CommandId cid, int options,
								 struct BulkInsertStateData *bistate);

	/* see table_tuple_delete() for reference about parameters */
	TM_Result	(*tuple_delete) (Relation rel,
								 ItemPointer tid,
								 CommandId cid,
								 Snapshot snapshot,
								 Snapshot crosscheck,
								 bool wait,
								 TM_FailureData *tmfd,
								 bool changingPart);

	/* see table_tuple_update() for reference about parameters */
	TM_Result	(*tuple_update) (Relation rel,
								 ItemPointer otid,
								 TupleTableSlot *slot,
								 CommandId cid,
								 Snapshot snapshot,
								 Snapshot crosscheck,
								 bool wait,
								 TM_FailureData *tmfd,
								 LockTupleMode *lockmode,
								 bool *update_indexes);

	/* see table_tuple_lock() for reference about parameters */
	TM_Result	(*tuple_lock) (Relation rel,
							   ItemPointer tid,
							   Snapshot snapshot,
							   TupleTableSlot *slot,
							   CommandId cid,
							   LockTupleMode mode,
							   LockWaitPolicy wait_policy,
							   uint8 flags,
							   TM_FailureData *tmfd);

	/*
	 * Perform operations necessary to complete insertions made via
	 * tuple_insert and multi_insert with a BulkInsertState specified.
	 * In-tree access methods ceased to use this.
	 *
	 * Typically callers of tuple_insert and multi_insert will just pass all
	 * the flags that apply to them, and each AM has to decide which of them
	 * make sense for it, and then only take actions in finish_bulk_insert for
	 * those flags, and ignore others.
	 *
	 * Optional callback.
	 */
	void		(*finish_bulk_insert) (Relation rel, int options);


	/* ------------------------------------------------------------------------
	 * DDL related functionality.
	 * ------------------------------------------------------------------------
	 */

	/*
	 * This callback needs to create a new relation filenode for `rel`, with
	 * appropriate durability behaviour for `persistence`.
	 *
	 * Note that only the subset of the relcache filled by
	 * RelationBuildLocalRelation() can be relied upon and that the relation's
	 * catalog entries will either not yet exist (new relation), or will still
	 * reference the old relfilenode.
	 *
	 * As output *freezeXid, *minmulti must be set to the values appropriate
	 * for pg_class.{relfrozenxid, relminmxid}.  For AMs that don't need those
	 * fields to be filled they can be set to InvalidTransactionId and
	 * InvalidMultiXactId, respectively.
	 *
	 * See also table_relation_set_new_filenode().
	 */
	void		(*relation_set_new_filenode) (Relation rel,
											  const RelFileNode *newrnode,
											  char persistence,
											  TransactionId *freezeXid,
											  MultiXactId *minmulti);

	/*
	 * This callback needs to remove all contents from `rel`'s current
	 * relfilenode.  No provisions for transactional behaviour need to be
	 * made.  Often this can be implemented by truncating the underlying
	 * storage to its minimal size.
	 *
	 * See also table_relation_nontransactional_truncate().
	 */
	void		(*relation_nontransactional_truncate) (Relation rel);

	/*
	 * See table_relation_copy_data().
	 *
	 * This can typically be implemented by directly copying the underlying
	 * storage, unless it contains references to the tablespace internally.
	 */
	void		(*relation_copy_data) (Relation rel,
									   const RelFileNode *newrnode);

	/* See table_relation_copy_for_cluster() */
	void		(*relation_copy_for_cluster) (Relation NewTable,
											  Relation OldTable,
											  Relation OldIndex,
											  bool use_sort,
											  TransactionId OldestXmin,
											  TransactionId *xid_cutoff,
											  MultiXactId *multi_cutoff,
											  double *num_tuples,
											  double *tups_vacuumed,
											  double *tups_recently_dead);

	/*
	 * React to VACUUM command on the relation.  The VACUUM can be triggered
	 * by a user or by autovacuum.  The specific actions performed by the AM
	 * will depend heavily on the individual AM.
	 *
	 * On entry a transaction is already established, and the relation is
	 * locked with a ShareUpdateExclusive lock.
	 *
	 * Note that neither VACUUM FULL (and CLUSTER), nor ANALYZE go through
	 * this routine, even if (for ANALYZE) it is part of the same VACUUM
	 * command.
	 *
	 * There probably, in the future, needs to be a separate callback to
	 * integrate with autovacuum's scheduling.
	 */
	void		(*relation_vacuum) (Relation onerel,
									struct VacuumParams *params,
									BufferAccessStrategy bstrategy);

	/*
	 * Prepare to analyze block `blockno` of `scan`.  The scan has been
	 * started with table_beginscan_analyze().  See also
	 * table_scan_analyze_next_block().
	 *
	 * The callback may acquire resources like locks that are held until
	 * table_scan_analyze_next_tuple() returns false.  It e.g. can make sense
	 * to hold a lock until all tuples on a block have been analyzed by
	 * scan_analyze_next_tuple.
	 *
	 * The callback can return false if the block is not suitable for
	 * sampling, e.g. because it's a metapage that could never contain tuples.
	 *
	 * XXX: This obviously is primarily suited for block-based AMs.  It's not
	 * clear what a good interface for non block based AMs would be, so there
	 * isn't one yet.
	 */
	bool		(*scan_analyze_next_block) (TableScanDesc scan,
											BlockNumber blockno,
											BufferAccessStrategy bstrategy);

	/*
	 * See table_scan_analyze_next_tuple().
	 *
	 * Not every AM might have a meaningful concept of dead rows, in which
	 * case it's OK to not increment *deadrows - but note that that may
	 * influence autovacuum scheduling (see comment for relation_vacuum
	 * callback).
	 */
	bool		(*scan_analyze_next_tuple) (TableScanDesc scan,
											TransactionId OldestXmin,
											double *liverows,
											double *deadrows,
											TupleTableSlot *slot);

	/* see table_index_build_range_scan for reference about parameters */
	double		(*index_build_range_scan) (Relation table_rel,
										   Relation index_rel,
										   struct IndexInfo *index_info,
										   bool allow_sync,
										   bool anyvisible,
										   bool progress,
										   BlockNumber start_blockno,
										   BlockNumber numblocks,
										   IndexBuildCallback callback,
										   void *callback_state,
										   TableScanDesc scan);

	/* see table_index_validate_scan for reference about parameters */
	void		(*index_validate_scan) (Relation table_rel,
										Relation index_rel,
										struct IndexInfo *index_info,
										Snapshot snapshot,
										struct ValidateIndexState *state);


	/* ------------------------------------------------------------------------
	 * Miscellaneous functions.
	 * ------------------------------------------------------------------------
	 */

	/*
	 * See table_relation_size().
	 *
	 * Note that currently a few callers use the MAIN_FORKNUM size to figure
	 * out the range of potentially interesting blocks (brin, analyze).  It's
	 * probable that we'll need to revise the interface for those at some
	 * point.
	 */
	uint64		(*relation_size) (Relation rel, ForkNumber forkNumber);


	/*
	 * This callback should return true if the relation requires a TOAST
	 * table and false if it does not.  It may wish to examine the relation's
	 * tuple descriptor before making a decision, but if it uses some other
	 * method of storing large values (or if it does not support them) it can
	 * simply return false.
	 */
	bool		(*relation_needs_toast_table) (Relation rel);

	/*
	 * This callback should return the OID of the table AM that implements
	 * TOAST tables for this AM.  If the relation_needs_toast_table callback
	 * always returns false, this callback is not required.
	 */
	Oid			(*relation_toast_am) (Relation rel);

	/*
	 * This callback is invoked when detoasting a value stored in a toast
	 * table implemented by this AM.  See table_relation_fetch_toast_slice()
	 * for more details.
	 */
	void		(*relation_fetch_toast_slice) (Relation toastrel, Oid valueid,
											   int32 attrsize,
											   int32 sliceoffset,
											   int32 slicelength,
											   struct varlena *result);


	/* ------------------------------------------------------------------------
	 * Planner related functions.
	 * ------------------------------------------------------------------------
	 */

	/*
	 * See table_relation_estimate_size().
	 *
	 * While block oriented, it shouldn't be too hard for an AM that doesn't
	 * internally use blocks to convert into a usable representation.
	 *
	 * This differs from the relation_size callback by returning size
	 * estimates (both relation size and tuple count) for planning purposes,
	 * rather than returning a currently correct estimate.
	 */
	void		(*relation_estimate_size) (Relation rel, int32 *attr_widths,
										   BlockNumber *pages, double *tuples,
										   double *allvisfrac);


	/* ------------------------------------------------------------------------
	 * Executor related functions.
	 * ------------------------------------------------------------------------
	 */

	/*
	 * Prepare to fetch / check / return tuples from `tbmres->blockno` as part
	 * of a bitmap table scan.  `scan` was started via table_beginscan_bm().
	 * Return false if there are no tuples to be found on the page, true
	 * otherwise.
	 *
	 * This will typically read and pin the target block, and do the necessary
	 * work to allow scan_bitmap_next_tuple() to return tuples (e.g. it might
	 * make sense to perform tuple visibility checks at this time).  For some
	 * AMs it will make more sense to do all the work referencing `tbmres`
	 * contents here, for others it might be better to defer more work to
	 * scan_bitmap_next_tuple.
	 *
	 * If `tbmres->ntuples` is -1, this is a lossy scan and all visible tuples
	 * on the page have to be returned, otherwise the tuples at offsets in
	 * `tbmres->offsets` need to be returned.
	 *
	 * XXX: Currently this may only be implemented if the AM uses md.c as its
	 * storage manager, and uses ItemPointer->ip_blkid in a manner that maps
	 * blockids directly to the underlying storage.  nodeBitmapHeapscan.c
	 * performs prefetching directly using that interface.  This probably
	 * needs to be rectified at a later point.
	 *
	 * XXX: Currently this may only be implemented if the AM uses the
	 * visibilitymap, as nodeBitmapHeapscan.c unconditionally accesses it to
	 * perform prefetching.  This probably needs to be rectified at a later
	 * point.
	 *
	 * Optional callback, but either both scan_bitmap_next_block and
	 * scan_bitmap_next_tuple need to exist, or neither.
	 */
	bool		(*scan_bitmap_next_block) (TableScanDesc scan,
										   struct TBMIterateResult *tbmres);

	/*
	 * Fetch the next tuple of a bitmap table scan into `slot` and return true
	 * if a visible tuple was found, false otherwise.
	 *
	 * For some AMs it will make more sense to do all the work referencing
	 * `tbmres` contents in scan_bitmap_next_block, for others it might be
	 * better to defer more work to this callback.
	 *
	 * Optional callback, but either both scan_bitmap_next_block and
	 * scan_bitmap_next_tuple need to exist, or neither.
	 */
	bool		(*scan_bitmap_next_tuple) (TableScanDesc scan,
										   struct TBMIterateResult *tbmres,
										   TupleTableSlot *slot);

	/*
	 * Prepare to fetch tuples from the next block in a sample scan.  Return
	 * false if the sample scan is finished, true otherwise.  `scan` was
	 * started via table_beginscan_sampling().
	 *
	 * Typically this will first determine the target block by calling the
	 * TsmRoutine's NextSampleBlock() callback if not NULL, or alternatively
	 * perform a sequential scan over all blocks.  The determined block is
	 * then typically read and pinned.
	 *
	 * As the TsmRoutine interface is block based, a block needs to be passed
	 * to NextSampleBlock().  If that's not appropriate for an AM, it
	 * internally needs to perform mapping between the internal and a block
	 * based representation.
	 *
	 * Note that it's not acceptable to hold deadlock prone resources such as
	 * lwlocks until scan_sample_next_tuple() has exhausted the tuples on the
	 * block - the tuple is likely to be returned to an upper query node, and
	 * the next call could be off a long while.  Holding buffer pins and such
	 * is obviously OK.
	 *
	 * Currently it is required to implement this interface, as there's no
	 * alternative way (contrary e.g. to bitmap scans) to implement sample
	 * scans.  If infeasible to implement, the AM may raise an error.
	 */
	bool		(*scan_sample_next_block) (TableScanDesc scan,
										   struct SampleScanState *scanstate);

	/*
	 * This callback, only called after scan_sample_next_block has returned
	 * true, should determine the next tuple to be returned from the selected
	 * block using the TsmRoutine's NextSampleTuple() callback.
	 *
	 * The callback needs to perform visibility checks, and only return
	 * visible tuples.  That obviously can mean calling NextSampleTuple()
	 * multiple times.
	 *
	 * The TsmRoutine interface assumes that there's a maximum offset on a
	 * given page, so if that doesn't apply to an AM, it needs to emulate that
	 * assumption somehow.
	 */
	bool		(*scan_sample_next_tuple) (TableScanDesc scan,
										   struct SampleScanState *scanstate,
										   TupleTableSlot *slot);

} TableAmRoutine;


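/*
 * Illustrative sketch (not part of this header): an AM exposes its
 * TableAmRoutine through a handler function, which CREATE ACCESS METHOD then
 * references.  All "myam_" names below are hypothetical:
 *
 *		static const TableAmRoutine myam_methods = {
 *			.type = T_TableAmRoutine,
 *			.slot_callbacks = myam_slot_callbacks,
 *			.scan_begin = myam_scan_begin,
 *			.scan_end = myam_scan_end,
 *			... every required callback ...
 *		};
 *
 *		PG_FUNCTION_INFO_V1(myam_handler);
 *
 *		Datum
 *		myam_handler(PG_FUNCTION_ARGS)
 *		{
 *			PG_RETURN_POINTER(&myam_methods);
 *		}
 *
 * And at the SQL level:
 *
 *		CREATE FUNCTION myam_handler(internal) RETURNS table_am_handler
 *			AS 'MODULE_PATHNAME' LANGUAGE C STRICT;
 *		CREATE ACCESS METHOD myam TYPE TABLE HANDLER myam_handler;
 *
 * Compare heap_tableam_handler in src/backend/access/heap/heapam_handler.c.
 */
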
/* ----------------------------------------------------------------------------
 * Slot functions.
 * ----------------------------------------------------------------------------
 */

/*
 * Returns slot callbacks suitable for holding tuples of the appropriate type
 * for the relation.  Works for tables, views, foreign tables and partitioned
 * tables.
 */
extern const TupleTableSlotOps *table_slot_callbacks(Relation rel);

/*
 * Returns slot using the callbacks returned by table_slot_callbacks(), and
 * registers it on *reglist.
 */
extern TupleTableSlot *table_slot_create(Relation rel, List **reglist);


/* ----------------------------------------------------------------------------
 * Table scan functions.
 * ----------------------------------------------------------------------------
 */

/*
 * Start a scan of `rel`.  Returned tuples pass a visibility test of
 * `snapshot`, and if nkeys != 0, the results are filtered by those scan keys.
 */
static inline TableScanDesc
table_beginscan(Relation rel, Snapshot snapshot,
				int nkeys, struct ScanKeyData *key)
{
	uint32		flags = SO_TYPE_SEQSCAN |
		SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;

	return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
}

/*
 * Like table_beginscan(), but for scanning catalog.  It'll automatically use
 * a snapshot appropriate for scanning catalog relations.
 */
extern TableScanDesc table_beginscan_catalog(Relation rel, int nkeys,
											 struct ScanKeyData *key);

/*
 * Like table_beginscan(), but table_beginscan_strat() offers an extended API
 * that lets the caller control whether a nondefault buffer access strategy
 * can be used, and whether syncscan can be chosen (possibly resulting in the
 * scan not starting from block zero).  Both of these default to true with
 * plain table_beginscan.
 */
static inline TableScanDesc
table_beginscan_strat(Relation rel, Snapshot snapshot,
					  int nkeys, struct ScanKeyData *key,
					  bool allow_strat, bool allow_sync)
{
	uint32		flags = SO_TYPE_SEQSCAN | SO_ALLOW_PAGEMODE;

	if (allow_strat)
		flags |= SO_ALLOW_STRAT;
	if (allow_sync)
		flags |= SO_ALLOW_SYNC;

	return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
}

/*
 * table_beginscan_bm is an alternative entry point for setting up a
 * TableScanDesc for a bitmap heap scan.  Although that scan technology is
 * really quite unlike a standard seqscan, there is just enough commonality to
 * make it worth using the same data structure.
 */
static inline TableScanDesc
table_beginscan_bm(Relation rel, Snapshot snapshot,
				   int nkeys, struct ScanKeyData *key)
{
	uint32		flags = SO_TYPE_BITMAPSCAN | SO_ALLOW_PAGEMODE;

	return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
}

/*
 * table_beginscan_sampling is an alternative entry point for setting up a
 * TableScanDesc for a TABLESAMPLE scan.  As with bitmap scans, it's worth
 * using the same data structure although the behavior is rather different.
 * In addition to the options offered by table_beginscan_strat, this call
 * also allows control of whether page-mode visibility checking is used.
 */
static inline TableScanDesc
table_beginscan_sampling(Relation rel, Snapshot snapshot,
						 int nkeys, struct ScanKeyData *key,
						 bool allow_strat, bool allow_sync,
						 bool allow_pagemode)
{
	uint32		flags = SO_TYPE_SAMPLESCAN;

	if (allow_strat)
		flags |= SO_ALLOW_STRAT;
	if (allow_sync)
		flags |= SO_ALLOW_SYNC;
	if (allow_pagemode)
		flags |= SO_ALLOW_PAGEMODE;

	return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
}

/*
 * table_beginscan_tid is an alternative entry point for setting up a
 * TableScanDesc for a Tid scan.  As with bitmap scans, it's worth using
 * the same data structure although the behavior is rather different.
 */
static inline TableScanDesc
table_beginscan_tid(Relation rel, Snapshot snapshot)
{
	uint32		flags = SO_TYPE_TIDSCAN;

	return rel->rd_tableam->scan_begin(rel, snapshot, 0, NULL, NULL, flags);
}

/*
 * table_beginscan_analyze is an alternative entry point for setting up a
 * TableScanDesc for an ANALYZE scan.  As with bitmap scans, it's worth using
 * the same data structure although the behavior is rather different.
 */
static inline TableScanDesc
table_beginscan_analyze(Relation rel)
{
	uint32		flags = SO_TYPE_ANALYZE;

	return rel->rd_tableam->scan_begin(rel, NULL, 0, NULL, NULL, flags);
}

/*
 * End relation scan.
 */
static inline void
table_endscan(TableScanDesc scan)
{
	scan->rs_rd->rd_tableam->scan_end(scan);
}

/*
 * Restart a relation scan.
 */
static inline void
table_rescan(TableScanDesc scan,
			 struct ScanKeyData *key)
{
	scan->rs_rd->rd_tableam->scan_rescan(scan, key, false, false, false, false);
}

/*
 * Restart a relation scan after changing params.
 *
 * This call allows changing the buffer strategy, syncscan, and pagemode
 * options before starting a fresh scan.  Note that although the actual use
 * of syncscan might change (effectively, enabling or disabling reporting),
 * the previously selected startblock will be kept.
 */
static inline void
table_rescan_set_params(TableScanDesc scan, struct ScanKeyData *key,
						bool allow_strat, bool allow_sync, bool allow_pagemode)
{
	scan->rs_rd->rd_tableam->scan_rescan(scan, key, true,
										 allow_strat, allow_sync,
										 allow_pagemode);
}

/*
 * Update snapshot used by the scan.
 */
extern void table_scan_update_snapshot(TableScanDesc scan, Snapshot snapshot);

/*
 * Return next tuple from `scan`, store in slot.
 */
static inline bool
table_scan_getnextslot(TableScanDesc sscan, ScanDirection direction,
					   TupleTableSlot *slot)
{
	slot->tts_tableOid = RelationGetRelid(sscan->rs_rd);

	/*
	 * We don't expect direct calls to table_scan_getnextslot with valid
	 * CheckXidAlive for catalog or regular tables.  See detailed comments in
	 * xact.c where these variables are declared.
	 */
	if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
		elog(ERROR, "unexpected table_scan_getnextslot call during logical decoding");

	return sscan->rs_rd->rd_tableam->scan_getnextslot(sscan, direction, slot);
}

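/*
 * Usage sketch (illustrative, not part of this header): the canonical scan
 * loop built from the wrappers above.  It assumes `rel` has been opened and
 * locked and `snapshot` obtained by the caller:
 *
 *		TableScanDesc scan;
 *		TupleTableSlot *slot = table_slot_create(rel, NULL);
 *
 *		scan = table_beginscan(rel, snapshot, 0, NULL);
 *		while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
 *		{
 *			... process the tuple stored in slot ...
 *		}
 *		table_endscan(scan);
 *		ExecDropSingleTupleTableSlot(slot);
 */
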
/* ----------------------------------------------------------------------------
 * TID Range scanning related functions.
 * ----------------------------------------------------------------------------
 */

/*
 * table_beginscan_tidrange is the entry point for setting up a TableScanDesc
 * for a TID range scan.
 */
static inline TableScanDesc
table_beginscan_tidrange(Relation rel, Snapshot snapshot,
						 ItemPointer mintid,
						 ItemPointer maxtid)
{
	TableScanDesc sscan;
	uint32		flags = SO_TYPE_TIDRANGESCAN | SO_ALLOW_PAGEMODE;

	sscan = rel->rd_tableam->scan_begin(rel, snapshot, 0, NULL, NULL, flags);

	/* Set the range of TIDs to scan */
	sscan->rs_rd->rd_tableam->scan_set_tidrange(sscan, mintid, maxtid);

	return sscan;
}

/*
 * table_rescan_tidrange resets the scan position and sets the minimum and
 * maximum TID range to scan for a TableScanDesc created by
 * table_beginscan_tidrange.
 */
static inline void
table_rescan_tidrange(TableScanDesc sscan, ItemPointer mintid,
					  ItemPointer maxtid)
{
	/* Ensure table_beginscan_tidrange() was used. */
	Assert((sscan->rs_flags & SO_TYPE_TIDRANGESCAN) != 0);

	sscan->rs_rd->rd_tableam->scan_rescan(sscan, NULL, false, false, false, false);
	sscan->rs_rd->rd_tableam->scan_set_tidrange(sscan, mintid, maxtid);
}

/*
 * Fetch the next tuple from `sscan` for a TID range scan created by
 * table_beginscan_tidrange().  Stores the tuple in `slot` and returns true,
 * or returns false if no more tuples exist in the range.
 */
static inline bool
table_scan_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
								TupleTableSlot *slot)
{
	/* Ensure table_beginscan_tidrange() was used. */
	Assert((sscan->rs_flags & SO_TYPE_TIDRANGESCAN) != 0);

	return sscan->rs_rd->rd_tableam->scan_getnextslot_tidrange(sscan,
															   direction,
															   slot);
}


/* ----------------------------------------------------------------------------
 * Parallel table scan related functions.
 * ----------------------------------------------------------------------------
 */

/*
 * Estimate the size of shared memory needed for a parallel scan of this
 * relation.
 */
extern Size table_parallelscan_estimate(Relation rel, Snapshot snapshot);

/*
 * Initialize ParallelTableScanDesc for a parallel scan of this
 * relation.  `pscan` needs to be sized according to parallelscan_estimate()
 * for the same relation.  Call this just once in the leader process; then,
 * individual workers attach via table_beginscan_parallel.
 */
extern void table_parallelscan_initialize(Relation rel,
										  ParallelTableScanDesc pscan,
										  Snapshot snapshot);

/*
 * Begin a parallel scan.  `pscan` needs to have been initialized with
 * table_parallelscan_initialize(), for the same relation.  The
 * initialization does not need to have happened in this backend.
 *
 * Caller must hold a suitable lock on the relation.
 */
extern TableScanDesc table_beginscan_parallel(Relation rel,
											  ParallelTableScanDesc pscan);

/*
 * Restart a parallel scan.  Call this in the leader process.  Caller is
 * responsible for making sure that all workers have finished the scan
 * beforehand.
 */
static inline void
table_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan)
{
	rel->rd_tableam->parallelscan_reinitialize(rel, pscan);
}


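/*
 * Usage sketch (illustrative, not part of this header; the shm_toc/DSM
 * plumbing is elided).  The leader sizes and initializes the shared
 * descriptor once, then every participant attaches to it:
 *
 *		In the leader:
 *			Size		sz = table_parallelscan_estimate(rel, snapshot);
 *			ParallelTableScanDesc pscan = shm_toc_allocate(toc, sz);
 *
 *			table_parallelscan_initialize(rel, pscan, snapshot);
 *
 *		In each participating process:
 *			TableScanDesc scan = table_beginscan_parallel(rel, pscan);
 *
 * Compare ExecSeqScanInitializeDSM() and ExecSeqScanInitializeWorker() in
 * src/backend/executor/nodeSeqscan.c.
 */
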
/* ----------------------------------------------------------------------------
 * Index scan related functions.
 * ----------------------------------------------------------------------------
 */

/*
 * Prepare to fetch tuples from the relation, as needed when fetching tuples
 * for an index scan.
 *
 * Tuples for an index scan can then be fetched via table_index_fetch_tuple().
 */
static inline IndexFetchTableData *
table_index_fetch_begin(Relation rel)
{
	return rel->rd_tableam->index_fetch_begin(rel);
}

/*
 * Reset index fetch.  Typically this will release cross index fetch
 * resources held in IndexFetchTableData.
 */
static inline void
table_index_fetch_reset(struct IndexFetchTableData *scan)
{
	scan->rel->rd_tableam->index_fetch_reset(scan);
}

/*
 * Release resources and deallocate index fetch.
 */
static inline void
table_index_fetch_end(struct IndexFetchTableData *scan)
{
	scan->rel->rd_tableam->index_fetch_end(scan);
}

/*
 * Fetches, as part of an index scan, tuple at `tid` into `slot`, after doing
 * a visibility test according to `snapshot`.  If a tuple was found and passed
 * the visibility test, returns true, false otherwise.  Note that *tid may be
 * modified when we return true (see later remarks on multiple row versions
 * reachable via a single index entry).
 *
 * *call_again needs to be false on the first call to
 * table_index_fetch_tuple() for a tid.  If there potentially is another tuple
 * matching the tid, *call_again will be set to true, signaling that
 * table_index_fetch_tuple() should be called again for the same tid.
 *
 * *all_dead, if all_dead is not NULL, will be set to true by
 * table_index_fetch_tuple() iff it is guaranteed that no backend needs to see
 * that tuple.  Index AMs can use that to avoid returning that tid in future
 * searches.
 *
 * The difference between this function and table_tuple_fetch_row_version()
 * is that this function returns the currently visible version of a row if
 * the AM supports storing multiple row versions reachable via a single index
 * entry (like heap's HOT).  Whereas table_tuple_fetch_row_version() only
 * evaluates the tuple exactly at `tid`.  Outside of index entry -> table
 * tuple lookups, table_tuple_fetch_row_version() is what's usually needed.
 */
static inline bool
table_index_fetch_tuple(struct IndexFetchTableData *scan,
						ItemPointer tid,
						Snapshot snapshot,
						TupleTableSlot *slot,
						bool *call_again, bool *all_dead)
{
	/*
	 * We don't expect direct calls to table_index_fetch_tuple with valid
	 * CheckXidAlive for catalog or regular tables.  See detailed comments in
	 * xact.c where these variables are declared.
	 */
	if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
		elog(ERROR, "unexpected table_index_fetch_tuple call during logical decoding");

	return scan->rel->rd_tableam->index_fetch_tuple(scan, tid, snapshot,
													slot, call_again,
													all_dead);
}

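/*
 * Usage sketch (illustrative, not part of this header): because a single
 * index entry may reach several row versions (e.g. a heap HOT chain),
 * callers drive fetching with the *call_again protocol described above:
 *
 *		bool		call_again = false;
 *		bool		all_dead = false;
 *		bool		found;
 *
 *		do
 *		{
 *			found = table_index_fetch_tuple(scan, tid, snapshot, slot,
 *											&call_again, &all_dead);
 *		} while (!found && call_again);
 *
 * Compare index_fetch_heap() in src/backend/access/index/indexam.c.
 */
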
/*
 * This is a convenience wrapper around table_index_fetch_tuple() which
 * returns whether there are table tuple items corresponding to an index
 * entry.  This likely is only useful to verify if there's a conflict in a
 * unique index.
 */
extern bool table_index_fetch_tuple_check(Relation rel,
										  ItemPointer tid,
										  Snapshot snapshot,
										  bool *all_dead);


/* ------------------------------------------------------------------------
 * Functions for non-modifying operations on individual tuples
 * ------------------------------------------------------------------------
 */


/*
 * Fetch tuple at `tid` into `slot`, after doing a visibility test according
 * to `snapshot`.  If a tuple was found and passed the visibility test,
 * returns true, false otherwise.
 *
 * See table_index_fetch_tuple's comment about what the difference between
 * these functions is.  It is correct to use this function outside of index
 * entry->table tuple lookups.
 */
static inline bool
table_tuple_fetch_row_version(Relation rel,
							  ItemPointer tid,
							  Snapshot snapshot,
							  TupleTableSlot *slot)
{
	/*
	 * We don't expect direct calls to table_tuple_fetch_row_version with
	 * valid CheckXidAlive for catalog or regular tables.  See detailed
	 * comments in xact.c where these variables are declared.
	 */
	if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
		elog(ERROR, "unexpected table_tuple_fetch_row_version call during logical decoding");

	return rel->rd_tableam->tuple_fetch_row_version(rel, tid, snapshot, slot);
}

/*
 * Verify that `tid` is a potentially valid tuple identifier.  That doesn't
 * mean that the pointed to row needs to exist or be visible, but that
 * attempting to fetch the row (e.g. with table_tuple_get_latest_tid() or
 * table_tuple_fetch_row_version()) should not error out if called with that
 * tid.
 *
 * `scan` needs to have been started via table_beginscan().
 */
static inline bool
table_tuple_tid_valid(TableScanDesc scan, ItemPointer tid)
{
	return scan->rs_rd->rd_tableam->tuple_tid_valid(scan, tid);
}

/*
 * Return the latest version of the tuple at `tid`, by updating `tid` to
 * point at the newest version.
 */
extern void table_tuple_get_latest_tid(TableScanDesc scan, ItemPointer tid);

/*
 * Return true iff tuple in slot satisfies the snapshot.
 *
 * This assumes the slot's tuple is valid, and of the appropriate type for the
 * AM.
 *
 * Some AMs might modify the data underlying the tuple as a side-effect.  If
 * so they ought to mark the relevant buffer dirty.
 */
static inline bool
table_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
							   Snapshot snapshot)
{
	return rel->rd_tableam->tuple_satisfies_snapshot(rel, slot, snapshot);
}

/*
 * Determine which index tuples are safe to delete based on their table TID.
 *
 * Determines which entries from index AM caller's TM_IndexDeleteOp state
 * point to vacuumable table tuples.  Entries that are found by tableam to be
 * vacuumable are naturally safe for index AM to delete, and so get directly
 * marked as deletable.  See comments above TM_IndexDelete and comments above
 * TM_IndexDeleteOp for full details.
 *
 * Returns a latestRemovedXid transaction ID that caller generally places in
 * its index deletion WAL record.  This might be used during subsequent REDO
 * of the WAL record when in Hot Standby mode -- a recovery conflict for the
 * index deletion operation might be required on the standby.
 */
static inline TransactionId
table_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
{
	return rel->rd_tableam->index_delete_tuples(rel, delstate);
}


/* ----------------------------------------------------------------------------
 *  Functions for manipulations of physical tuples.
 * ----------------------------------------------------------------------------
 */

/*
 * Insert a tuple from a slot into table AM routine.
 *
 * The options bitmask allows the caller to specify options that may change
 * the behaviour of the AM.  The AM will ignore options that it does not
 * support.
 *
 * If the TABLE_INSERT_SKIP_FSM option is specified, AMs are free to not
 * reuse free space in the relation.  This can save some cycles when we know
 * the relation is new and doesn't contain useful amounts of free space.
 * TABLE_INSERT_SKIP_FSM is commonly passed directly to
 * RelationGetBufferForTuple.  See that method for more information.
 *
 * TABLE_INSERT_FROZEN should only be specified for inserts into
 * relfilenodes created during the current subtransaction and when
 * there are no prior snapshots or pre-existing portals open.
 * This causes rows to be frozen, which is an MVCC violation and
 * requires explicit options chosen by user.
 *
 * TABLE_INSERT_NO_LOGICAL force-disables the emitting of logical decoding
 * information for the tuple.  This should solely be used during table
 * rewrites where RelationIsLogicallyLogged(relation) is not yet accurate for
 * the new relation.
 *
 * Note that most of these options will be applied when inserting into the
 * heap's TOAST table, too, if the tuple requires any out-of-line data.
 *
 * The BulkInsertState object (if any; bistate can be NULL for default
 * behavior) is also just passed through to RelationGetBufferForTuple.  If
 * `bistate` is provided, table_finish_bulk_insert() needs to be called.
 *
 * On return the slot's tts_tid and tts_tableOid are updated to reflect the
 * insertion.  But note that any toasting of fields within the slot is NOT
 * reflected in the slot's contents.
 */
static inline void
table_tuple_insert(Relation rel, TupleTableSlot *slot, CommandId cid,
				   int options, struct BulkInsertStateData *bistate)
{
	rel->rd_tableam->tuple_insert(rel, slot, cid, options,
								  bistate);
}

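/*
 * Usage sketch (illustrative, not part of this header): forming a virtual
 * tuple in an AM-appropriate slot and inserting it; `values`, `isnull` and
 * `natts` are hypothetical:
 *
 *		TupleTableSlot *slot = table_slot_create(rel, NULL);
 *
 *		ExecClearTuple(slot);
 *		memcpy(slot->tts_values, values, natts * sizeof(Datum));
 *		memcpy(slot->tts_isnull, isnull, natts * sizeof(bool));
 *		ExecStoreVirtualTuple(slot);
 *
 *		table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);
 *		ExecDropSingleTupleTableSlot(slot);
 */
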
/*
 * Perform a "speculative insertion".  These can be backed out afterwards
 * without aborting the whole transaction.  Other sessions can wait for the
 * speculative insertion to be confirmed, turning it into a regular tuple, or
 * aborted, as if it never existed.  Speculatively inserted tuples behave as
 * "value locks" of short duration, used to implement INSERT .. ON CONFLICT.
 *
 * A transaction having performed a speculative insertion has to either
 * abort, or finish the speculative insertion with
 * table_tuple_complete_speculative(succeeded = ...).
 */
static inline void
table_tuple_insert_speculative(Relation rel, TupleTableSlot *slot,
							   CommandId cid, int options,
							   struct BulkInsertStateData *bistate,
							   uint32 specToken)
{
	rel->rd_tableam->tuple_insert_speculative(rel, slot, cid, options,
											  bistate, specToken);
}

/*
 * Complete "speculative insertion" started in the same transaction.  If
 * succeeded is true, the tuple is fully inserted, if false, it's removed.
 */
static inline void
table_tuple_complete_speculative(Relation rel, TupleTableSlot *slot,
								 uint32 specToken, bool succeeded)
{
	rel->rd_tableam->tuple_complete_speculative(rel, slot, specToken,
												succeeded);
}

/*
 * Insert multiple tuples into a table.
 *
 * This is like table_tuple_insert(), but inserts multiple tuples in one
 * operation.  That's often faster than calling table_tuple_insert() in a
 * loop, because e.g. the AM can reduce WAL logging and page locking
 * overhead.
 *
 * Except for taking `nslots` tuples as input, and an array of
 * TupleTableSlots in `slots`, the parameters for table_multi_insert() are
 * the same as for table_tuple_insert().
 *
 * Note: this leaks memory into the current memory context.  You can create a
 * temporary context before calling this, if that's a problem.
 */
static inline void
table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots,
				   CommandId cid, int options, struct BulkInsertStateData *bistate)
{
	rel->rd_tableam->multi_insert(rel, slots, nslots,
								  cid, options, bistate);
}

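/*
 * Usage sketch (illustrative, not part of this header): COPY FROM buffers a
 * batch of slots and flushes them in one call, keeping a BulkInsertState for
 * the whole load; `mycid`, `ti_options` and `nbuffered` are hypothetical:
 *
 *		BulkInsertState bistate = GetBulkInsertState();
 *
 *		... fill slots[0 .. nbuffered - 1] ...
 *		table_multi_insert(rel, slots, nbuffered, mycid, ti_options, bistate);
 *
 *		FreeBulkInsertState(bistate);
 *		table_finish_bulk_insert(rel, ti_options);
 *
 * GetBulkInsertState()/FreeBulkInsertState() are declared in
 * access/heapam.h; compare CopyMultiInsertBufferFlush() in copyfrom.c.
 */
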
/*
 * Delete a tuple.
 *
 * NB: do not call this directly unless prepared to deal with
 * concurrent-update conditions.  Use simple_table_tuple_delete instead.
 *
 * Input parameters:
 *	relation - table to be modified (caller must hold suitable lock)
 *	tid - TID of tuple to be deleted
 *	cid - delete command ID (used for visibility test, and stored into
 *		cmax if successful)
 *	crosscheck - if not InvalidSnapshot, also check tuple against this
 *	wait - true if should wait for any conflicting update to commit/abort
 * Output parameters:
 *	tmfd - filled in failure cases (see below)
 *	changingPart - true iff the tuple is being moved to another partition
 *		table due to an update of the partition key.  Otherwise, false.
 *
 * Normal, successful return value is TM_Ok, which means we did actually
 * delete it.  Failure return codes are TM_SelfModified, TM_Updated, and
 * TM_BeingModified (the last only possible if wait == false).
 *
 * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
 * t_xmax, and, if possible, t_cmax.  See comments for struct TM_FailureData
 * for additional info.
 */
static inline TM_Result
table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid,
				   Snapshot snapshot, Snapshot crosscheck, bool wait,
				   TM_FailureData *tmfd, bool changingPart)
{
	return rel->rd_tableam->tuple_delete(rel, tid, cid,
										 snapshot, crosscheck,
										 wait, tmfd, changingPart);
}

1469 /*
1470  * Update a tuple.
1471  *
1472  * NB: do not call this directly unless you are prepared to deal with
1473  * concurrent-update conditions. Use simple_table_tuple_update instead.
1474  *
1475  * Input parameters:
1476  * relation - table to be modified (caller must hold suitable lock)
1477  * otid - TID of old tuple to be replaced
1478  * slot - newly constructed tuple data to store
1479  * cid - update command ID (used for visibility test, and stored into
1480  * cmax/cmin if successful)
1481  * crosscheck - if not InvalidSnapshot, also check old tuple against this
1482  * wait - true if should wait for any conflicting update to commit/abort
1483  * Output parameters:
1484  * tmfd - filled in failure cases (see below)
1485  * lockmode - filled with lock mode acquired on tuple
1486  * update_indexes - in success cases this is set to true if new index entries
1487  * are required for this tuple
1488  *
1489  * Normal, successful return value is TM_Ok, which means we did actually
1490  * update it. Failure return codes are TM_SelfModified, TM_Updated, and
1491  * TM_BeingModified (the last only possible if wait == false).
1492  *
1493  * On success, the slot's tts_tid and tts_tableOid are updated to match the new
1494  * stored tuple; in particular, slot->tts_tid is set to the TID where the
1495  * new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT
1496  * update was done. However, any TOAST changes in the new tuple's
1497  * data are not reflected into *newtup.
1498  *
1499  * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
1500  * t_xmax, and, if possible, t_cmax. See comments for struct TM_FailureData
1501  * for additional info.
1502  */
1503 static inline TM_Result
1505  CommandId cid, Snapshot snapshot, Snapshot crosscheck,
1506  bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode,
1507  bool *update_indexes)
1508 {
1509  return rel->rd_tableam->tuple_update(rel, otid, slot,
1510  cid, snapshot, crosscheck,
1511  wait, tmfd,
1512  lockmode, update_indexes);
1513 }
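
/*
 * Editor's sketch, not part of tableam.h: the success path of an update; the
 * failure codes would be handled as in the delete sketch above. Assumes an
 * active snapshot and that `newslot` holds the replacement tuple.
 */
static void
example_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *newslot)
{
    TM_FailureData tmfd;
    LockTupleMode lockmode;
    bool        update_indexes;

    if (table_tuple_update(rel, otid, newslot,
                           GetCurrentCommandId(true),
                           GetActiveSnapshot(), InvalidSnapshot,
                           true /* wait */, &tmfd, &lockmode,
                           &update_indexes) == TM_Ok && update_indexes)
    {
        /* no HOT update was possible: insert new index entries here */
    }
}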
1514 
1515 /*
1516  * Lock a tuple in the specified mode.
1517  *
1518  * Input parameters:
1519  * relation: relation containing tuple (caller must hold suitable lock)
1520  * tid: TID of tuple to lock
1521  * snapshot: snapshot to use for visibility determinations
1522  * cid: current command ID (used for visibility test, and stored into
1523  * tuple's cmax if lock is successful)
1524  * mode: lock mode desired
1525  * wait_policy: what to do if tuple lock is not available
1526  * flags:
1527  * If TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS, follow the update chain to
1528  * also lock descendant tuples if lock modes don't conflict.
1529  * If TUPLE_LOCK_FLAG_FIND_LAST_VERSION, follow the update chain and lock
1530  * the latest version.
1531  *
1532  * Output parameters:
1533  * *slot: contains the target tuple
1534  * *tmfd: filled in failure cases (see below)
1535  *
1536  * Function result may be:
1537  * TM_Ok: lock was successfully acquired
1538  * TM_Invisible: lock failed because tuple was never visible to us
1539  * TM_SelfModified: lock failed because tuple updated by self
1540  * TM_Updated: lock failed because tuple updated by other xact
1541  * TM_Deleted: lock failed because tuple deleted by other xact
1542  * TM_WouldBlock: lock couldn't be acquired and wait_policy is skip
1543  *
1544  * In the failure cases other than TM_Invisible and TM_Deleted, the routine
1545  * fills *tmfd with the tuple's t_ctid, t_xmax, and, if possible, t_cmax. See
1546  * comments for struct TM_FailureData for additional info.
1547  */
1548 static inline TM_Result
1549 table_tuple_lock(Relation rel, ItemPointer tid, Snapshot snapshot,
1550  TupleTableSlot *slot, CommandId cid, LockTupleMode mode,
1551  LockWaitPolicy wait_policy, uint8 flags,
1552  TM_FailureData *tmfd)
1553 {
1554  return rel->rd_tableam->tuple_lock(rel, tid, snapshot, slot,
1555  cid, mode, wait_policy,
1556  flags, tmfd);
1557 }
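
/*
 * Editor's sketch, not part of tableam.h: a SKIP LOCKED style row lock that
 * chases to the latest tuple version. Assumes `slot` was created for this
 * relation (e.g. via table_slot_create()).
 */
static bool
example_lock_row(Relation rel, ItemPointer tid, TupleTableSlot *slot)
{
    TM_FailureData tmfd;

    return table_tuple_lock(rel, tid, GetActiveSnapshot(), slot,
                            GetCurrentCommandId(false),
                            LockTupleExclusive, LockWaitSkip,
                            TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
                            &tmfd) == TM_Ok;    /* false => skip this row */
}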
1558 
1559 /*
1560  * Perform operations necessary to complete insertions made via
1561  * tuple_insert and multi_insert with a BulkInsertState specified.
1562  */
1563 static inline void
1564 table_finish_bulk_insert(Relation rel, int options)
1565 {
1566  /* optional callback */
1567  if (rel->rd_tableam && rel->rd_tableam->finish_bulk_insert)
1568  rel->rd_tableam->finish_bulk_insert(rel, options);
1569 }
1570 
1571 
1572 /* ------------------------------------------------------------------------
1573  * DDL related functionality.
1574  * ------------------------------------------------------------------------
1575  */
1576 
1577 /*
1578  * Create storage for `rel` in `newrnode`, with persistence set to
1579  * `persistence`.
1580  *
1581  * This is used both during relation creation and various DDL operations to
1582  * create a new relfilenode that can be filled from scratch. When creating
1583  * new storage for an existing relfilenode, this should be called before the
1584  * relcache entry has been updated.
1585  *
1586  * *freezeXid, *minmulti are set to the xid / multixact horizon for the table
1587  * that pg_class.{relfrozenxid, relminmxid} have to be set to.
1588  */
1589 static inline void
1590 table_relation_set_new_filenode(Relation rel,
1591  const RelFileNode *newrnode,
1592  char persistence,
1593  TransactionId *freezeXid,
1594  MultiXactId *minmulti)
1595 {
1596  rel->rd_tableam->relation_set_new_filenode(rel, newrnode, persistence,
1597  freezeXid, minmulti);
1598 }
1599 
1600 /*
1601  * Remove all table contents from `rel`, in a non-transactional manner.
1602  * Non-transactional means that there's no need to support rollbacks. This
1603  * is commonly only used to perform truncations for relfilenodes created in
1604  * the current transaction.
1605  */
1606 static inline void
1607 table_relation_nontransactional_truncate(Relation rel)
1608 {
1609  rel->rd_tableam->relation_nontransactional_truncate(rel);
1610 }
1611 
1612 /*
1613  * Copy data from `rel` into the new relfilenode `newrnode`. The new
1614  * relfilenode must not have storage associated with it before this function
1615  * is called. This is only supposed to be used for low-level operations like
1616  * changing a relation's tablespace.
1617  */
1618 static inline void
1619 table_relation_copy_data(Relation rel, const RelFileNode *newrnode)
1620 {
1621  rel->rd_tableam->relation_copy_data(rel, newrnode);
1622 }
1623 
1624 /*
1625  * Copy data from `OldTable` into `NewTable`, as part of a CLUSTER or VACUUM
1626  * FULL.
1627  *
1628  * Additional Input parameters:
1629  * - use_sort - if true, the table contents are sorted appropriately for
1630  * `OldIndex`; if false and OldIndex is not InvalidOid, the data is copied
1631  * in that index's order; if false and OldIndex is InvalidOid, no sorting is
1632  * performed
1633  * - OldIndex - see use_sort
1634  * - OldestXmin - computed by vacuum_set_xid_limits(), even when
1635  * not needed for the relation's AM
1636  * - *xid_cutoff - ditto
1637  * - *multi_cutoff - ditto
1638  *
1639  * Output parameters:
1640  * - *xid_cutoff - rel's new relfrozenxid value, may be invalid
1641  * - *multi_cutoff - rel's new relminmxid value, may be invalid
1642  * - *tups_vacuumed - stats, for logging, if appropriate for AM
1643  * - *tups_recently_dead - stats, for logging, if appropriate for AM
1644  */
1645 static inline void
1646 table_relation_copy_for_cluster(Relation OldTable, Relation NewTable,
1647  Relation OldIndex,
1648  bool use_sort,
1649  TransactionId OldestXmin,
1650  TransactionId *xid_cutoff,
1651  MultiXactId *multi_cutoff,
1652  double *num_tuples,
1653  double *tups_vacuumed,
1654  double *tups_recently_dead)
1655 {
1656  OldTable->rd_tableam->relation_copy_for_cluster(OldTable, NewTable, OldIndex,
1657  use_sort, OldestXmin,
1658  xid_cutoff, multi_cutoff,
1659  num_tuples, tups_vacuumed,
1660  tups_recently_dead);
1661 }
1662 
1663 /*
1664  * Perform VACUUM on the relation. The VACUUM can be triggered by a user or by
1665  * autovacuum. The specific actions performed by the AM will depend heavily on
1666  * the individual AM.
1667  *
1668  * On entry a transaction needs to already have been established, and the
1669  * table is locked with a ShareUpdateExclusive lock.
1670  *
1671  * Note that neither VACUUM FULL (nor CLUSTER), nor ANALYZE goes through this
1672  * routine, even if (for ANALYZE) it is part of the same VACUUM command.
1673  */
1674 static inline void
1675 table_relation_vacuum(Relation rel, struct VacuumParams *params,
1676  BufferAccessStrategy bstrategy)
1677 {
1678  rel->rd_tableam->relation_vacuum(rel, params, bstrategy);
1679 }
1680 
1681 /*
1682  * Prepare to analyze block `blockno` of `scan`. The scan needs to have been
1683  * started with table_beginscan_analyze(). Note that this routine might
1684  * acquire resources like locks that are held until
1685  * table_scan_analyze_next_tuple() returns false.
1686  *
1687  * Returns false if block is unsuitable for sampling, true otherwise.
1688  */
1689 static inline bool
1690 table_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno,
1691  BufferAccessStrategy bstrategy)
1692 {
1693  return scan->rs_rd->rd_tableam->scan_analyze_next_block(scan, blockno,
1694  bstrategy);
1695 }
1696 
1697 /*
1698  * Iterate over tuples in the block selected with
1699  * table_scan_analyze_next_block() (which needs to have returned true, and
1700  * this routine may not have returned false for the same block before). If a
1701  * tuple that's suitable for sampling is found, true is returned and a tuple
1702  * is stored in `slot`.
1703  *
1704  * *liverows and *deadrows are incremented according to the encountered
1705  * tuples.
1706  */
1707 static inline bool
1708 table_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin,
1709  double *liverows, double *deadrows,
1710  TupleTableSlot *slot)
1711 {
1712  return scan->rs_rd->rd_tableam->scan_analyze_next_tuple(scan, OldestXmin,
1713  liverows, deadrows,
1714  slot);
1715 }
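
/*
 * Editor's sketch, not part of tableam.h: the block/tuple nesting that
 * ANALYZE's acquire_sample_rows() (commands/analyze.c) builds around the two
 * functions above. How `blockno` is sampled is elided here.
 */
static void
example_analyze_one_block(TableScanDesc scan, BlockNumber blockno,
                          BufferAccessStrategy bstrategy,
                          TransactionId oldest_xmin, TupleTableSlot *slot)
{
    double      liverows = 0,
                deadrows = 0;

    if (!table_scan_analyze_next_block(scan, blockno, bstrategy))
        return;                 /* block unsuitable for sampling */

    while (table_scan_analyze_next_tuple(scan, oldest_xmin,
                                         &liverows, &deadrows, slot))
    {
        /* add the tuple now in `slot` to the sample reservoir */
    }
}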
1716 
1717 /*
1718  * table_index_build_scan - scan the table to find tuples to be indexed
1719  *
1720  * This is called back from an access-method-specific index build procedure
1721  * after the AM has done whatever setup it needs. The parent table relation
1722  * is scanned to find tuples that should be entered into the index. Each
1723  * such tuple is passed to the AM's callback routine, which does the right
1724  * things to add it to the new index. After we return, the AM's index
1725  * build procedure does whatever cleanup it needs.
1726  *
1727  * The total count of live tuples is returned. This is for updating pg_class
1728  * statistics. (It's annoying not to be able to do that here, but we want to
1729  * merge that update with others; see index_update_stats.) Note that the
1730  * index AM itself must keep track of the number of index tuples; we don't do
1731  * so here because the AM might reject some of the tuples for its own reasons,
1732  * such as being unable to store NULLs.
1733  *
1734  * If 'progress', the PROGRESS_SCAN_BLOCKS_TOTAL counter is updated when
1735  * starting the scan, and PROGRESS_SCAN_BLOCKS_DONE is updated as we go along.
1736  *
1737  * A side effect is to set indexInfo->ii_BrokenHotChain to true if we detect
1738  * any potentially broken HOT chains. Currently, we set this if there are any
1739  * RECENTLY_DEAD or DELETE_IN_PROGRESS entries in a HOT chain, without trying
1740  * very hard to detect whether they're really incompatible with the chain tip.
1741  * This only really makes sense for the heap AM; it might need to be generalized
1742  * for other AMs later.
1743  */
1744 static inline double
1745 table_index_build_scan(Relation table_rel,
1746  Relation index_rel,
1747  struct IndexInfo *index_info,
1748  bool allow_sync,
1749  bool progress,
1750  IndexBuildCallback callback,
1751  void *callback_state,
1752  TableScanDesc scan)
1753 {
1754  return table_rel->rd_tableam->index_build_range_scan(table_rel,
1755  index_rel,
1756  index_info,
1757  allow_sync,
1758  false,
1759  progress,
1760  0,
1761  InvalidBlockNumber,
1762  callback,
1763  callback_state,
1764  scan);
1765 }
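
/*
 * Editor's sketch, not part of tableam.h: the shape of the IndexBuildCallback
 * an index AM passes to table_index_build_scan(); values/isnull hold the
 * already-formed index column values for the table tuple at `tid`.
 */
static void
example_build_callback(Relation index, ItemPointer tid, Datum *values,
                       bool *isnull, bool tupleIsAlive, void *state)
{
    /* form an index entry from values/isnull and insert it, pointing at tid */
}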
1766 
1767 /*
1768  * As table_index_build_scan(), except that instead of scanning the complete
1769  * table, only the given number of blocks are scanned. Scan to end-of-rel can
1770  * be signaled by passing InvalidBlockNumber as numblocks. Note that
1771  * restricting the range to scan cannot be done when requesting syncscan.
1772  *
1773  * When "anyvisible" mode is requested, all tuples visible to any transaction
1774  * are indexed and counted as live, including those inserted or deleted by
1775  * transactions that are still in progress.
1776  */
1777 static inline double
1778 table_index_build_range_scan(Relation table_rel,
1779  Relation index_rel,
1780  struct IndexInfo *index_info,
1781  bool allow_sync,
1782  bool anyvisible,
1783  bool progress,
1784  BlockNumber start_blockno,
1785  BlockNumber numblocks,
1786  IndexBuildCallback callback,
1787  void *callback_state,
1788  TableScanDesc scan)
1789 {
1790  return table_rel->rd_tableam->index_build_range_scan(table_rel,
1791  index_rel,
1792  index_info,
1793  allow_sync,
1794  anyvisible,
1795  progress,
1796  start_blockno,
1797  numblocks,
1798  callback,
1799  callback_state,
1800  scan);
1801 }
1802 
1803 /*
1804  * table_index_validate_scan - second table scan for concurrent index build
1805  *
1806  * See validate_index() for an explanation.
1807  */
1808 static inline void
1809 table_index_validate_scan(Relation table_rel,
1810  Relation index_rel,
1811  struct IndexInfo *index_info,
1812  Snapshot snapshot,
1813  struct ValidateIndexState *state)
1814 {
1815  table_rel->rd_tableam->index_validate_scan(table_rel,
1816  index_rel,
1817  index_info,
1818  snapshot,
1819  state);
1820 }
1821 
1822 
1823 /* ----------------------------------------------------------------------------
1824  * Miscellaneous functionality
1825  * ----------------------------------------------------------------------------
1826  */
1827 
1828 /*
1829  * Return the current size of `rel` in bytes. If `forkNumber` is
1830  * InvalidForkNumber, return the relation's overall size, otherwise the size
1831  * for the indicated fork.
1832  *
1833  * Note that the overall size might not equal the sum of sizes for the
1834  * individual forks for some AMs, e.g. because the AM's storage does
1835  * not neatly map onto the builtin types of forks.
1836  */
1837 static inline uint64
1838 table_relation_size(Relation rel, ForkNumber forkNumber)
1839 {
1840  return rel->rd_tableam->relation_size(rel, forkNumber);
1841 }
1842 
1843 /*
1844  * table_relation_needs_toast_table - does this relation need a toast table?
1845  */
1846 static inline bool
1847 table_relation_needs_toast_table(Relation rel)
1848 {
1849  return rel->rd_tableam->relation_needs_toast_table(rel);
1850 }
1851 
1852 /*
1853  * Return the OID of the AM that should be used to implement the TOAST table
1854  * for this relation.
1855  */
1856 static inline Oid
1857 table_relation_toast_am(Relation rel)
1858 {
1859  return rel->rd_tableam->relation_toast_am(rel);
1860 }
1861 
1862 /*
1863  * Fetch all or part of a TOAST value from a TOAST table.
1864  *
1865  * If this AM is never used to implement a TOAST table, then this callback
1866  * is not needed. But, if toasted values are ever stored in a table of this
1867  * type, then you will need this callback.
1868  *
1869  * toastrel is the relation in which the toasted value is stored.
1870  *
1871  * valueid identifies which toast value is to be fetched. For the heap,
1872  * this corresponds to the values stored in the chunk_id column.
1873  *
1874  * attrsize is the total size of the toast value to be fetched.
1875  *
1876  * sliceoffset is the offset within the toast value of the first byte that
1877  * should be fetched.
1878  *
1879  * slicelength is the number of bytes from the toast value that should be
1880  * fetched.
1881  *
1882  * result is caller-allocated space into which the fetched bytes should be
1883  * stored.
1884  */
1885 static inline void
1886 table_relation_fetch_toast_slice(Relation toastrel, Oid valueid,
1887  int32 attrsize, int32 sliceoffset,
1888  int32 slicelength, struct varlena *result)
1889 {
1890  toastrel->rd_tableam->relation_fetch_toast_slice(toastrel, valueid,
1891  attrsize,
1892  sliceoffset, slicelength,
1893  result);
1894 }
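
/*
 * Editor's sketch, not part of tableam.h: fetching a prefix of a toasted
 * value; compare toast_fetch_datum_slice() in access/common/detoast.c.
 * `result` must be preallocated by the caller with room for the slice.
 */
static void
example_fetch_toast_prefix(Relation toastrel, Oid valueid, int32 attrsize,
                           struct varlena *result)
{
    table_relation_fetch_toast_slice(toastrel, valueid, attrsize,
                                     0 /* sliceoffset */,
                                     Min(attrsize, 100) /* slicelength */,
                                     result);
}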
1895 
1896 
1897 /* ----------------------------------------------------------------------------
1898  * Planner related functionality
1899  * ----------------------------------------------------------------------------
1900  */
1901 
1902 /*
1903  * Estimate the current size of the relation, as an AM specific workhorse for
1904  * estimate_rel_size(). Look there for an explanation of the parameters.
1905  */
1906 static inline void
1907 table_relation_estimate_size(Relation rel, int32 *attr_widths,
1908  BlockNumber *pages, double *tuples,
1909  double *allvisfrac)
1910 {
1911  rel->rd_tableam->relation_estimate_size(rel, attr_widths, pages, tuples,
1912  allvisfrac);
1913 }
1914 
1915 
1916 /* ----------------------------------------------------------------------------
1917  * Executor related functionality
1918  * ----------------------------------------------------------------------------
1919  */
1920 
1921 /*
1922  * Prepare to fetch / check / return tuples from `tbmres->blockno` as part of
1923  * a bitmap table scan. `scan` needs to have been started via
1924  * table_beginscan_bm(). Returns false if there are no tuples to be found on
1925  * the page, true otherwise.
1926  *
1927  * Note that this is an optionally implemented callback; it should only be
1928  * used after verifying that the AM provides it (at plan time or the like).
1929  */
1930 static inline bool
1931 table_scan_bitmap_next_block(TableScanDesc scan,
1932  struct TBMIterateResult *tbmres)
1933 {
1934  /*
1935  * We don't expect direct calls to table_scan_bitmap_next_block with valid
1936  * CheckXidAlive for catalog or regular tables. See detailed comments in
1937  * xact.c where these variables are declared.
1938  */
1939  if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
1940  elog(ERROR, "unexpected table_scan_bitmap_next_block call during logical decoding");
1941 
1942  return scan->rs_rd->rd_tableam->scan_bitmap_next_block(scan,
1943  tbmres);
1944 }
1945 
1946 /*
1947  * Fetch the next tuple of a bitmap table scan into `slot` and return true if
1948  * a visible tuple was found, false otherwise.
1949  * table_scan_bitmap_next_block() needs to previously have selected a
1950  * block (i.e. returned true), and no previous
1951  * table_scan_bitmap_next_tuple() for the same block may have
1952  * returned false.
1953  */
1954 static inline bool
1955 table_scan_bitmap_next_tuple(TableScanDesc scan,
1956  struct TBMIterateResult *tbmres,
1957  TupleTableSlot *slot)
1958 {
1959  /*
1960  * We don't expect direct calls to table_scan_bitmap_next_tuple with valid
1961  * CheckXidAlive for catalog or regular tables. See detailed comments in
1962  * xact.c where these variables are declared.
1963  */
1964  if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
1965  elog(ERROR, "unexpected table_scan_bitmap_next_tuple call during logical decoding");
1966 
1967  return scan->rs_rd->rd_tableam->scan_bitmap_next_tuple(scan,
1968  tbmres,
1969  slot);
1970 }
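
/*
 * Editor's sketch, not part of tableam.h: the driving loop of a bitmap table
 * scan, condensed from nodeBitmapHeapscan.c. The TBMIterator comes from
 * nodes/tidbitmap.h; prefetching and lossy-page rechecks are elided.
 */
static void
example_bitmap_scan(TableScanDesc scan, TBMIterator *iterator,
                    TupleTableSlot *slot)
{
    TBMIterateResult *tbmres;

    while ((tbmres = tbm_iterate(iterator)) != NULL)
    {
        if (!table_scan_bitmap_next_block(scan, tbmres))
            continue;           /* no matching tuples on this page */

        while (table_scan_bitmap_next_tuple(scan, tbmres, slot))
        {
            /* process the visible tuple in `slot` */
        }
    }
}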
1971 
1972 /*
1973  * Prepare to fetch tuples from the next block in a sample scan. Returns false
1974  * if the sample scan is finished, true otherwise. `scan` needs to have been
1975  * started via table_beginscan_sampling().
1976  *
1977  * This will call the TsmRoutine's NextSampleBlock() callback if necessary
1978  * (i.e. NextSampleBlock is not NULL), or perform a sequential scan over the
1979  * underlying relation.
1980  */
1981 static inline bool
1982 table_scan_sample_next_block(TableScanDesc scan,
1983  struct SampleScanState *scanstate)
1984 {
1985  /*
1986  * We don't expect direct calls to table_scan_sample_next_block with valid
1987  * CheckXidAlive for catalog or regular tables. See detailed comments in
1988  * xact.c where these variables are declared.
1989  */
1990  if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
1991  elog(ERROR, "unexpected table_scan_sample_next_block call during logical decoding");
1992  return scan->rs_rd->rd_tableam->scan_sample_next_block(scan, scanstate);
1993 }
1994 
1995 /*
1996  * Fetch the next sample tuple into `slot` and return true if a visible tuple
1997  * was found, false otherwise. table_scan_sample_next_block() needs to
1998  * previously have selected a block (i.e. returned true), and no previous
1999  * table_scan_sample_next_tuple() for the same block may have returned false.
2000  *
2001  * This will call the TsmRoutine's NextSampleTuple() callback.
2002  */
2003 static inline bool
2004 table_scan_sample_next_tuple(TableScanDesc scan,
2005  struct SampleScanState *scanstate,
2006  TupleTableSlot *slot)
2007 {
2008  /*
2009  * We don't expect direct calls to table_scan_sample_next_tuple with valid
2010  * CheckXidAlive for catalog or regular tables. See detailed comments in
2011  * xact.c where these variables are declared.
2012  */
2013  if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
2014  elog(ERROR, "unexpected table_scan_sample_next_tuple call during logical decoding");
2015  return scan->rs_rd->rd_tableam->scan_sample_next_tuple(scan, scanstate,
2016  slot);
2017 }
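
/*
 * Editor's sketch, not part of tableam.h: the loop nodeSamplescan.c builds
 * around the two functions above; `scanstate` carries the TABLESAMPLE
 * method's (TsmRoutine) state.
 */
static void
example_sample_scan(TableScanDesc scan, struct SampleScanState *scanstate,
                    TupleTableSlot *slot)
{
    while (table_scan_sample_next_block(scan, scanstate))
    {
        while (table_scan_sample_next_tuple(scan, scanstate, slot))
        {
            /* process the sampled tuple in `slot` */
        }
    }
}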
2018 
2019 
2020 /* ----------------------------------------------------------------------------
2021  * Functions to make modifications a bit simpler.
2022  * ----------------------------------------------------------------------------
2023  */
2024 
2025 extern void simple_table_tuple_insert(Relation rel, TupleTableSlot *slot);
2026 extern void simple_table_tuple_delete(Relation rel, ItemPointer tid,
2027  Snapshot snapshot);
2028 extern void simple_table_tuple_update(Relation rel, ItemPointer otid,
2029  TupleTableSlot *slot, Snapshot snapshot,
2030  bool *update_indexes);
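
/*
 * Editor's note, not part of tableam.h: these wrappers suit callers that
 * cannot meaningfully handle concurrent-update failures (e.g. replication
 * apply); they elog(ERROR) instead of returning a TM_Result, so a call is
 * simply, e.g.:
 *
 *     simple_table_tuple_insert(rel, slot);
 */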
2031 
2032 
2033 /* ----------------------------------------------------------------------------
2034  * Helper functions to implement parallel scans for block oriented AMs.
2035  * ----------------------------------------------------------------------------
2036  */
2037 
2038 extern Size table_block_parallelscan_estimate(Relation rel);
2039 extern Size table_block_parallelscan_initialize(Relation rel,
2040  ParallelTableScanDesc pscan);
2041 extern void table_block_parallelscan_reinitialize(Relation rel,
2042  ParallelTableScanDesc pscan);
2043 extern BlockNumber table_block_parallelscan_nextpage(Relation rel,
2044  ParallelBlockTableScanWorker pbscanwork,
2045  ParallelBlockTableScanDesc pbscan);
2046 extern void table_block_parallelscan_startblock_init(Relation rel,
2047  ParallelBlockTableScanWorker pbscanwork,
2048  ParallelBlockTableScanDesc pbscan);
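
/*
 * Editor's note, not part of tableam.h: a block-oriented AM can wire these
 * helpers directly into its TableAmRoutine, as heapam_handler.c does:
 *
 *     .parallelscan_estimate = table_block_parallelscan_estimate,
 *     .parallelscan_initialize = table_block_parallelscan_initialize,
 *     .parallelscan_reinitialize = table_block_parallelscan_reinitialize,
 */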
2049 
2050 
2051 /* ----------------------------------------------------------------------------
2052  * Helper functions to implement relation sizing for block oriented AMs.
2053  * ----------------------------------------------------------------------------
2054  */
2055 
2056 extern uint64 table_block_relation_size(Relation rel, ForkNumber forkNumber);
2057 extern void table_block_relation_estimate_size(Relation rel,
2058  int32 *attr_widths,
2059  BlockNumber *pages,
2060  double *tuples,
2061  double *allvisfrac,
2062  Size overhead_bytes_per_tuple,
2063  Size usable_bytes_per_page);
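
/*
 * Editor's note, not part of tableam.h: an AM that stores tuples on standard
 * pages can implement relation_estimate_size by delegating here with its own
 * per-tuple and per-page overheads; see heapam_estimate_rel_size() in
 * heapam_handler.c for the in-tree example.
 */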
2064 
2065 /* ----------------------------------------------------------------------------
2066  * Functions in tableamapi.c
2067  * ----------------------------------------------------------------------------
2068  */
2069 
2070 extern const TableAmRoutine *GetTableAmRoutine(Oid amhandler);
2071 extern const TableAmRoutine *GetHeapamTableAmRoutine(void);
2072 extern bool check_default_table_access_method(char **newval, void **extra,
2073  GucSource source);
2074 
2075 #endif /* TABLEAM_H */