tableam.c (PostgreSQL source, git master)
1 /*----------------------------------------------------------------------
2  *
3  * tableam.c
4  * Table access method routines too big to be inline functions.
5  *
6  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/access/table/tableam.c
12  *
13  * NOTES
14  * Note that most functions in here are documented in tableam.h, rather than
15  * here. That's because there are a lot of inline functions in tableam.h and
16  * it'd be harder to understand if one constantly had to switch between files.
17  *
18  *----------------------------------------------------------------------
19  */
20 #include "postgres.h"
21 
22 #include <math.h>
23 
24 #include "access/heapam.h" /* for ss_* */
25 #include "access/tableam.h"
26 #include "access/xact.h"
27 #include "optimizer/plancat.h"
28 #include "storage/bufmgr.h"
29 #include "storage/shmem.h"
30 #include "storage/smgr.h"
31 
32 
33 /* GUC variables */
34 char    *default_table_access_method = DEFAULT_TABLE_ACCESS_METHOD;
35 bool     synchronize_seqscans = true;
36 
37 
38 /* ----------------------------------------------------------------------------
39  * Slot functions.
40  * ----------------------------------------------------------------------------
41  */
42 
43 const TupleTableSlotOps *
44 table_slot_callbacks(Relation relation)
45 {
46  const TupleTableSlotOps *tts_cb;
47 
48  if (relation->rd_tableam)
49  tts_cb = relation->rd_tableam->slot_callbacks(relation);
50  else if (relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
51  {
52  /*
53  * Historically FDWs expect to store heap tuples in slots. Continue
54  * handing them one, to make it less painful to adapt FDWs to new
55  * versions. The cost of a heap slot over a virtual slot is pretty
56  * small.
57  */
58  tts_cb = &TTSOpsHeapTuple;
59  }
60  else
61  {
62  /*
63  * These need to be supported, as some parts of the code (like COPY)
64  * need to create slots for such relations too. It seems better to
65  * centralize the knowledge that a heap slot is the right thing in
66  * that case here.
67  */
68  Assert(relation->rd_rel->relkind == RELKIND_VIEW ||
69  relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE);
70  tts_cb = &TTSOpsVirtual;
71  }
72 
73  return tts_cb;
74 }
75 
76 TupleTableSlot *
77 table_slot_create(Relation relation, List **reglist)
78 {
79  const TupleTableSlotOps *tts_cb;
80  TupleTableSlot *slot;
81 
82  tts_cb = table_slot_callbacks(relation);
83  slot = MakeSingleTupleTableSlot(RelationGetDescr(relation), tts_cb);
84 
85  if (reglist)
86  *reglist = lappend(*reglist, slot);
87 
88  return slot;
89 }
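
As an illustration of the two slot helpers above, here is a minimal, hypothetical caller (not part of tableam.c). It assumes only the OID of an existing table; table_open/table_close come from access/table.h and the slot is released with ExecDropSingleTupleTableSlot.

#include "postgres.h"
#include "access/table.h"
#include "access/tableam.h"
#include "executor/tuptable.h"
#include "storage/lockdefs.h"
#include "utils/rel.h"

static void
slot_example(Oid relid)
{
    Relation    rel = table_open(relid, AccessShareLock);
    TupleTableSlot *slot;

    /* the slot type matches whatever the relation's table AM expects */
    slot = table_slot_create(rel, NULL);

    /* ... fill or fetch tuples into the slot here ... */

    ExecDropSingleTupleTableSlot(slot);
    table_close(rel, AccessShareLock);
}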
90 
91 
92 /* ----------------------------------------------------------------------------
93  * Table scan functions.
94  * ----------------------------------------------------------------------------
95  */
96 
97 TableScanDesc
98 table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
99 {
100  uint32 flags = SO_TYPE_SEQSCAN |
101  SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
102  Oid relid = RelationGetRelid(relation);
103  Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
104 
105  return relation->rd_tableam->scan_begin(relation, snapshot, nkeys, key,
106  NULL, flags);
107 }
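
A hedged usage sketch for table_beginscan_catalog: walking pg_class with the scan and slot helpers declared in tableam.h. The function name scan_pg_class_example is made up for the sketch; the calls themselves (table_open, table_scan_getnextslot, table_endscan) are the regular backend APIs.

#include "postgres.h"
#include "access/table.h"
#include "access/tableam.h"
#include "catalog/pg_class.h"
#include "executor/tuptable.h"
#include "storage/lockdefs.h"
#include "utils/rel.h"

static void
scan_pg_class_example(void)
{
    Relation    rel = table_open(RelationRelationId, AccessShareLock);
    TableScanDesc scan = table_beginscan_catalog(rel, 0, NULL);
    TupleTableSlot *slot = table_slot_create(rel, NULL);

    while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
    {
        /* inspect the tuple stored in the slot here */
    }

    ExecDropSingleTupleTableSlot(slot);
    table_endscan(scan);        /* also unregisters the SO_TEMP_SNAPSHOT */
    table_close(rel, AccessShareLock);
}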
108 
109 void
110 table_scan_update_snapshot(TableScanDesc scan, Snapshot snapshot)
111 {
112  Assert(IsMVCCSnapshot(snapshot));
113 
114  RegisterSnapshot(snapshot);
115  scan->rs_snapshot = snapshot;
116  scan->rs_flags |= SO_TEMP_SNAPSHOT;
117 }
118 
119 
120 /* ----------------------------------------------------------------------------
121  * Parallel table scan related functions.
122  * ----------------------------------------------------------------------------
123  */
124 
125 Size
126 table_parallelscan_estimate(Relation rel, Snapshot snapshot)
127 {
128  Size sz = 0;
129 
130  if (IsMVCCSnapshot(snapshot))
131  sz = add_size(sz, EstimateSnapshotSpace(snapshot));
132  else
133  Assert(snapshot == SnapshotAny);
134 
135  sz = add_size(sz, rel->rd_tableam->parallelscan_estimate(rel));
136 
137  return sz;
138 }
139 
140 void
141 table_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan,
142  Snapshot snapshot)
143 {
144  Size snapshot_off = rel->rd_tableam->parallelscan_initialize(rel, pscan);
145 
146  pscan->phs_snapshot_off = snapshot_off;
147 
148  if (IsMVCCSnapshot(snapshot))
149  {
150  SerializeSnapshot(snapshot, (char *) pscan + pscan->phs_snapshot_off);
151  pscan->phs_snapshot_any = false;
152  }
153  else
154  {
155  Assert(snapshot == SnapshotAny);
156  pscan->phs_snapshot_any = true;
157  }
158 }
159 
160 TableScanDesc
161 table_beginscan_parallel(Relation relation, ParallelTableScanDesc parallel_scan)
162 {
163  Snapshot snapshot;
164  uint32 flags = SO_TYPE_SEQSCAN |
165  SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
166 
167  Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
168 
169  if (!parallel_scan->phs_snapshot_any)
170  {
171  /* Snapshot was serialized -- restore it */
172  snapshot = RestoreSnapshot((char *) parallel_scan +
173  parallel_scan->phs_snapshot_off);
174  RegisterSnapshot(snapshot);
175  flags |= SO_TEMP_SNAPSHOT;
176  }
177  else
178  {
179  /* SnapshotAny passed by caller (not serialized) */
180  snapshot = SnapshotAny;
181  }
182 
183  return relation->rd_tableam->scan_begin(relation, snapshot, 0, NULL,
184  parallel_scan, flags);
185 }
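
The three parallel-scan entry points above are meant to be used together. The sketch below is hypothetical and heavily simplified (loosely modeled on what nodeSeqscan.c does): the leader fills in a shared scan descriptor and each worker attaches to it. The dsm_space argument stands in for dynamic shared memory that the leader has already reserved using table_parallelscan_estimate(rel, snapshot).

#include "postgres.h"
#include "access/relscan.h"
#include "access/tableam.h"
#include "utils/snapshot.h"

/* leader side: fill the shared scan state placed in dynamic shared memory */
static ParallelTableScanDesc
leader_setup_parallel_scan(Relation rel, Snapshot snapshot, void *dsm_space)
{
    ParallelTableScanDesc pscan = (ParallelTableScanDesc) dsm_space;

    table_parallelscan_initialize(rel, pscan, snapshot);
    return pscan;
}

/* worker side (and the leader, if it participates): attach and start scanning */
static TableScanDesc
worker_join_parallel_scan(Relation rel, ParallelTableScanDesc pscan)
{
    return table_beginscan_parallel(rel, pscan);
}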
186 
187 
188 /* ----------------------------------------------------------------------------
189  * Index scan related functions.
190  * ----------------------------------------------------------------------------
191  */
192 
193 /*
194  * To perform that check simply start an index scan, create the necessary
195  * slot, do the heap lookup, and shut everything down again. This could be
196  * optimized, but is unlikely to matter from a performance POV. If there
197  * frequently are live index pointers also matching a unique index key, the
198  * CPU overhead of this routine is unlikely to matter.
199  *
200  * Note that *tid may be modified when we return true if the AM supports
201  * storing multiple row versions reachable via a single index entry (like
202  * heap's HOT).
203  */
204 bool
205 table_index_fetch_tuple_check(Relation rel,
206  ItemPointer tid,
207  Snapshot snapshot,
208  bool *all_dead)
209 {
210  IndexFetchTableData *scan;
211  TupleTableSlot *slot;
212  bool call_again = false;
213  bool found;
214 
215  slot = table_slot_create(rel, NULL);
216  scan = table_index_fetch_begin(rel);
217  found = table_index_fetch_tuple(scan, tid, snapshot, slot, &call_again,
218  all_dead);
219  table_index_fetch_end(scan);
220  ExecDropSingleTupleTableSlot(slot);
221 
222  return found;
223 }
224 
225 
226 /* ------------------------------------------------------------------------
227  * Functions for non-modifying operations on individual tuples
228  * ------------------------------------------------------------------------
229  */
230 
231 void
232 table_tuple_get_latest_tid(TableScanDesc scan, ItemPointer tid)
233 {
234  Relation rel = scan->rs_rd;
235  const TableAmRoutine *tableam = rel->rd_tableam;
236 
237  /*
238  * Since this can be called with user-supplied TID, don't trust the input
239  * too much.
240  */
241  if (!tableam->tuple_tid_valid(scan, tid))
242  ereport(ERROR,
243  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
244  errmsg("tid (%u, %u) is not valid for relation \"%s\"",
247  RelationGetRelationName(rel))));
248 
249  tableam->tuple_get_latest_tid(scan, tid);
250 }
251 
252 
253 /* ----------------------------------------------------------------------------
254  * Functions to make modifications a bit simpler.
255  * ----------------------------------------------------------------------------
256  */
257 
258 /*
259  * simple_table_tuple_insert - insert a tuple
260  *
261  * Currently, this routine differs from table_tuple_insert only in supplying a
262  * default command ID and not allowing access to the speedup options.
263  */
264 void
265 simple_table_tuple_insert(Relation rel, TupleTableSlot *slot)
266 {
267  table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);
268 }
269 
270 /*
271  * simple_table_tuple_delete - delete a tuple
272  *
273  * This routine may be used to delete a tuple when concurrent updates of
274  * the target tuple are not expected (for example, because we have a lock
275  * on the relation associated with the tuple). Any failure is reported
276  * via ereport().
277  */
278 void
279 simple_table_tuple_delete(Relation rel, ItemPointer tid, Snapshot snapshot)
280 {
281  TM_Result result;
282  TM_FailureData tmfd;
283 
284  result = table_tuple_delete(rel, tid,
285  GetCurrentCommandId(true),
286  snapshot, InvalidSnapshot,
287  true /* wait for commit */ ,
288  &tmfd, false /* changingPart */ );
289 
290  switch (result)
291  {
292  case TM_SelfModified:
293  /* Tuple was already updated in current command? */
294  elog(ERROR, "tuple already updated by self");
295  break;
296 
297  case TM_Ok:
298  /* done successfully */
299  break;
300 
301  case TM_Updated:
302  elog(ERROR, "tuple concurrently updated");
303  break;
304 
305  case TM_Deleted:
306  elog(ERROR, "tuple concurrently deleted");
307  break;
308 
309  default:
310  elog(ERROR, "unrecognized table_tuple_delete status: %u", result);
311  break;
312  }
313 }
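
A small, hypothetical caller of simple_table_tuple_delete, usable only when no concurrent updates of the target row are expected (for example because the caller holds a sufficient lock). The name delete_row_example and the use of GetActiveSnapshot() are assumptions made for the sketch, not part of tableam.c.

#include "postgres.h"
#include "access/tableam.h"
#include "storage/itemptr.h"
#include "utils/snapmgr.h"

static void
delete_row_example(Relation rel, ItemPointer tid)
{
    Snapshot    snapshot = GetActiveSnapshot();

    /* any concurrency failure is raised via ereport() inside the wrapper */
    simple_table_tuple_delete(rel, tid, snapshot);
}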
314 
315 /*
316  * simple_table_tuple_update - replace a tuple
317  *
318  * This routine may be used to update a tuple when concurrent updates of
319  * the target tuple are not expected (for example, because we have a lock
320  * on the relation associated with the tuple). Any failure is reported
321  * via ereport().
322  */
323 void
324 simple_table_tuple_update(Relation rel, ItemPointer otid,
325  TupleTableSlot *slot,
326  Snapshot snapshot,
327  bool *update_indexes)
328 {
329  TM_Result result;
330  TM_FailureData tmfd;
331  LockTupleMode lockmode;
332 
333  result = table_tuple_update(rel, otid, slot,
334  GetCurrentCommandId(true),
335  snapshot, InvalidSnapshot,
336  true /* wait for commit */ ,
337  &tmfd, &lockmode, update_indexes);
338 
339  switch (result)
340  {
341  case TM_SelfModified:
342  /* Tuple was already updated in current command? */
343  elog(ERROR, "tuple already updated by self");
344  break;
345 
346  case TM_Ok:
347  /* done successfully */
348  break;
349 
350  case TM_Updated:
351  elog(ERROR, "tuple concurrently updated");
352  break;
353 
354  case TM_Deleted:
355  elog(ERROR, "tuple concurrently deleted");
356  break;
357 
358  default:
359  elog(ERROR, "unrecognized table_tuple_update status: %u", result);
360  break;
361  }
362 
363 }
364 
365 
366 /* ----------------------------------------------------------------------------
367  * Helper functions to implement parallel scans for block oriented AMs.
368  * ----------------------------------------------------------------------------
369  */
370 
371 Size
372 table_block_parallelscan_estimate(Relation rel)
373 {
374  return sizeof(ParallelBlockTableScanDescData);
375 }
376 
377 Size
378 table_block_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan)
379 {
380  ParallelBlockTableScanDesc bpscan = (ParallelBlockTableScanDesc) pscan;
381 
382  bpscan->base.phs_relid = RelationGetRelid(rel);
383  bpscan->phs_nblocks = RelationGetNumberOfBlocks(rel);
384  /* compare phs_syncscan initialization to similar logic in initscan */
385  bpscan->base.phs_syncscan = synchronize_seqscans &&
386  !RelationUsesLocalBuffers(rel) &&
387  bpscan->phs_nblocks > NBuffers / 4;
388  SpinLockInit(&bpscan->phs_mutex);
389  bpscan->phs_startblock = InvalidBlockNumber;
390  pg_atomic_init_u64(&bpscan->phs_nallocated, 0);
391 
392  return sizeof(ParallelBlockTableScanDescData);
393 }
394 
395 void
396 table_block_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan)
397 {
398  ParallelBlockTableScanDesc bpscan = (ParallelBlockTableScanDesc) pscan;
399 
400  pg_atomic_write_u64(&bpscan->phs_nallocated, 0);
401 }
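
A block-oriented table AM can point its TableAmRoutine callbacks straight at the three helpers above, as the heap AM does in heapam_handler.c. The fragment below is only a sketch showing those fields; a real AM must fill in the rest of the struct.

#include "postgres.h"
#include "access/tableam.h"

static const TableAmRoutine example_block_am_methods = {
    .type = T_TableAmRoutine,
    /* ... all other callbacks elided for brevity ... */
    .parallelscan_estimate = table_block_parallelscan_estimate,
    .parallelscan_initialize = table_block_parallelscan_initialize,
    .parallelscan_reinitialize = table_block_parallelscan_reinitialize,
};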
402 
403 /*
404  * find and set the scan's startblock
405  *
406  * Determine where the parallel seq scan should start. This function may be
407  * called many times, once by each parallel worker. We must be careful only
408  * to set the startblock once.
409  */
410 void
411 table_block_parallelscan_startblock_init(Relation rel, ParallelBlockTableScanDesc pbscan)
412 {
413  BlockNumber sync_startpage = InvalidBlockNumber;
414 
415 retry:
416  /* Grab the spinlock. */
417  SpinLockAcquire(&pbscan->phs_mutex);
418 
419  /*
420  * If the scan's startblock has not yet been initialized, we must do so
421  * now. If this is not a synchronized scan, we just start at block 0, but
422  * if it is a synchronized scan, we must get the starting position from
423  * the synchronized scan machinery. We can't hold the spinlock while
424  * doing that, though, so release the spinlock, get the information we
425  * need, and retry. If nobody else has initialized the scan in the
426  * meantime, we'll fill in the value we fetched on the second time
427  * through.
428  */
429  if (pbscan->phs_startblock == InvalidBlockNumber)
430  {
431  if (!pbscan->base.phs_syncscan)
432  pbscan->phs_startblock = 0;
433  else if (sync_startpage != InvalidBlockNumber)
434  pbscan->phs_startblock = sync_startpage;
435  else
436  {
437  SpinLockRelease(&pbscan->phs_mutex);
438  sync_startpage = ss_get_location(rel, pbscan->phs_nblocks);
439  goto retry;
440  }
441  }
442  SpinLockRelease(&pbscan->phs_mutex);
443 }
444 
445 /*
446  * get the next page to scan
447  *
448  * Get the next page to scan. Even if there are no pages left to scan,
449  * another backend could have grabbed a page to scan and not yet finished
450  * looking at it, so it doesn't follow that the scan is done when the first
451  * backend gets an InvalidBlockNumber return.
452  */
453 BlockNumber
454 table_block_parallelscan_nextpage(Relation rel, ParallelBlockTableScanDesc pbscan)
455 {
456  BlockNumber page;
457  uint64 nallocated;
458 
459  /*
460  * phs_nallocated tracks how many pages have been allocated to workers
461  * already. When phs_nallocated >= rs_nblocks, all blocks have been
462  * allocated.
463  *
464  * Because we use an atomic fetch-and-add to fetch the current value, the
465  * phs_nallocated counter can exceed rs_nblocks: workers still increment
466  * the value when they try to allocate the next block even though all
467  * blocks have already been allocated. The counter must be 64 bits
468  * wide because of that, to avoid wrapping around when rs_nblocks is close
469  * to 2^32.
470  *
471  * The actual page to return is calculated by adding the counter to the
472  * starting block number, modulo nblocks.
473  */
474  nallocated = pg_atomic_fetch_add_u64(&pbscan->phs_nallocated, 1);
475  if (nallocated >= pbscan->phs_nblocks)
476  page = InvalidBlockNumber; /* all blocks have been allocated */
477  else
478  page = (nallocated + pbscan->phs_startblock) % pbscan->phs_nblocks;
479 
480  /*
481  * Report scan location. Normally, we report the current page number.
482  * When we reach the end of the scan, though, we report the starting page,
483  * not the ending page, just so the starting positions for later scans
484  * don't slew backwards. We only report the position at the end of the
485  * scan once, though: subsequent callers will report nothing.
486  */
487  if (pbscan->base.phs_syncscan)
488  {
489  if (page != InvalidBlockNumber)
490  ss_report_location(rel, page);
491  else if (nallocated == pbscan->phs_nblocks)
492  ss_report_location(rel, pbscan->phs_startblock);
493  }
494 
495  return page;
496 }
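
Putting the two block-allocation helpers together, a worker's scan loop typically looks like the sketch below. This is hypothetical code: process_block is a made-up per-page callback, while the heap AM's real equivalent lives inside heapam.c.

#include "postgres.h"
#include "access/relscan.h"
#include "access/tableam.h"
#include "storage/block.h"

static void
scan_all_blocks_example(Relation rel, ParallelBlockTableScanDesc pbscan,
                        void (*process_block) (Relation rel, BlockNumber blkno))
{
    BlockNumber blkno;

    /* safe to call in every worker; only the first caller sets the start */
    table_block_parallelscan_startblock_init(rel, pbscan);

    while ((blkno = table_block_parallelscan_nextpage(rel, pbscan)) !=
           InvalidBlockNumber)
        process_block(rel, blkno);
}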
497 
498 /* ----------------------------------------------------------------------------
499  * Helper functions to implement relation sizing for block oriented AMs.
500  * ----------------------------------------------------------------------------
501  */
502 
503 /*
504  * table_block_relation_size
505  *
506  * If a table AM uses the various relation forks as the sole place where data
507  * is stored, and if it uses them in the expected manner (e.g. the actual data
508  * is in the main fork rather than some other), it can use this implementation
509  * of the relation_size callback rather than implementing its own.
510  */
511 uint64
512 table_block_relation_size(Relation rel, ForkNumber forkNumber)
513 {
514  uint64 nblocks = 0;
515 
516  /* Open it at the smgr level if not already done */
517  RelationOpenSmgr(rel);
518 
519  /* InvalidForkNumber indicates returning the size for all forks */
520  if (forkNumber == InvalidForkNumber)
521  {
522  for (int i = 0; i < MAX_FORKNUM; i++)
523  nblocks += smgrnblocks(rel->rd_smgr, i);
524  }
525  else
526  nblocks = smgrnblocks(rel->rd_smgr, forkNumber);
527 
528  return nblocks * BLCKSZ;
529 }
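
Since table_block_relation_size is exported in tableam.h, callers can also use it directly. The sketch below (main_fork_blocks_example is a made-up name) converts the main-fork size in bytes back to a block count.

#include "postgres.h"
#include "access/tableam.h"
#include "common/relpath.h"
#include "storage/block.h"

static BlockNumber
main_fork_blocks_example(Relation rel)
{
    /* size in bytes divided by the block size gives the block count */
    return (BlockNumber) (table_block_relation_size(rel, MAIN_FORKNUM) / BLCKSZ);
}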
530 
531 /*
532  * table_block_relation_estimate_size
533  *
534  * This function can't be directly used as the implementation of the
535  * relation_estimate_size callback, because it has a few additional parameters.
536  * Instead, it is intended to be used as a helper function; the caller can
537  * pass through the arguments to its relation_estimate_size function plus the
538  * additional values required here.
539  *
540  * overhead_bytes_per_tuple should contain the approximate number of bytes
541  * of storage required to store a tuple above and beyond what is required for
542  * the tuple data proper. Typically, this would include things like the
543  * size of the tuple header and item pointer. This is only used for query
544  * planning, so a table AM where the value is not constant could choose to
545  * pass a "best guess".
546  *
547  * usable_bytes_per_page should contain the approximate number of bytes per
548  * page usable for tuple data, excluding the page header and any anticipated
549  * special space.
550  */
551 void
552 table_block_relation_estimate_size(Relation rel, int32 *attr_widths,
553  BlockNumber *pages, double *tuples,
554  double *allvisfrac,
555  Size overhead_bytes_per_tuple,
556  Size usable_bytes_per_page)
557 {
558  BlockNumber curpages;
559  BlockNumber relpages;
560  double reltuples;
561  BlockNumber relallvisible;
562  double density;
563 
564  /* it should have storage, so we can call the smgr */
565  curpages = RelationGetNumberOfBlocks(rel);
566 
567  /* coerce values in pg_class to more desirable types */
568  relpages = (BlockNumber) rel->rd_rel->relpages;
569  reltuples = (double) rel->rd_rel->reltuples;
570  relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
571 
572  /*
573  * HACK: if the relation has never yet been vacuumed, use a minimum size
574  * estimate of 10 pages. The idea here is to avoid assuming a
575  * newly-created table is really small, even if it currently is, because
576  * that may not be true once some data gets loaded into it. Once a vacuum
577  * or analyze cycle has been done on it, it's more reasonable to believe
578  * the size is somewhat stable.
579  *
580  * (Note that this is only an issue if the plan gets cached and used again
581  * after the table has been filled. What we're trying to avoid is using a
582  * nestloop-type plan on a table that has grown substantially since the
583  * plan was made. Normally, autovacuum/autoanalyze will occur once enough
584  * inserts have happened and cause cached-plan invalidation; but that
585  * doesn't happen instantaneously, and it won't happen at all for cases
586  * such as temporary tables.)
587  *
588  * We approximate "never vacuumed" by "has relpages = 0", which means this
589  * will also fire on genuinely empty relations. Not great, but
590  * fortunately that's a seldom-seen case in the real world, and it
591  * shouldn't degrade the quality of the plan too much anyway to err in
592  * this direction.
593  *
594  * If the table has inheritance children, we don't apply this heuristic.
595  * Totally empty parent tables are quite common, so we should be willing
596  * to believe that they are empty.
597  */
598  if (curpages < 10 &&
599  relpages == 0 &&
600  !rel->rd_rel->relhassubclass)
601  curpages = 10;
602 
603  /* report estimated # pages */
604  *pages = curpages;
605  /* quick exit if rel is clearly empty */
606  if (curpages == 0)
607  {
608  *tuples = 0;
609  *allvisfrac = 0;
610  return;
611  }
612 
613  /* estimate number of tuples from previous tuple density */
614  if (relpages > 0)
615  density = reltuples / (double) relpages;
616  else
617  {
618  /*
619  * When we have no data because the relation was truncated, estimate
620  * tuple width from attribute datatypes. We assume here that the
621  * pages are completely full, which is OK for tables (since they've
622  * presumably not been VACUUMed yet) but is probably an overestimate
623  * for indexes. Fortunately get_relation_info() can clamp the
624  * overestimate to the parent table's size.
625  *
626  * Note: this code intentionally disregards alignment considerations,
627  * because (a) that would be gilding the lily considering how crude
628  * the estimate is, (b) it creates platform dependencies in the
629  * default plans which are kind of a headache for regression testing,
630  * and (c) different table AMs might use different padding schemes.
631  */
632  int32 tuple_width;
633 
634  tuple_width = get_rel_data_width(rel, attr_widths);
635  tuple_width += overhead_bytes_per_tuple;
636  /* note: integer division is intentional here */
637  density = usable_bytes_per_page / tuple_width;
638  }
639  *tuples = rint(density * (double) curpages);
640 
641  /*
642  * We use relallvisible as-is, rather than scaling it up like we do for
643  * the pages and tuples counts, on the theory that any pages added since
644  * the last VACUUM are most likely not marked all-visible. But costsize.c
645  * wants it converted to a fraction.
646  */
647  if (relallvisible == 0 || curpages <= 0)
648  *allvisfrac = 0;
649  else if ((double) relallvisible >= curpages)
650  *allvisfrac = 1;
651  else
652  *allvisfrac = (double) relallvisible / curpages;
653 }
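
To make the fallback density computation concrete, here is a worked example with made-up numbers: if get_rel_data_width() reports 44 bytes of attribute data per row, overhead_bytes_per_tuple is 24, and usable_bytes_per_page is 8168, then

    tuple_width = 44 + 24 = 68
    density     = 8168 / 68 = 120          (integer division, as noted above)
    *tuples     = rint(120 * 100) = 12000  for a 100-page relation

Once the table has been vacuumed or analyzed, the relpages/reltuples path above is used instead of this fallback.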