1 /*-------------------------------------------------------------------------
2  *
3  * heapam_handler.c
4  * heap table access method code
5  *
6  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/access/heap/heapam_handler.c
12  *
13  *
14  * NOTES
15  * This file wires up the lower-level heapam.c et al. routines with the
16  * tableam abstraction.
17  *
18  *-------------------------------------------------------------------------
19  */
20 #include "postgres.h"
21 
22 #include "access/genam.h"
23 #include "access/heapam.h"
24 #include "access/heaptoast.h"
25 #include "access/multixact.h"
26 #include "access/rewriteheap.h"
27 #include "access/syncscan.h"
28 #include "access/tableam.h"
29 #include "access/tsmapi.h"
30 #include "access/xact.h"
31 #include "catalog/catalog.h"
32 #include "catalog/index.h"
33 #include "catalog/storage.h"
34 #include "catalog/storage_xlog.h"
35 #include "commands/progress.h"
36 #include "executor/executor.h"
37 #include "miscadmin.h"
38 #include "pgstat.h"
39 #include "storage/bufmgr.h"
40 #include "storage/bufpage.h"
41 #include "storage/lmgr.h"
42 #include "storage/predicate.h"
43 #include "storage/procarray.h"
44 #include "storage/smgr.h"
45 #include "utils/builtins.h"
46 #include "utils/rel.h"
47 
48 static void reform_and_rewrite_tuple(HeapTuple tuple,
49  Relation OldHeap, Relation NewHeap,
50  Datum *values, bool *isnull, RewriteState rwstate);
51 
52 static bool SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer,
53  HeapTuple tuple,
54  OffsetNumber tupoffset);
55 
57 
59 
60 
61 /* ------------------------------------------------------------------------
62  * Slot related callbacks for heap AM
63  * ------------------------------------------------------------------------
64  */
65 
66 static const TupleTableSlotOps *
67 heapam_slot_callbacks(Relation relation)
68 {
69  return &TTSOpsBufferHeapTuple;
70 }
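/*
 * Illustrative sketch (not part of heapam_handler.c): callers do not invoke
 * the callback above directly; they go through table_slot_create(), which
 * consults rel->rd_tableam->slot_callbacks.  The helper below is hypothetical
 * and only shows that, for a heap relation, the resulting slot is a
 * buffer-heap-tuple slot.
 */
static TupleTableSlot *
example_create_heap_slot(Relation rel)
{
	TupleTableSlot *slot = table_slot_create(rel, NULL);

	Assert(TTS_IS_BUFFERTUPLE(slot));
	return slot;
}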
71 
72 
73 /* ------------------------------------------------------------------------
74  * Index Scan Callbacks for heap AM
75  * ------------------------------------------------------------------------
76  */
77 
78 static IndexFetchTableData *
79 heapam_index_fetch_begin(Relation rel)
80 {
81  IndexFetchHeapData *hscan = palloc0(sizeof(IndexFetchHeapData));
82
83  hscan->xs_base.rel = rel;
84  hscan->xs_cbuf = InvalidBuffer;
85 
86  return &hscan->xs_base;
87 }
88 
89 static void
90 heapam_index_fetch_reset(IndexFetchTableData *scan)
91 {
92  IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
93 
94  if (BufferIsValid(hscan->xs_cbuf))
95  {
96  ReleaseBuffer(hscan->xs_cbuf);
97  hscan->xs_cbuf = InvalidBuffer;
98  }
99 }
100 
101 static void
102 heapam_index_fetch_end(IndexFetchTableData *scan)
103 {
104  IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
105
106  heapam_index_fetch_reset(scan);
107
108  pfree(hscan);
109 }
110 
111 static bool
112 heapam_index_fetch_tuple(struct IndexFetchTableData *scan,
113  ItemPointer tid,
114  Snapshot snapshot,
115  TupleTableSlot *slot,
116  bool *call_again, bool *all_dead)
117 {
118  IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
119  BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
120  bool got_heap_tuple;
121 
122  Assert(TTS_IS_BUFFERTUPLE(slot));
123 
124  /* We can skip the buffer-switching logic if we're in mid-HOT chain. */
125  if (!*call_again)
126  {
127  /* Switch to correct buffer if we don't have it already */
128  Buffer prev_buf = hscan->xs_cbuf;
129 
130  hscan->xs_cbuf = ReleaseAndReadBuffer(hscan->xs_cbuf,
131  hscan->xs_base.rel,
132  ItemPointerGetBlockNumber(tid));
133
134  /*
135  * Prune page, but only if we weren't already on this page
136  */
137  if (prev_buf != hscan->xs_cbuf)
138  heap_page_prune_opt(hscan->xs_base.rel, hscan->xs_cbuf);
139  }
140 
141  /* Obtain share-lock on the buffer so we can examine visibility */
142  LockBuffer(hscan->xs_cbuf, BUFFER_LOCK_SHARE);
143  got_heap_tuple = heap_hot_search_buffer(tid,
144  hscan->xs_base.rel,
145  hscan->xs_cbuf,
146  snapshot,
147  &bslot->base.tupdata,
148  all_dead,
149  !*call_again);
150  bslot->base.tupdata.t_self = *tid;
151  LockBuffer(hscan->xs_cbuf, BUFFER_LOCK_UNLOCK);
152
153  if (got_heap_tuple)
154  {
155  /*
156  * Only in a non-MVCC snapshot can more than one member of the HOT
157  * chain be visible.
158  */
159  *call_again = !IsMVCCSnapshot(snapshot);
160 
161  slot->tts_tableOid = RelationGetRelid(scan->rel);
162  ExecStoreBufferHeapTuple(&bslot->base.tupdata, slot, hscan->xs_cbuf);
163  }
164  else
165  {
166  /* We've reached the end of the HOT chain. */
167  *call_again = false;
168  }
169 
170  return got_heap_tuple;
171 }
172 
173 
174 /* ------------------------------------------------------------------------
175  * Callbacks for non-modifying operations on individual tuples for heap AM
176  * ------------------------------------------------------------------------
177  */
178 
179 static bool
180 heapam_fetch_row_version(Relation relation,
181  ItemPointer tid,
182  Snapshot snapshot,
183  TupleTableSlot *slot)
184 {
185  BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
186  Buffer buffer;
187 
188  Assert(TTS_IS_BUFFERTUPLE(slot));
189 
190  bslot->base.tupdata.t_self = *tid;
191  if (heap_fetch(relation, snapshot, &bslot->base.tupdata, &buffer))
192  {
193  /* store in slot, transferring existing pin */
194  ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata, slot, buffer);
195  slot->tts_tableOid = RelationGetRelid(relation);
196 
197  return true;
198  }
199 
200  return false;
201 }
202 
203 static bool
204 heapam_tuple_tid_valid(TableScanDesc scan, ItemPointer tid)
205 {
206  HeapScanDesc hscan = (HeapScanDesc) scan;
207 
208  return ItemPointerIsValid(tid) &&
209  ItemPointerGetBlockNumber(tid) < hscan->rs_nblocks;
210 }
211 
212 static bool
213 heapam_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
214  Snapshot snapshot)
215 {
216  BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
217  bool res;
218 
219  Assert(TTS_IS_BUFFERTUPLE(slot));
220  Assert(BufferIsValid(bslot->buffer));
221 
222  /*
223  * We need buffer pin and lock to call HeapTupleSatisfiesVisibility.
224  * Caller should be holding pin, but not lock.
225  */
226  LockBuffer(bslot->buffer, BUFFER_LOCK_SHARE);
227  res = HeapTupleSatisfiesVisibility(bslot->base.tuple, snapshot,
228  bslot->buffer);
229  LockBuffer(bslot->buffer, BUFFER_LOCK_UNLOCK);
230
231  return res;
232 }
233 
234 
235 /* ----------------------------------------------------------------------------
236  * Functions for manipulations of physical tuples for heap AM.
237  * ----------------------------------------------------------------------------
238  */
239 
240 static void
241 heapam_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid,
242  int options, BulkInsertState bistate)
243 {
244  bool shouldFree = true;
245  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
246 
247  /* Update the tuple with table oid */
248  slot->tts_tableOid = RelationGetRelid(relation);
249  tuple->t_tableOid = slot->tts_tableOid;
250 
251  /* Perform the insertion, and copy the resulting ItemPointer */
252  heap_insert(relation, tuple, cid, options, bistate);
253  ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
254 
255  if (shouldFree)
256  pfree(tuple);
257 }
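/*
 * Illustrative sketch (not part of heapam_handler.c): a minimal caller-side
 * insertion that reaches heapam_tuple_insert() through the tableam dispatch
 * routine table_tuple_insert().  The helper and its single-column row are
 * hypothetical; it assumes the relation has at least one attribute.
 */
static void
example_insert_one_value(Relation rel, Datum value)
{
	TupleTableSlot *slot = table_slot_create(rel, NULL);

	ExecClearTuple(slot);
	slot->tts_values[0] = value;
	slot->tts_isnull[0] = false;
	ExecStoreVirtualTuple(slot);

	/* For a heap table this dispatches to heapam_tuple_insert(). */
	table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);

	ExecDropSingleTupleTableSlot(slot);
}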
258 
259 static void
260 heapam_tuple_insert_speculative(Relation relation, TupleTableSlot *slot,
261  CommandId cid, int options,
262  BulkInsertState bistate, uint32 specToken)
263 {
264  bool shouldFree = true;
265  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
266 
267  /* Update the tuple with table oid */
268  slot->tts_tableOid = RelationGetRelid(relation);
269  tuple->t_tableOid = slot->tts_tableOid;
270 
271  HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);
272  options |= HEAP_INSERT_SPECULATIVE;
273 
274  /* Perform the insertion, and copy the resulting ItemPointer */
275  heap_insert(relation, tuple, cid, options, bistate);
276  ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
277 
278  if (shouldFree)
279  pfree(tuple);
280 }
281 
282 static void
283 heapam_tuple_complete_speculative(Relation relation, TupleTableSlot *slot,
284  uint32 specToken, bool succeeded)
285 {
286  bool shouldFree = true;
287  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
288 
289  /* adjust the tuple's state accordingly */
290  if (succeeded)
291  heap_finish_speculative(relation, &slot->tts_tid);
292  else
293  heap_abort_speculative(relation, &slot->tts_tid);
294 
295  if (shouldFree)
296  pfree(tuple);
297 }
298 
299 static TM_Result
300 heapam_tuple_delete(Relation relation, ItemPointer tid, CommandId cid,
301  Snapshot snapshot, Snapshot crosscheck, bool wait,
302  TM_FailureData *tmfd, bool changingPart)
303 {
304  /*
305  * Currently, deletion of index tuples is handled at VACUUM time. If the
306  * storage were to clean up dead tuples by itself, this would also be the
307  * place to remove the corresponding index tuples.
308  */
309  return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart);
310 }
311 
312 
313 static TM_Result
314 heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
315  CommandId cid, Snapshot snapshot, Snapshot crosscheck,
316  bool wait, TM_FailureData *tmfd,
317  LockTupleMode *lockmode, bool *update_indexes)
318 {
319  bool shouldFree = true;
320  HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
321  TM_Result result;
322 
323  /* Update the tuple with table oid */
324  slot->tts_tableOid = RelationGetRelid(relation);
325  tuple->t_tableOid = slot->tts_tableOid;
326 
327  result = heap_update(relation, otid, tuple, cid, crosscheck, wait,
328  tmfd, lockmode);
329  ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
330 
331  /*
332  * Decide whether new index entries are needed for the tuple
333  *
334  * Note: heap_update returns the tid (location) of the new tuple in the
335  * t_self field.
336  *
337  * If it's a HOT update, we mustn't insert new index entries.
338  */
339  *update_indexes = result == TM_Ok && !HeapTupleIsHeapOnly(tuple);
340 
341  if (shouldFree)
342  pfree(tuple);
343 
344  return result;
345 }
346 
347 static TM_Result
348 heapam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot,
349  TupleTableSlot *slot, CommandId cid, LockTupleMode mode,
350  LockWaitPolicy wait_policy, uint8 flags,
351  TM_FailureData *tmfd)
352 {
353  BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
354  TM_Result result;
355  Buffer buffer;
356  HeapTuple tuple = &bslot->base.tupdata;
357  bool follow_updates;
358 
359  follow_updates = (flags & TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS) != 0;
360  tmfd->traversed = false;
361 
362  Assert(TTS_IS_BUFFERTUPLE(slot));
363 
364 tuple_lock_retry:
365  tuple->t_self = *tid;
366  result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
367  follow_updates, &buffer, tmfd);
368 
369  if (result == TM_Updated &&
370  (flags & TUPLE_LOCK_FLAG_FIND_LAST_VERSION))
371  {
372  /* Should not encounter speculative tuple on recheck */
373  Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
374
375  ReleaseBuffer(buffer);
376 
377  if (!ItemPointerEquals(&tmfd->ctid, &tuple->t_self))
378  {
379  SnapshotData SnapshotDirty;
380  TransactionId priorXmax;
381 
382  /* it was updated, so look at the updated version */
383  *tid = tmfd->ctid;
384  /* updated row should have xmin matching this xmax */
385  priorXmax = tmfd->xmax;
386 
387  /* signal that a tuple later in the chain is getting locked */
388  tmfd->traversed = true;
389 
390  /*
391  * fetch target tuple
392  *
393  * Loop here to deal with updated or busy tuples
394  */
395  InitDirtySnapshot(SnapshotDirty);
396  for (;;)
397  {
398  if (ItemPointerIndicatesMovedPartitions(tid))
399  ereport(ERROR,
400  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
401  errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
402 
403  tuple->t_self = *tid;
404  if (heap_fetch(relation, &SnapshotDirty, tuple, &buffer))
405  {
406  /*
407  * If xmin isn't what we're expecting, the slot must have
408  * been recycled and reused for an unrelated tuple. This
409  * implies that the latest version of the row was deleted,
410  * so we need do nothing. (Should be safe to examine xmin
411  * without getting buffer's content lock. We assume
412  * reading a TransactionId to be atomic, and Xmin never
413  * changes in an existing tuple, except to invalid or
414  * frozen, and neither of those can match priorXmax.)
415  */
416  if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple->t_data),
417  priorXmax))
418  {
419  ReleaseBuffer(buffer);
420  return TM_Deleted;
421  }
422 
423  /* otherwise xmin should not be dirty... */
424  if (TransactionIdIsValid(SnapshotDirty.xmin))
425  ereport(ERROR,
426  (errcode(ERRCODE_DATA_CORRUPTED),
427  errmsg_internal("t_xmin is uncommitted in tuple to be updated")));
428 
429  /*
430  * If tuple is being updated by other transaction then we
431  * have to wait for its commit/abort, or die trying.
432  */
433  if (TransactionIdIsValid(SnapshotDirty.xmax))
434  {
435  ReleaseBuffer(buffer);
436  switch (wait_policy)
437  {
438  case LockWaitBlock:
439  XactLockTableWait(SnapshotDirty.xmax,
440  relation, &tuple->t_self,
441  XLTW_FetchUpdated);
442  break;
443  case LockWaitSkip:
444  if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
445  /* skip instead of waiting */
446  return TM_WouldBlock;
447  break;
448  case LockWaitError:
449  if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
450  ereport(ERROR,
451  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
452  errmsg("could not obtain lock on row in relation \"%s\"",
453  RelationGetRelationName(relation))));
454  break;
455  }
456  continue; /* loop back to repeat heap_fetch */
457  }
458 
459  /*
460  * If tuple was inserted by our own transaction, we have
461  * to check cmin against cid: cmin >= current CID means
462  * our command cannot see the tuple, so we should ignore
463  * it. Otherwise heap_lock_tuple() will throw an error,
464  * and so would any later attempt to update or delete the
465  * tuple. (We need not check cmax because
466  * HeapTupleSatisfiesDirty will consider a tuple deleted
467  * by our transaction dead, regardless of cmax.) We just
468  * checked that priorXmax == xmin, so we can test that
469  * variable instead of doing HeapTupleHeaderGetXmin again.
470  */
471  if (TransactionIdIsCurrentTransactionId(priorXmax) &&
472  HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
473  {
474  tmfd->xmax = priorXmax;
475 
476  /*
477  * Cmin is the problematic value, so store that. See
478  * above.
479  */
480  tmfd->cmax = HeapTupleHeaderGetCmin(tuple->t_data);
481  ReleaseBuffer(buffer);
482  return TM_SelfModified;
483  }
484 
485  /*
486  * This is a live tuple, so try to lock it again.
487  */
488  ReleaseBuffer(buffer);
489  goto tuple_lock_retry;
490  }
491 
492  /*
493  * If the referenced slot was actually empty, the latest
494  * version of the row must have been deleted, so we need do
495  * nothing.
496  */
497  if (tuple->t_data == NULL)
498  {
499  return TM_Deleted;
500  }
501 
502  /*
503  * As above, if xmin isn't what we're expecting, do nothing.
504  */
505  if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple->t_data),
506  priorXmax))
507  {
508  if (BufferIsValid(buffer))
509  ReleaseBuffer(buffer);
510  return TM_Deleted;
511  }
512 
513  /*
514  * If we get here, the tuple was found but failed
515  * SnapshotDirty. Assuming the xmin is either a committed xact
516  * or our own xact (as it certainly should be if we're trying
517  * to modify the tuple), this must mean that the row was
518  * updated or deleted by either a committed xact or our own
519  * xact. If it was deleted, we can ignore it; if it was
520  * updated then chain up to the next version and repeat the
521  * whole process.
522  *
523  * As above, it should be safe to examine xmax and t_ctid
524  * without the buffer content lock, because they can't be
525  * changing.
526  */
527  if (ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
528  {
529  /* deleted, so forget about it */
530  if (BufferIsValid(buffer))
531  ReleaseBuffer(buffer);
532  return TM_Deleted;
533  }
534 
535  /* updated, so look at the updated row */
536  *tid = tuple->t_data->t_ctid;
537  /* updated row should have xmin matching this xmax */
538  priorXmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
539  if (BufferIsValid(buffer))
540  ReleaseBuffer(buffer);
541  /* loop back to fetch next in chain */
542  }
543  }
544  else
545  {
546  /* tuple was deleted, so give up */
547  return TM_Deleted;
548  }
549  }
550 
551  slot->tts_tableOid = RelationGetRelid(relation);
552  tuple->t_tableOid = slot->tts_tableOid;
553 
554  /* store in slot, transferring existing pin */
555  ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);
556 
557  return result;
558 }
559 
560 
561 /* ------------------------------------------------------------------------
562  * DDL related callbacks for heap AM.
563  * ------------------------------------------------------------------------
564  */
565 
566 static void
567 heapam_relation_set_new_filenode(Relation rel,
568  const RelFileNode *newrnode,
569  char persistence,
570  TransactionId *freezeXid,
571  MultiXactId *minmulti)
572 {
573  SMgrRelation srel;
574 
575  /*
576  * Initialize to the minimum XID that could put tuples in the table. We
577  * know that no xacts older than RecentXmin are still running, so that
578  * will do.
579  */
580  *freezeXid = RecentXmin;
581 
582  /*
583  * Similarly, initialize the minimum Multixact to the first value that
584  * could possibly be stored in tuples in the table. Running transactions
585  * could reuse values from their local cache, so we are careful to
586  * consider all currently running multis.
587  *
588  * XXX this could be refined further, but is it worth the hassle?
589  */
590  *minmulti = GetOldestMultiXactId();
591 
592  srel = RelationCreateStorage(*newrnode, persistence);
593 
594  /*
595  * If required, set up an init fork for an unlogged table so that it can
596  * be correctly reinitialized on restart. An immediate sync is required
597  * even if the page has been logged, because the write did not go through
598  * shared_buffers and therefore a concurrent checkpoint may have moved the
599  * redo pointer past our xlog record. Recovery may as well remove it
600  * while replaying, for example, XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE
601  * record. Therefore, logging is necessary even if wal_level=minimal.
602  */
603  if (persistence == RELPERSISTENCE_UNLOGGED)
604  {
605  Assert(rel->rd_rel->relkind == RELKIND_RELATION ||
606  rel->rd_rel->relkind == RELKIND_MATVIEW ||
607  rel->rd_rel->relkind == RELKIND_TOASTVALUE);
608  smgrcreate(srel, INIT_FORKNUM, false);
609  log_smgrcreate(newrnode, INIT_FORKNUM);
610  smgrimmedsync(srel, INIT_FORKNUM);
611  }
612 
613  smgrclose(srel);
614 }
615 
616 static void
617 heapam_relation_nontransactional_truncate(Relation rel)
618 {
619  RelationTruncate(rel, 0);
620 }
621 
622 static void
623 heapam_relation_copy_data(Relation rel, const RelFileNode *newrnode)
624 {
625  SMgrRelation dstrel;
626 
627  dstrel = smgropen(*newrnode, rel->rd_backend);
628  RelationOpenSmgr(rel);
629 
630  /*
631  * Since we copy the file directly without looking at the shared buffers,
632  * we'd better first flush out any pages of the source relation that are
633  * in shared buffers. We assume no new changes will be made while we are
634  * holding exclusive lock on the rel.
635  */
636  FlushRelationBuffers(rel);
637
638  /*
639  * Create and copy all forks of the relation, and schedule unlinking of
640  * old physical files.
641  *
642  * NOTE: any conflict in relfilenode value will be caught in
643  * RelationCreateStorage().
644  */
645  RelationCreateStorage(*newrnode, rel->rd_rel->relpersistence);
646 
647  /* copy main fork */
648  RelationCopyStorage(rel->rd_smgr, dstrel, MAIN_FORKNUM,
649  rel->rd_rel->relpersistence);
650 
651  /* copy those extra forks that exist */
652  for (ForkNumber forkNum = MAIN_FORKNUM + 1;
653  forkNum <= MAX_FORKNUM; forkNum++)
654  {
655  if (smgrexists(rel->rd_smgr, forkNum))
656  {
657  smgrcreate(dstrel, forkNum, false);
658 
659  /*
660  * WAL log creation if the relation is persistent, or this is the
661  * init fork of an unlogged relation.
662  */
663  if (rel->rd_rel->relpersistence == RELPERSISTENCE_PERMANENT ||
664  (rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
665  forkNum == INIT_FORKNUM))
666  log_smgrcreate(newrnode, forkNum);
667  RelationCopyStorage(rel->rd_smgr, dstrel, forkNum,
668  rel->rd_rel->relpersistence);
669  }
670  }
671 
672 
673  /* drop old relation, and close new one */
674  RelationDropStorage(rel);
675  smgrclose(dstrel);
676 }
677 
678 static void
679 heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
680  Relation OldIndex, bool use_sort,
681  TransactionId OldestXmin,
682  TransactionId *xid_cutoff,
683  MultiXactId *multi_cutoff,
684  double *num_tuples,
685  double *tups_vacuumed,
686  double *tups_recently_dead)
687 {
688  RewriteState rwstate;
689  IndexScanDesc indexScan;
690  TableScanDesc tableScan;
691  HeapScanDesc heapScan;
692  bool is_system_catalog;
693  Tuplesortstate *tuplesort;
694  TupleDesc oldTupDesc = RelationGetDescr(OldHeap);
695  TupleDesc newTupDesc = RelationGetDescr(NewHeap);
696  TupleTableSlot *slot;
697  int natts;
698  Datum *values;
699  bool *isnull;
700  BufferHeapTupleTableSlot *hslot;
701
702  /* Remember if it's a system catalog */
703  is_system_catalog = IsSystemRelation(OldHeap);
704 
705  /*
706  * Valid smgr_targblock implies something already wrote to the relation.
707  * This may be harmless, but this function hasn't planned for it.
708  */
709  Assert(RelationGetTargetBlock(NewHeap) == InvalidBlockNumber);
710
711  /* Preallocate values/isnull arrays */
712  natts = newTupDesc->natts;
713  values = (Datum *) palloc(natts * sizeof(Datum));
714  isnull = (bool *) palloc(natts * sizeof(bool));
715 
716  /* Initialize the rewrite operation */
717  rwstate = begin_heap_rewrite(OldHeap, NewHeap, OldestXmin, *xid_cutoff,
718  *multi_cutoff);
719 
720 
721  /* Set up sorting if wanted */
722  if (use_sort)
723  tuplesort = tuplesort_begin_cluster(oldTupDesc, OldIndex,
724  maintenance_work_mem,
725  NULL, false);
726  else
727  tuplesort = NULL;
728 
729  /*
730  * Prepare to scan the OldHeap. To ensure we see recently-dead tuples
731  * that still need to be copied, we scan with SnapshotAny and use
732  * HeapTupleSatisfiesVacuum for the visibility test.
733  */
734  if (OldIndex != NULL && !use_sort)
735  {
736  const int ci_index[] = {
737  PROGRESS_CLUSTER_PHASE,
738  PROGRESS_CLUSTER_INDEX_RELID
739  };
740  int64 ci_val[2];
741 
742  /* Set phase and OIDOldIndex to columns */
743  ci_val[0] = PROGRESS_CLUSTER_PHASE_INDEX_SCAN_HEAP;
744  ci_val[1] = RelationGetRelid(OldIndex);
745  pgstat_progress_update_multi_param(2, ci_index, ci_val);
746 
747  tableScan = NULL;
748  heapScan = NULL;
749  indexScan = index_beginscan(OldHeap, OldIndex, SnapshotAny, 0, 0);
750  index_rescan(indexScan, NULL, 0, NULL, 0);
751  }
752  else
753  {
754  /* In scan-and-sort mode and also VACUUM FULL, set phase */
755  pgstat_progress_update_param(PROGRESS_CLUSTER_PHASE,
756  PROGRESS_CLUSTER_PHASE_SEQ_SCAN_HEAP);
757
758  tableScan = table_beginscan(OldHeap, SnapshotAny, 0, (ScanKey) NULL);
759  heapScan = (HeapScanDesc) tableScan;
760  indexScan = NULL;
761 
762  /* Set total heap blocks */
763  pgstat_progress_update_param(PROGRESS_CLUSTER_TOTAL_HEAP_BLKS,
764  heapScan->rs_nblocks);
765  }
766 
767  slot = table_slot_create(OldHeap, NULL);
768  hslot = (BufferHeapTupleTableSlot *) slot;
769 
770  /*
771  * Scan through the OldHeap, either in OldIndex order or sequentially;
772  * copy each tuple into the NewHeap, or transiently to the tuplesort
773  * module. Note that we don't bother sorting dead tuples (they won't get
774  * to the new table anyway).
775  */
776  for (;;)
777  {
778  HeapTuple tuple;
779  Buffer buf;
780  bool isdead;
781
782  CHECK_FOR_INTERRUPTS();
783
784  if (indexScan != NULL)
785  {
786  if (!index_getnext_slot(indexScan, ForwardScanDirection, slot))
787  break;
788 
789  /* Since we used no scan keys, should never need to recheck */
790  if (indexScan->xs_recheck)
791  elog(ERROR, "CLUSTER does not support lossy index conditions");
792  }
793  else
794  {
795  if (!table_scan_getnextslot(tableScan, ForwardScanDirection, slot))
796  break;
797 
798  /*
799  * In scan-and-sort mode and also VACUUM FULL, set heap blocks
800  * scanned
801  */
802  pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_BLKS_SCANNED,
803  heapScan->rs_cblock + 1);
804  }
805 
806  tuple = ExecFetchSlotHeapTuple(slot, false, NULL);
807  buf = hslot->buffer;
808 
810 
811  switch (HeapTupleSatisfiesVacuum(tuple, OldestXmin, buf))
812  {
813  case HEAPTUPLE_DEAD:
814  /* Definitely dead */
815  isdead = true;
816  break;
817  case HEAPTUPLE_RECENTLY_DEAD:
818  *tups_recently_dead += 1;
819  /* fall through */
820  case HEAPTUPLE_LIVE:
821  /* Live or recently dead, must copy it */
822  isdead = false;
823  break;
824  case HEAPTUPLE_INSERT_IN_PROGRESS:
825
826  /*
827  * Since we hold exclusive lock on the relation, normally the
828  * only way to see this is if it was inserted earlier in our
829  * own transaction. However, it can happen in system
830  * catalogs, since we tend to release write lock before commit
831  * there. Give a warning if neither case applies; but in any
832  * case we had better copy it.
833  */
834  if (!is_system_catalog &&
835  !TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple->t_data)))
836  elog(WARNING, "concurrent insert in progress within table \"%s\"",
837  RelationGetRelationName(OldHeap));
838  /* treat as live */
839  isdead = false;
840  break;
841  case HEAPTUPLE_DELETE_IN_PROGRESS:
842
843  /*
844  * Similar situation to INSERT_IN_PROGRESS case.
845  */
846  if (!is_system_catalog &&
847  !TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(tuple->t_data)))
848  elog(WARNING, "concurrent delete in progress within table \"%s\"",
849  RelationGetRelationName(OldHeap));
850  /* treat as recently dead */
851  *tups_recently_dead += 1;
852  isdead = false;
853  break;
854  default:
855  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
856  isdead = false; /* keep compiler quiet */
857  break;
858  }
859 
859
860  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
861
862  if (isdead)
863  {
864  *tups_vacuumed += 1;
865  /* heap rewrite module still needs to see it... */
866  if (rewrite_heap_dead_tuple(rwstate, tuple))
867  {
868  /* A previous recently-dead tuple is now known dead */
869  *tups_vacuumed += 1;
870  *tups_recently_dead -= 1;
871  }
872  continue;
873  }
874 
875  *num_tuples += 1;
876  if (tuplesort != NULL)
877  {
878  tuplesort_putheaptuple(tuplesort, tuple);
879 
880  /*
881  * In scan-and-sort mode, report increase in number of tuples
882  * scanned
883  */
884  pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_TUPLES_SCANNED,
885  *num_tuples);
886  }
887  else
888  {
889  const int ct_index[] = {
890  PROGRESS_CLUSTER_HEAP_TUPLES_SCANNED,
891  PROGRESS_CLUSTER_HEAP_TUPLES_WRITTEN
892  };
893  int64 ct_val[2];
894 
895  reform_and_rewrite_tuple(tuple, OldHeap, NewHeap,
896  values, isnull, rwstate);
897 
898  /*
899  * In indexscan mode and also VACUUM FULL, report increase in
900  * number of tuples scanned and written
901  */
902  ct_val[0] = *num_tuples;
903  ct_val[1] = *num_tuples;
904  pgstat_progress_update_multi_param(2, ct_index, ct_val);
905  }
906  }
907 
908  if (indexScan != NULL)
909  index_endscan(indexScan);
910  if (tableScan != NULL)
911  table_endscan(tableScan);
912  if (slot)
913  ExecDropSingleTupleTableSlot(slot);
914
915  /*
916  * In scan-and-sort mode, complete the sort, then read out all live tuples
917  * from the tuplestore and write them to the new relation.
918  */
919  if (tuplesort != NULL)
920  {
921  double n_tuples = 0;
922 
923  /* Report that we are now sorting tuples */
924  pgstat_progress_update_param(PROGRESS_CLUSTER_PHASE,
925  PROGRESS_CLUSTER_PHASE_SORT_TUPLES);
926
927  tuplesort_performsort(tuplesort);
928 
929  /* Report that we are now writing new heap */
930  pgstat_progress_update_param(PROGRESS_CLUSTER_PHASE,
931  PROGRESS_CLUSTER_PHASE_WRITE_NEW_HEAP);
932
933  for (;;)
934  {
935  HeapTuple tuple;
936 
936
937  CHECK_FOR_INTERRUPTS();
938
939  tuple = tuplesort_getheaptuple(tuplesort, true);
940  if (tuple == NULL)
941  break;
942 
943  n_tuples += 1;
944  reform_and_rewrite_tuple(tuple,
945  OldHeap, NewHeap,
946  values, isnull,
947  rwstate);
948  /* Report n_tuples */
949  pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_TUPLES_WRITTEN,
950  n_tuples);
951  }
952 
953  tuplesort_end(tuplesort);
954  }
955 
956  /* Write out any remaining tuples, and fsync if needed */
957  end_heap_rewrite(rwstate);
958 
959  /* Clean up */
960  pfree(values);
961  pfree(isnull);
962 }
963 
964 static bool
965 heapam_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno,
966  BufferAccessStrategy bstrategy)
967 {
968  HeapScanDesc hscan = (HeapScanDesc) scan;
969 
970  /*
971  * We must maintain a pin on the target page's buffer to ensure that
972  * concurrent activity - e.g. HOT pruning - doesn't delete tuples out from
973  * under us. Hence, pin the page until we are done looking at it. We
974  * also choose to hold sharelock on the buffer throughout --- we could
975  * release and re-acquire sharelock for each tuple, but since we aren't
976  * doing much work per tuple, the extra lock traffic is probably better
977  * avoided.
978  */
979  hscan->rs_cblock = blockno;
980  hscan->rs_cindex = FirstOffsetNumber;
981  hscan->rs_cbuf = ReadBufferExtended(scan->rs_rd, MAIN_FORKNUM,
982  blockno, RBM_NORMAL, bstrategy);
983  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
984
985  /* in heap all blocks can contain tuples, so always return true */
986  return true;
987 }
988 
989 static bool
990 heapam_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin,
991  double *liverows, double *deadrows,
992  TupleTableSlot *slot)
993 {
994  HeapScanDesc hscan = (HeapScanDesc) scan;
995  Page targpage;
996  OffsetNumber maxoffset;
997  BufferHeapTupleTableSlot *hslot;
998
999  Assert(TTS_IS_BUFFERTUPLE(slot));
1000 
1001  hslot = (BufferHeapTupleTableSlot *) slot;
1002  targpage = BufferGetPage(hscan->rs_cbuf);
1003  maxoffset = PageGetMaxOffsetNumber(targpage);
1004 
1005  /* Inner loop over all tuples on the selected page */
1006  for (; hscan->rs_cindex <= maxoffset; hscan->rs_cindex++)
1007  {
1008  ItemId itemid;
1009  HeapTuple targtuple = &hslot->base.tupdata;
1010  bool sample_it = false;
1011 
1012  itemid = PageGetItemId(targpage, hscan->rs_cindex);
1013 
1014  /*
1015  * We ignore unused and redirect line pointers. DEAD line pointers
1016  * should be counted as dead, because we need vacuum to run to get rid
1017  * of them. Note that this rule agrees with the way that
1018  * heap_page_prune() counts things.
1019  */
1020  if (!ItemIdIsNormal(itemid))
1021  {
1022  if (ItemIdIsDead(itemid))
1023  *deadrows += 1;
1024  continue;
1025  }
1026 
1027  ItemPointerSet(&targtuple->t_self, hscan->rs_cblock, hscan->rs_cindex);
1028 
1029  targtuple->t_tableOid = RelationGetRelid(scan->rs_rd);
1030  targtuple->t_data = (HeapTupleHeader) PageGetItem(targpage, itemid);
1031  targtuple->t_len = ItemIdGetLength(itemid);
1032 
1033  switch (HeapTupleSatisfiesVacuum(targtuple, OldestXmin,
1034  hscan->rs_cbuf))
1035  {
1036  case HEAPTUPLE_LIVE:
1037  sample_it = true;
1038  *liverows += 1;
1039  break;
1040 
1041  case HEAPTUPLE_DEAD:
1042  case HEAPTUPLE_RECENTLY_DEAD:
1043  /* Count dead and recently-dead rows */
1044  *deadrows += 1;
1045  break;
1046 
1047  case HEAPTUPLE_INSERT_IN_PROGRESS:
1048
1049  /*
1050  * Insert-in-progress rows are not counted. We assume that
1051  * when the inserting transaction commits or aborts, it will
1052  * send a stats message to increment the proper count. This
1053  * works right only if that transaction ends after we finish
1054  * analyzing the table; if things happen in the other order,
1055  * its stats update will be overwritten by ours. However, the
1056  * error will be large only if the other transaction runs long
1057  * enough to insert many tuples, so assuming it will finish
1058  * after us is the safer option.
1059  *
1060  * A special case is that the inserting transaction might be
1061  * our own. In this case we should count and sample the row,
1062  * to accommodate users who load a table and analyze it in one
1063  * transaction. (pgstat_report_analyze has to adjust the
1064  * numbers we send to the stats collector to make this come
1065  * out right.)
1066  */
1067  if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(targtuple->t_data)))
1068  {
1069  sample_it = true;
1070  *liverows += 1;
1071  }
1072  break;
1073 
1074  case HEAPTUPLE_DELETE_IN_PROGRESS:
1075
1076  /*
1077  * We count and sample delete-in-progress rows the same as
1078  * live ones, so that the stats counters come out right if the
1079  * deleting transaction commits after us, per the same
1080  * reasoning given above.
1081  *
1082  * If the delete was done by our own transaction, however, we
1083  * must count the row as dead to make pgstat_report_analyze's
1084  * stats adjustments come out right. (Note: this works out
1085  * properly when the row was both inserted and deleted in our
1086  * xact.)
1087  *
1088  * The net effect of these choices is that we act as though an
1089  * IN_PROGRESS transaction hasn't happened yet, except if it
1090  * is our own transaction, which we assume has happened.
1091  *
1092  * This approach ensures that we behave sanely if we see both
1093  * the pre-image and post-image rows for a row being updated
1094  * by a concurrent transaction: we will sample the pre-image
1095  * but not the post-image. We also get sane results if the
1096  * concurrent transaction never commits.
1097  */
1098  if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(targtuple->t_data)))
1099  *deadrows += 1;
1100  else
1101  {
1102  sample_it = true;
1103  *liverows += 1;
1104  }
1105  break;
1106 
1107  default:
1108  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1109  break;
1110  }
1111 
1112  if (sample_it)
1113  {
1114  ExecStoreBufferHeapTuple(targtuple, slot, hscan->rs_cbuf);
1115  hscan->rs_cindex++;
1116 
1117  /* note that we leave the buffer locked here! */
1118  return true;
1119  }
1120  }
1121 
1122  /* Now release the lock and pin on the page */
1123  UnlockReleaseBuffer(hscan->rs_cbuf);
1124  hscan->rs_cbuf = InvalidBuffer;
1125 
1126  /* also prevent old slot contents from having pin on page */
1127  ExecClearTuple(slot);
1128 
1129  return false;
1130 }
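/*
 * Illustrative sketch (not part of heapam_handler.c): ANALYZE's
 * acquire_sample_rows() drives the two callbacks above through the tableam
 * wrappers.  The simplified loop below is hypothetical; it samples a single
 * block and discards the returned tuples.
 */
static void
example_sample_one_block(Relation rel, BlockNumber blockno,
						 BufferAccessStrategy bstrategy,
						 TransactionId OldestXmin,
						 double *liverows, double *deadrows)
{
	TableScanDesc scan = table_beginscan_analyze(rel);
	TupleTableSlot *slot = table_slot_create(rel, NULL);

	if (table_scan_analyze_next_block(scan, blockno, bstrategy))
	{
		/* returns one sampled tuple per call until the block is exhausted */
		while (table_scan_analyze_next_tuple(scan, OldestXmin,
											 liverows, deadrows, slot))
		{
			/* a real caller would add the slot's contents to its sample here */
		}
	}

	ExecDropSingleTupleTableSlot(slot);
	table_endscan(scan);
}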
1131 
1132 static double
1133 heapam_index_build_range_scan(Relation heapRelation,
1134  Relation indexRelation,
1135  IndexInfo *indexInfo,
1136  bool allow_sync,
1137  bool anyvisible,
1138  bool progress,
1139  BlockNumber start_blockno,
1140  BlockNumber numblocks,
1141  IndexBuildCallback callback,
1142  void *callback_state,
1143  TableScanDesc scan)
1144 {
1145  HeapScanDesc hscan;
1146  bool is_system_catalog;
1147  bool checking_uniqueness;
1148  HeapTuple heapTuple;
1149  Datum values[INDEX_MAX_KEYS];
1150  bool isnull[INDEX_MAX_KEYS];
1151  double reltuples;
1152  ExprState *predicate;
1153  TupleTableSlot *slot;
1154  EState *estate;
1155  ExprContext *econtext;
1156  Snapshot snapshot;
1157  bool need_unregister_snapshot = false;
1158  TransactionId OldestXmin;
1159  BlockNumber previous_blkno = InvalidBlockNumber;
1160  BlockNumber root_blkno = InvalidBlockNumber;
1161  OffsetNumber root_offsets[MaxHeapTuplesPerPage];
1162 
1163  /*
1164  * sanity checks
1165  */
1166  Assert(OidIsValid(indexRelation->rd_rel->relam));
1167 
1168  /* Remember if it's a system catalog */
1169  is_system_catalog = IsSystemRelation(heapRelation);
1170 
1171  /* See whether we're verifying uniqueness/exclusion properties */
1172  checking_uniqueness = (indexInfo->ii_Unique ||
1173  indexInfo->ii_ExclusionOps != NULL);
1174 
1175  /*
1176  * "Any visible" mode is not compatible with uniqueness checks; make sure
1177  * only one of those is requested.
1178  */
1179  Assert(!(anyvisible && checking_uniqueness));
1180 
1181  /*
1182  * Need an EState for evaluation of index expressions and partial-index
1183  * predicates. Also a slot to hold the current tuple.
1184  */
1185  estate = CreateExecutorState();
1186  econtext = GetPerTupleExprContext(estate);
1187  slot = table_slot_create(heapRelation, NULL);
1188 
1189  /* Arrange for econtext's scan tuple to be the tuple under test */
1190  econtext->ecxt_scantuple = slot;
1191 
1192  /* Set up execution state for predicate, if any. */
1193  predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
1194 
1195  /*
1196  * Prepare for scan of the base relation. In a normal index build, we use
1197  * SnapshotAny because we must retrieve all tuples and do our own time
1198  * qual checks (because we have to index RECENTLY_DEAD tuples). In a
1199  * concurrent build, or during bootstrap, we take a regular MVCC snapshot
1200  * and index whatever's live according to that.
1201  */
1202  OldestXmin = InvalidTransactionId;
1203 
1204  /* okay to ignore lazy VACUUMs here */
1205  if (!IsBootstrapProcessingMode() && !indexInfo->ii_Concurrent)
1206  OldestXmin = GetOldestNonRemovableTransactionId(heapRelation);
1207 
1208  if (!scan)
1209  {
1210  /*
1211  * Serial index build.
1212  *
1213  * Must begin our own heap scan in this case. We may also need to
1214  * register a snapshot whose lifetime is under our direct control.
1215  */
1216  if (!TransactionIdIsValid(OldestXmin))
1217  {
1218  snapshot = RegisterSnapshot(GetTransactionSnapshot());
1219  need_unregister_snapshot = true;
1220  }
1221  else
1222  snapshot = SnapshotAny;
1223 
1224  scan = table_beginscan_strat(heapRelation, /* relation */
1225  snapshot, /* snapshot */
1226  0, /* number of keys */
1227  NULL, /* scan key */
1228  true, /* buffer access strategy OK */
1229  allow_sync); /* syncscan OK? */
1230  }
1231  else
1232  {
1233  /*
1234  * Parallel index build.
1235  *
1236  * Parallel case never registers/unregisters own snapshot. Snapshot
1237  * is taken from parallel heap scan, and is SnapshotAny or an MVCC
1238  * snapshot, based on same criteria as serial case.
1239  */
1240  Assert(!IsBootstrapProcessingMode());
1241  Assert(allow_sync);
1242  snapshot = scan->rs_snapshot;
1243  }
1244 
1245  hscan = (HeapScanDesc) scan;
1246 
1247  /*
1248  * Must have called GetOldestNonRemovableTransactionId() if using
1249  * SnapshotAny. Shouldn't have for an MVCC snapshot. (It's especially
1250  * worth checking this for parallel builds, since ambuild routines that
1251  * support parallel builds must work these details out for themselves.)
1252  */
1253  Assert(snapshot == SnapshotAny || IsMVCCSnapshot(snapshot));
1254  Assert(snapshot == SnapshotAny ? TransactionIdIsValid(OldestXmin) :
1255  !TransactionIdIsValid(OldestXmin));
1256  Assert(snapshot == SnapshotAny || !anyvisible);
1257 
1258  /* Publish number of blocks to scan */
1259  if (progress)
1260  {
1261  BlockNumber nblocks;
1262 
1263  if (hscan->rs_base.rs_parallel != NULL)
1264  {
1265  ParallelBlockTableScanDesc pbscan =
1266  (ParallelBlockTableScanDesc) hscan->rs_base.rs_parallel;
1267
1268  nblocks = pbscan->phs_nblocks;
1269  }
1270  else
1271  nblocks = hscan->rs_nblocks;
1272 
1273  pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_TOTAL,
1274  nblocks);
1275  }
1276 
1277  /* set our scan endpoints */
1278  if (!allow_sync)
1279  heap_setscanlimits(scan, start_blockno, numblocks);
1280  else
1281  {
1282  /* syncscan can only be requested on whole relation */
1283  Assert(start_blockno == 0);
1284  Assert(numblocks == InvalidBlockNumber);
1285  }
1286 
1287  reltuples = 0;
1288 
1289  /*
1290  * Scan all tuples in the base relation.
1291  */
1292  while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
1293  {
1294  bool tupleIsAlive;
1295 
1296  CHECK_FOR_INTERRUPTS();
1297
1298  /* Report scan progress, if asked to. */
1299  if (progress)
1300  {
1301  BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
1302 
1303  if (blocks_done != previous_blkno)
1304  {
1305  pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE,
1306  blocks_done);
1307  previous_blkno = blocks_done;
1308  }
1309  }
1310 
1311  /*
1312  * When dealing with a HOT-chain of updated tuples, we want to index
1313  * the values of the live tuple (if any), but index it under the TID
1314  * of the chain's root tuple. This approach is necessary to preserve
1315  * the HOT-chain structure in the heap. So we need to be able to find
1316  * the root item offset for every tuple that's in a HOT-chain. When
1317  * first reaching a new page of the relation, call
1318  * heap_get_root_tuples() to build a map of root item offsets on the
1319  * page.
1320  *
1321  * It might look unsafe to use this information across buffer
1322  * lock/unlock. However, we hold ShareLock on the table so no
1323  * ordinary insert/update/delete should occur; and we hold pin on the
1324  * buffer continuously while visiting the page, so no pruning
1325  * operation can occur either.
1326  *
1327  * In cases with only ShareUpdateExclusiveLock on the table, it's
1328  * possible for some HOT tuples to appear that we didn't know about
1329  * when we first read the page. To handle that case, we re-obtain the
1330  * list of root offsets when a HOT tuple points to a root item that we
1331  * don't know about.
1332  *
1333  * Also, although our opinions about tuple liveness could change while
1334  * we scan the page (due to concurrent transaction commits/aborts),
1335  * the chain root locations won't, so this info doesn't need to be
1336  * rebuilt after waiting for another transaction.
1337  *
1338  * Note the implied assumption that there is no more than one live
1339  * tuple per HOT-chain --- else we could create more than one index
1340  * entry pointing to the same root tuple.
1341  */
1342  if (hscan->rs_cblock != root_blkno)
1343  {
1344  Page page = BufferGetPage(hscan->rs_cbuf);
1345 
1346  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1347  heap_get_root_tuples(page, root_offsets);
1348  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1349
1350  root_blkno = hscan->rs_cblock;
1351  }
1352 
1353  if (snapshot == SnapshotAny)
1354  {
1355  /* do our own time qual check */
1356  bool indexIt;
1357  TransactionId xwait;
1358 
1359  recheck:
1360 
1361  /*
1362  * We could possibly get away with not locking the buffer here,
1363  * since caller should hold ShareLock on the relation, but let's
1364  * be conservative about it. (This remark is still correct even
1365  * with HOT-pruning: our pin on the buffer prevents pruning.)
1366  */
1367  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1368
1369  /*
1370  * The criteria for counting a tuple as live in this block need to
1371  * match what analyze.c's heapam_scan_analyze_next_tuple() does,
1372  * otherwise CREATE INDEX and ANALYZE may produce wildly different
1373  * reltuples values, e.g. when there are many recently-dead
1374  * tuples.
1375  */
1376  switch (HeapTupleSatisfiesVacuum(heapTuple, OldestXmin,
1377  hscan->rs_cbuf))
1378  {
1379  case HEAPTUPLE_DEAD:
1380  /* Definitely dead, we can ignore it */
1381  indexIt = false;
1382  tupleIsAlive = false;
1383  break;
1384  case HEAPTUPLE_LIVE:
1385  /* Normal case, index and unique-check it */
1386  indexIt = true;
1387  tupleIsAlive = true;
1388  /* Count it as live, too */
1389  reltuples += 1;
1390  break;
1391  case HEAPTUPLE_RECENTLY_DEAD:
1392
1393  /*
1394  * If tuple is recently deleted then we must index it
1395  * anyway to preserve MVCC semantics. (Pre-existing
1396  * transactions could try to use the index after we finish
1397  * building it, and may need to see such tuples.)
1398  *
1399  * However, if it was HOT-updated then we must only index
1400  * the live tuple at the end of the HOT-chain. Since this
1401  * breaks semantics for pre-existing snapshots, mark the
1402  * index as unusable for them.
1403  *
1404  * We don't count recently-dead tuples in reltuples, even
1405  * if we index them; see heapam_scan_analyze_next_tuple().
1406  */
1407  if (HeapTupleIsHotUpdated(heapTuple))
1408  {
1409  indexIt = false;
1410  /* mark the index as unsafe for old snapshots */
1411  indexInfo->ii_BrokenHotChain = true;
1412  }
1413  else
1414  indexIt = true;
1415  /* In any case, exclude the tuple from unique-checking */
1416  tupleIsAlive = false;
1417  break;
1418  case HEAPTUPLE_INSERT_IN_PROGRESS:
1419
1420  /*
1421  * In "anyvisible" mode, this tuple is visible and we
1422  * don't need any further checks.
1423  */
1424  if (anyvisible)
1425  {
1426  indexIt = true;
1427  tupleIsAlive = true;
1428  reltuples += 1;
1429  break;
1430  }
1431 
1432  /*
1433  * Since caller should hold ShareLock or better, normally
1434  * the only way to see this is if it was inserted earlier
1435  * in our own transaction. However, it can happen in
1436  * system catalogs, since we tend to release write lock
1437  * before commit there. Give a warning if neither case
1438  * applies.
1439  */
1440  xwait = HeapTupleHeaderGetXmin(heapTuple->t_data);
1441  if (!TransactionIdIsCurrentTransactionId(xwait))
1442  {
1443  if (!is_system_catalog)
1444  elog(WARNING, "concurrent insert in progress within table \"%s\"",
1445  RelationGetRelationName(heapRelation));
1446 
1447  /*
1448  * If we are performing uniqueness checks, indexing
1449  * such a tuple could lead to a bogus uniqueness
1450  * failure. In that case we wait for the inserting
1451  * transaction to finish and check again.
1452  */
1453  if (checking_uniqueness)
1454  {
1455  /*
1456  * Must drop the lock on the buffer before we wait
1457  */
1458  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1459  XactLockTableWait(xwait, heapRelation,
1460  &heapTuple->t_self,
1461  XLTW_InsertIndexUnique);
1462  CHECK_FOR_INTERRUPTS();
1463  goto recheck;
1464  }
1465  }
1466  else
1467  {
1468  /*
1469  * For consistency with
1470  * heapam_scan_analyze_next_tuple(), count
1471  * HEAPTUPLE_INSERT_IN_PROGRESS tuples as live only
1472  * when inserted by our own transaction.
1473  */
1474  reltuples += 1;
1475  }
1476 
1477  /*
1478  * We must index such tuples, since if the index build
1479  * commits then they're good.
1480  */
1481  indexIt = true;
1482  tupleIsAlive = true;
1483  break;
1484  case HEAPTUPLE_DELETE_IN_PROGRESS:
1485
1486  /*
1487  * As with INSERT_IN_PROGRESS case, this is unexpected
1488  * unless it's our own deletion or a system catalog; but
1489  * in anyvisible mode, this tuple is visible.
1490  */
1491  if (anyvisible)
1492  {
1493  indexIt = true;
1494  tupleIsAlive = false;
1495  reltuples += 1;
1496  break;
1497  }
1498 
1499  xwait = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1500  if (!TransactionIdIsCurrentTransactionId(xwait))
1501  {
1502  if (!is_system_catalog)
1503  elog(WARNING, "concurrent delete in progress within table \"%s\"",
1504  RelationGetRelationName(heapRelation));
1505 
1506  /*
1507  * If we are performing uniqueness checks, assuming
1508  * the tuple is dead could lead to missing a
1509  * uniqueness violation. In that case we wait for the
1510  * deleting transaction to finish and check again.
1511  *
1512  * Also, if it's a HOT-updated tuple, we should not
1513  * index it but rather the live tuple at the end of
1514  * the HOT-chain. However, the deleting transaction
1515  * could abort, possibly leaving this tuple as live
1516  * after all, in which case it has to be indexed. The
1517  * only way to know what to do is to wait for the
1518  * deleting transaction to finish and check again.
1519  */
1520  if (checking_uniqueness ||
1521  HeapTupleIsHotUpdated(heapTuple))
1522  {
1523  /*
1524  * Must drop the lock on the buffer before we wait
1525  */
1526  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1527  XactLockTableWait(xwait, heapRelation,
1528  &heapTuple->t_self,
1529  XLTW_InsertIndexUnique);
1530  CHECK_FOR_INTERRUPTS();
1531  goto recheck;
1532  }
1533 
1534  /*
1535  * Otherwise index it but don't check for uniqueness,
1536  * the same as a RECENTLY_DEAD tuple.
1537  */
1538  indexIt = true;
1539 
1540  /*
1541  * Count HEAPTUPLE_DELETE_IN_PROGRESS tuples as live,
1542  * if they were not deleted by the current
1543  * transaction. That's what
1544  * heapam_scan_analyze_next_tuple() does, and we want
1545  * the behavior to be consistent.
1546  */
1547  reltuples += 1;
1548  }
1549  else if (HeapTupleIsHotUpdated(heapTuple))
1550  {
1551  /*
1552  * It's a HOT-updated tuple deleted by our own xact.
1553  * We can assume the deletion will commit (else the
1554  * index contents don't matter), so treat the same as
1555  * RECENTLY_DEAD HOT-updated tuples.
1556  */
1557  indexIt = false;
1558  /* mark the index as unsafe for old snapshots */
1559  indexInfo->ii_BrokenHotChain = true;
1560  }
1561  else
1562  {
1563  /*
1564  * It's a regular tuple deleted by our own xact. Index
1565  * it, but don't check for uniqueness nor count in
1566  * reltuples, the same as a RECENTLY_DEAD tuple.
1567  */
1568  indexIt = true;
1569  }
1570  /* In any case, exclude the tuple from unique-checking */
1571  tupleIsAlive = false;
1572  break;
1573  default:
1574  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1575  indexIt = tupleIsAlive = false; /* keep compiler quiet */
1576  break;
1577  }
1578 
1579  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1580
1581  if (!indexIt)
1582  continue;
1583  }
1584  else
1585  {
1586  /* heap_getnext did the time qual check */
1587  tupleIsAlive = true;
1588  reltuples += 1;
1589  }
1590 
1591  MemoryContextReset(econtext->ecxt_per_tuple_memory);
1592
1593  /* Set up for predicate or expression evaluation */
1594  ExecStoreBufferHeapTuple(heapTuple, slot, hscan->rs_cbuf);
1595 
1596  /*
1597  * In a partial index, discard tuples that don't satisfy the
1598  * predicate.
1599  */
1600  if (predicate != NULL)
1601  {
1602  if (!ExecQual(predicate, econtext))
1603  continue;
1604  }
1605 
1606  /*
1607  * For the current heap tuple, extract all the attributes we use in
1608  * this index, and note which are null. This also performs evaluation
1609  * of any expressions needed.
1610  */
1611  FormIndexDatum(indexInfo,
1612  slot,
1613  estate,
1614  values,
1615  isnull);
1616 
1617  /*
1618  * You'd think we should go ahead and build the index tuple here, but
1619  * some index AMs want to do further processing on the data first. So
1620  * pass the values[] and isnull[] arrays, instead.
1621  */
1622 
1623  if (HeapTupleIsHeapOnly(heapTuple))
1624  {
1625  /*
1626  * For a heap-only tuple, pretend its TID is that of the root. See
1627  * src/backend/access/heap/README.HOT for discussion.
1628  */
1629  ItemPointerData tid;
1630  OffsetNumber offnum;
1631 
1632  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_self);
1633 
1634  /*
1635  * If a HOT tuple points to a root that we don't know
1636  * about, obtain root items afresh. If that still fails,
1637  * report it as corruption.
1638  */
1639  if (root_offsets[offnum - 1] == InvalidOffsetNumber)
1640  {
1641  Page page = BufferGetPage(hscan->rs_cbuf);
1642 
1643  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1644  heap_get_root_tuples(page, root_offsets);
1645  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1646  }
1647 
1648  if (!OffsetNumberIsValid(root_offsets[offnum - 1]))
1649  ereport(ERROR,
1650  (errcode(ERRCODE_DATA_CORRUPTED),
1651  errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"",
1652  ItemPointerGetBlockNumber(&heapTuple->t_self),
1653  offnum,
1654  RelationGetRelationName(heapRelation))));
1655 
1656  ItemPointerSet(&tid, ItemPointerGetBlockNumber(&heapTuple->t_self),
1657  root_offsets[offnum - 1]);
1658 
1659  /* Call the AM's callback routine to process the tuple */
1660  callback(indexRelation, &tid, values, isnull, tupleIsAlive,
1661  callback_state);
1662  }
1663  else
1664  {
1665  /* Call the AM's callback routine to process the tuple */
1666  callback(indexRelation, &heapTuple->t_self, values, isnull,
1667  tupleIsAlive, callback_state);
1668  }
1669  }
1670 
1671  /* Report scan progress one last time. */
1672  if (progress)
1673  {
1674  BlockNumber blks_done;
1675 
1676  if (hscan->rs_base.rs_parallel != NULL)
1677  {
1679 
1681  blks_done = pbscan->phs_nblocks;
1682  }
1683  else
1684  blks_done = hscan->rs_nblocks;
1685 
1687  blks_done);
1688  }
1689 
1690  table_endscan(scan);
1691 
1692  /* we can now forget our snapshot, if set and registered by us */
1693  if (need_unregister_snapshot)
1694  UnregisterSnapshot(snapshot);
1695 
1696  ExecDropSingleTupleTableSlot(slot);
1697
1698  FreeExecutorState(estate);
1699 
1700  /* These may have been pointing to the now-gone estate */
1701  indexInfo->ii_ExpressionsState = NIL;
1702  indexInfo->ii_PredicateState = NULL;
1703 
1704  return reltuples;
1705 }
1706 
1707 static void
1708 heapam_index_validate_scan(Relation heapRelation,
1709  Relation indexRelation,
1710  IndexInfo *indexInfo,
1711  Snapshot snapshot,
1712  ValidateIndexState *state)
1713 {
1714  TableScanDesc scan;
1715  HeapScanDesc hscan;
1716  HeapTuple heapTuple;
1717  Datum values[INDEX_MAX_KEYS];
1718  bool isnull[INDEX_MAX_KEYS];
1719  ExprState *predicate;
1720  TupleTableSlot *slot;
1721  EState *estate;
1722  ExprContext *econtext;
1723  BlockNumber root_blkno = InvalidBlockNumber;
1724  OffsetNumber root_offsets[MaxHeapTuplesPerPage];
1725  bool in_index[MaxHeapTuplesPerPage];
1726  BlockNumber previous_blkno = InvalidBlockNumber;
1727 
1728  /* state variables for the merge */
1729  ItemPointer indexcursor = NULL;
1730  ItemPointerData decoded;
1731  bool tuplesort_empty = false;
1732 
1733  /*
1734  * sanity checks
1735  */
1736  Assert(OidIsValid(indexRelation->rd_rel->relam));
1737 
1738  /*
1739  * Need an EState for evaluation of index expressions and partial-index
1740  * predicates. Also a slot to hold the current tuple.
1741  */
1742  estate = CreateExecutorState();
1743  econtext = GetPerTupleExprContext(estate);
1744  slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation),
1745  &TTSOpsHeapTuple);
1746 
1747  /* Arrange for econtext's scan tuple to be the tuple under test */
1748  econtext->ecxt_scantuple = slot;
1749 
1750  /* Set up execution state for predicate, if any. */
1751  predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
1752 
1753  /*
1754  * Prepare for scan of the base relation. We need just those tuples
1755  * satisfying the passed-in reference snapshot. We must disable syncscan
1756  * here, because it's critical that we read from block zero forward to
1757  * match the sorted TIDs.
1758  */
1759  scan = table_beginscan_strat(heapRelation, /* relation */
1760  snapshot, /* snapshot */
1761  0, /* number of keys */
1762  NULL, /* scan key */
1763  true, /* buffer access strategy OK */
1764  false); /* syncscan not OK */
1765  hscan = (HeapScanDesc) scan;
1766 
1767  pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_TOTAL,
1768  hscan->rs_nblocks);
1769 
1770  /*
1771  * Scan all tuples matching the snapshot.
1772  */
1773  while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
1774  {
1775  ItemPointer heapcursor = &heapTuple->t_self;
1776  ItemPointerData rootTuple;
1777  OffsetNumber root_offnum;
1778 
1779  CHECK_FOR_INTERRUPTS();
1780
1781  state->htups += 1;
1782 
1783  if ((previous_blkno == InvalidBlockNumber) ||
1784  (hscan->rs_cblock != previous_blkno))
1785  {
1786  pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE,
1787  hscan->rs_cblock);
1788  previous_blkno = hscan->rs_cblock;
1789  }
1790 
1791  /*
1792  * As commented in table_index_build_scan, we should index heap-only
1793  * tuples under the TIDs of their root tuples; so when we advance onto
1794  * a new heap page, build a map of root item offsets on the page.
1795  *
1796  * This complicates merging against the tuplesort output: we will
1797  * visit the live tuples in order by their offsets, but the root
1798  * offsets that we need to compare against the index contents might be
1799  * ordered differently. So we might have to "look back" within the
1800  * tuplesort output, but only within the current page. We handle that
1801  * by keeping a bool array in_index[] showing all the
1802  * already-passed-over tuplesort output TIDs of the current page. We
1803  * clear that array here, when advancing onto a new heap page.
1804  */
1805  if (hscan->rs_cblock != root_blkno)
1806  {
1807  Page page = BufferGetPage(hscan->rs_cbuf);
1808 
1809  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1810  heap_get_root_tuples(page, root_offsets);
1811  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1812
1813  memset(in_index, 0, sizeof(in_index));
1814 
1815  root_blkno = hscan->rs_cblock;
1816  }
1817 
1818  /* Convert actual tuple TID to root TID */
1819  rootTuple = *heapcursor;
1820  root_offnum = ItemPointerGetOffsetNumber(heapcursor);
1821 
1822  if (HeapTupleIsHeapOnly(heapTuple))
1823  {
1824  root_offnum = root_offsets[root_offnum - 1];
1825  if (!OffsetNumberIsValid(root_offnum))
1826  ereport(ERROR,
1827  (errcode(ERRCODE_DATA_CORRUPTED),
1828  errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"",
1829  ItemPointerGetBlockNumber(heapcursor),
1830  ItemPointerGetOffsetNumber(heapcursor),
1831  RelationGetRelationName(heapRelation))));
1832  ItemPointerSetOffsetNumber(&rootTuple, root_offnum);
1833  }
1834 
1835  /*
1836  * "merge" by skipping through the index tuples until we find or pass
1837  * the current root tuple.
1838  */
1839  while (!tuplesort_empty &&
1840  (!indexcursor ||
1841  ItemPointerCompare(indexcursor, &rootTuple) < 0))
1842  {
1843  Datum ts_val;
1844  bool ts_isnull;
1845 
1846  if (indexcursor)
1847  {
1848  /*
1849  * Remember index items seen earlier on the current heap page
1850  */
1851  if (ItemPointerGetBlockNumber(indexcursor) == root_blkno)
1852  in_index[ItemPointerGetOffsetNumber(indexcursor) - 1] = true;
1853  }
1854 
1855  tuplesort_empty = !tuplesort_getdatum(state->tuplesort, true,
1856  &ts_val, &ts_isnull, NULL);
1857  Assert(tuplesort_empty || !ts_isnull);
1858  if (!tuplesort_empty)
1859  {
1860  itemptr_decode(&decoded, DatumGetInt64(ts_val));
1861  indexcursor = &decoded;
1862 
1863  /* If int8 is pass-by-ref, free (encoded) TID Datum memory */
1864 #ifndef USE_FLOAT8_BYVAL
1865  pfree(DatumGetPointer(ts_val));
1866 #endif
1867  }
1868  else
1869  {
1870  /* Be tidy */
1871  indexcursor = NULL;
1872  }
1873  }
1874 
1875  /*
1876  * If the tuplesort has overshot *and* we didn't see a match earlier,
1877  * then this tuple is missing from the index, so insert it.
1878  */
1879  if ((tuplesort_empty ||
1880  ItemPointerCompare(indexcursor, &rootTuple) > 0) &&
1881  !in_index[root_offnum - 1])
1882  {
1883  MemoryContextReset(econtext->ecxt_per_tuple_memory);
1884
1885  /* Set up for predicate or expression evaluation */
1886  ExecStoreHeapTuple(heapTuple, slot, false);
1887 
1888  /*
1889  * In a partial index, discard tuples that don't satisfy the
1890  * predicate.
1891  */
1892  if (predicate != NULL)
1893  {
1894  if (!ExecQual(predicate, econtext))
1895  continue;
1896  }
1897 
1898  /*
1899  * For the current heap tuple, extract all the attributes we use
1900  * in this index, and note which are null. This also performs
1901  * evaluation of any expressions needed.
1902  */
1903  FormIndexDatum(indexInfo,
1904  slot,
1905  estate,
1906  values,
1907  isnull);
1908 
1909  /*
1910  * You'd think we should go ahead and build the index tuple here,
1911  * but some index AMs want to do further processing on the data
1912  * first. So pass the values[] and isnull[] arrays, instead.
1913  */
1914 
1915  /*
1916  * If the tuple is already committed dead, you might think we
1917  * could suppress uniqueness checking, but this is no longer true
1918  * in the presence of HOT, because the insert is actually a proxy
1919  * for a uniqueness check on the whole HOT-chain. That is, the
1920  * tuple we have here could be dead because it was already
1921  * HOT-updated, and if so the updating transaction will not have
1922  * thought it should insert index entries. The index AM will
1923  * check the whole HOT-chain and correctly detect a conflict if
1924  * there is one.
1925  */
1926 
1927  index_insert(indexRelation,
1928  values,
1929  isnull,
1930  &rootTuple,
1931  heapRelation,
1932  indexInfo->ii_Unique ?
1933  UNIQUE_CHECK_YES : UNIQUE_CHECK_NO,
1934  indexInfo);
1935 
1936  state->tups_inserted += 1;
1937  }
1938  }
1939 
1940  table_endscan(scan);
1941 
1942  ExecDropSingleTupleTableSlot(slot);
1943
1944  FreeExecutorState(estate);
1945 
1946  /* These may have been pointing to the now-gone estate */
1947  indexInfo->ii_ExpressionsState = NIL;
1948  indexInfo->ii_PredicateState = NULL;
1949 }
1950 
1951 /*
1952  * Return the number of blocks that have been read by this scan since
1953  * starting. This is meant for progress reporting rather than being fully
1954  * accurate: in a parallel scan, workers can be concurrently reading blocks
1955  * further ahead than what we report.
1956  */
1957 static BlockNumber
1958 heapam_scan_get_blocks_done(HeapScanDesc hscan)
1959 {
1960  ParallelBlockTableScanDesc bpscan = NULL;
1961  BlockNumber startblock;
1962  BlockNumber blocks_done;
1963 
1964  if (hscan->rs_base.rs_parallel != NULL)
1965  {
1966  bpscan = (ParallelBlockTableScanDesc) hscan->rs_base.rs_parallel;
1967  startblock = bpscan->phs_startblock;
1968  }
1969  else
1970  startblock = hscan->rs_startblock;
1971 
1972  /*
1973  * Might have wrapped around the end of the relation, if startblock was
1974  * not zero.
1975  */
1976  if (hscan->rs_cblock > startblock)
1977  blocks_done = hscan->rs_cblock - startblock;
1978  else
1979  {
1980  BlockNumber nblocks;
1981 
1982  nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks;
1983  blocks_done = nblocks - startblock +
1984  hscan->rs_cblock;
1985  }
1986 
1987  return blocks_done;
1988 }
1989 
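/*
 * Worked example for the wraparound arithmetic above (illustrative numbers):
 * with a synchronized scan that started at startblock = 900 of a 1000-block
 * relation, reaching rs_cblock = 50 after wrapping around gives
 *     blocks_done = nblocks - startblock + rs_cblock = 1000 - 900 + 50 = 150.
 * Without wraparound, e.g. rs_cblock = 950, it is simply 950 - 900 = 50.
 */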
1990 
1991 /* ------------------------------------------------------------------------
1992  * Miscellaneous callbacks for the heap AM
1993  * ------------------------------------------------------------------------
1994  */
1995 
1996 /*
1997  * Check to see whether the table needs a TOAST table. It does only if
1998  * (1) there are any toastable attributes, and (2) the maximum length
1999  * of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to
2000  * create a toast table for something like "f1 varchar(20)".)
2001  */
2002 static bool
2003 heapam_relation_needs_toast_table(Relation rel)
2004 {
2005  int32 data_length = 0;
2006  bool maxlength_unknown = false;
2007  bool has_toastable_attrs = false;
2008  TupleDesc tupdesc = rel->rd_att;
2009  int32 tuple_length;
2010  int i;
2011 
2012  for (i = 0; i < tupdesc->natts; i++)
2013  {
2014  Form_pg_attribute att = TupleDescAttr(tupdesc, i);
2015 
2016  if (att->attisdropped)
2017  continue;
2018  data_length = att_align_nominal(data_length, att->attalign);
2019  if (att->attlen > 0)
2020  {
2021  /* Fixed-length types are never toastable */
2022  data_length += att->attlen;
2023  }
2024  else
2025  {
2026  int32 maxlen = type_maximum_size(att->atttypid,
2027  att->atttypmod);
2028 
2029  if (maxlen < 0)
2030  maxlength_unknown = true;
2031  else
2032  data_length += maxlen;
2033  if (att->attstorage != TYPSTORAGE_PLAIN)
2034  has_toastable_attrs = true;
2035  }
2036  }
2037  if (!has_toastable_attrs)
2038  return false; /* nothing to toast? */
2039  if (maxlength_unknown)
2040  return true; /* any unlimited-length attrs? */
2041  tuple_length = MAXALIGN(SizeofHeapTupleHeader +
2042  BITMAPLEN(tupdesc->natts)) +
2043  MAXALIGN(data_length);
2044  return (tuple_length > TOAST_TUPLE_THRESHOLD);
2045 }
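
To illustrate the decision rule above, here is a hedged standalone sketch that applies the same three checks (any toastable attributes, any attribute of unknown maximum length, estimated data length versus a threshold) to a hand-built attribute list. The FakeAttr struct and the 2000-byte threshold are invented stand-ins; the real code works from pg_attribute metadata, alignment macros, and TOAST_TUPLE_THRESHOLD.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for one column's metadata. */
typedef struct
{
	int		attlen;			/* > 0: fixed width; -1: varlena */
	int		max_len;		/* varlena: declared max size, or -1 if unbounded */
	bool	plain_storage;	/* true if storage is PLAIN (never toasted) */
} FakeAttr;

/* Same decision rule as above, minus alignment and header details. */
static bool
needs_toast_table(const FakeAttr *atts, int natts, int threshold)
{
	int		data_length = 0;
	bool	maxlength_unknown = false;
	bool	has_toastable_attrs = false;

	for (int i = 0; i < natts; i++)
	{
		if (atts[i].attlen > 0)
			data_length += atts[i].attlen;	/* fixed-length, never toastable */
		else
		{
			if (atts[i].max_len < 0)
				maxlength_unknown = true;
			else
				data_length += atts[i].max_len;
			if (!atts[i].plain_storage)
				has_toastable_attrs = true;
		}
	}
	if (!has_toastable_attrs)
		return false;
	if (maxlength_unknown)
		return true;
	return data_length > threshold;
}

int
main(void)
{
	/* like "f1 varchar(20)": toastable type, but can never exceed the threshold */
	FakeAttr small[] = {{-1, 24, false}};
	/* like "t text": unbounded length, so a TOAST table is needed */
	FakeAttr big[] = {{-1, -1, false}};

	printf("%d\n", needs_toast_table(small, 1, 2000));	/* 0 */
	printf("%d\n", needs_toast_table(big, 1, 2000));	/* 1 */
	return 0;
}
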
2046 
2047 /*
2048  * TOAST tables for heap relations are just heap relations.
2049  */
2050 static Oid
2051 heapam_relation_toast_am(Relation rel)
2052 {
2053  return rel->rd_rel->relam;
2054 }
2055 
2056 
2057 /* ------------------------------------------------------------------------
2058  * Planner related callbacks for the heap AM
2059  * ------------------------------------------------------------------------
2060  */
2061 
2062 #define HEAP_OVERHEAD_BYTES_PER_TUPLE \
2063  (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))
2064 #define HEAP_USABLE_BYTES_PER_PAGE \
2065  (BLCKSZ - SizeOfPageHeaderData)
2066 
2067 static void
2068 heapam_estimate_rel_size(Relation rel, int32 *attr_widths,
2069  BlockNumber *pages, double *tuples,
2070  double *allvisfrac)
2071 {
2072  table_block_relation_estimate_size(rel, attr_widths, pages,
2073  tuples, allvisfrac,
2074  HEAP_OVERHEAD_BYTES_PER_TUPLE,
2075  HEAP_USABLE_BYTES_PER_PAGE);
2076 }
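
These two constants give table_block_relation_estimate_size() the heap's per-tuple overhead and per-page usable space. Assuming a typical 64-bit build with 8 kB blocks (a MAXALIGN'd 23-byte tuple header plus a 4-byte line pointer, and a 24-byte page header), a rough density check, not the planner's actual computation, looks like this:

#include <stdio.h>

int
main(void)
{
	/* Assumed values for a typical 64-bit, 8 kB-block build. */
	const int overhead_per_tuple = 24 + 4;	/* MAXALIGN'd header + line pointer */
	const int usable_per_page = 8192 - 24;	/* BLCKSZ - page header */
	const int datum_width = 100;			/* hypothetical average row payload */

	int tuples_per_page = usable_per_page / (overhead_per_tuple + datum_width);

	printf("about %d tuples per page\n", tuples_per_page);	/* ~63 */
	return 0;
}
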
2077 
2078 
2079 /* ------------------------------------------------------------------------
2080  * Executor related callbacks for the heap AM
2081  * ------------------------------------------------------------------------
2082  */
2083 
2084 static bool
2085 heapam_scan_bitmap_next_block(TableScanDesc scan,
2086  TBMIterateResult *tbmres)
2087 {
2088  HeapScanDesc hscan = (HeapScanDesc) scan;
2089  BlockNumber page = tbmres->blockno;
2090  Buffer buffer;
2091  Snapshot snapshot;
2092  int ntup;
2093 
2094  hscan->rs_cindex = 0;
2095  hscan->rs_ntuples = 0;
2096 
2097  /*
2098  * Ignore any claimed entries past what we think is the end of the
2099  * relation. It may have been extended after the start of our scan (we
2100  * only hold an AccessShareLock, and it could be inserts from this
2101  * backend).
2102  */
2103  if (page >= hscan->rs_nblocks)
2104  return false;
2105 
2106  /*
2107  * Acquire pin on the target heap page, trading in any pin we held before.
2108  */
2109  hscan->rs_cbuf = ReleaseAndReadBuffer(hscan->rs_cbuf,
2110  scan->rs_rd,
2111  page);
2112  hscan->rs_cblock = page;
2113  buffer = hscan->rs_cbuf;
2114  snapshot = scan->rs_snapshot;
2115 
2116  ntup = 0;
2117 
2118  /*
2119  * Prune and repair fragmentation for the whole page, if possible.
2120  */
2121  heap_page_prune_opt(scan->rs_rd, buffer);
2122 
2123  /*
2124  * We must hold share lock on the buffer content while examining tuple
2125  * visibility. Afterwards, however, the tuples we have found to be
2126  * visible are guaranteed good as long as we hold the buffer pin.
2127  */
2128  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2129 
2130  /*
2131  * We need two separate strategies for lossy and non-lossy cases.
2132  */
2133  if (tbmres->ntuples >= 0)
2134  {
2135  /*
2136  * Bitmap is non-lossy, so we just look through the offsets listed in
2137  * tbmres; but we have to follow any HOT chain starting at each such
2138  * offset.
2139  */
2140  int curslot;
2141 
2142  for (curslot = 0; curslot < tbmres->ntuples; curslot++)
2143  {
2144  OffsetNumber offnum = tbmres->offsets[curslot];
2145  ItemPointerData tid;
2146  HeapTupleData heapTuple;
2147 
2148  ItemPointerSet(&tid, page, offnum);
2149  if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot,
2150  &heapTuple, NULL, true))
2151  hscan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
2152  }
2153  }
2154  else
2155  {
2156  /*
2157  * Bitmap is lossy, so we must examine each line pointer on the page.
2158  * But we can ignore HOT chains, since we'll check each tuple anyway.
2159  */
2160  Page dp = (Page) BufferGetPage(buffer);
2161  OffsetNumber maxoff = PageGetMaxOffsetNumber(dp);
2162  OffsetNumber offnum;
2163 
2164  for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
2165  {
2166  ItemId lp;
2167  HeapTupleData loctup;
2168  bool valid;
2169 
2170  lp = PageGetItemId(dp, offnum);
2171  if (!ItemIdIsNormal(lp))
2172  continue;
2173  loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
2174  loctup.t_len = ItemIdGetLength(lp);
2175  loctup.t_tableOid = scan->rs_rd->rd_id;
2176  ItemPointerSet(&loctup.t_self, page, offnum);
2177  valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
2178  if (valid)
2179  {
2180  hscan->rs_vistuples[ntup++] = offnum;
2181  PredicateLockTID(scan->rs_rd, &loctup.t_self, snapshot,
2182  HeapTupleHeaderGetXmin(loctup.t_data));
2183  }
2184  HeapCheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
2185  buffer, snapshot);
2186  }
2187  }
2188 
2189  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2190 
2191  Assert(ntup <= MaxHeapTuplesPerPage);
2192  hscan->rs_ntuples = ntup;
2193 
2194  return ntup > 0;
2195 }
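
The two branches above differ only in how candidate offsets are produced: an exact (non-lossy) bitmap page carries an explicit offset list, while a lossy page forces a walk over every line pointer on the page. Here is a minimal standalone sketch of that control-flow split, with an invented page_result struct standing in for TBMIterateResult:

#include <stdio.h>

/* Invented stand-in for TBMIterateResult: ntuples < 0 means "lossy". */
typedef struct
{
	int	ntuples;
	int	offsets[8];
} page_result;

static void
visit_candidates(const page_result *res, int max_offset_on_page)
{
	if (res->ntuples >= 0)
	{
		/* exact: only the listed offsets need to be checked */
		for (int i = 0; i < res->ntuples; i++)
			printf("check offset %d\n", res->offsets[i]);
	}
	else
	{
		/* lossy: every line pointer on the page must be examined */
		for (int off = 1; off <= max_offset_on_page; off++)
			printf("check offset %d\n", off);
	}
}

int
main(void)
{
	page_result exact = {3, {2, 5, 9}};
	page_result lossy = {-1, {0}};

	visit_candidates(&exact, 20);
	visit_candidates(&lossy, 4);
	return 0;
}
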
2196 
2197 static bool
2198 heapam_scan_bitmap_next_tuple(TableScanDesc scan,
2199  TBMIterateResult *tbmres,
2200  TupleTableSlot *slot)
2201 {
2202  HeapScanDesc hscan = (HeapScanDesc) scan;
2203  OffsetNumber targoffset;
2204  Page dp;
2205  ItemId lp;
2206 
2207  /*
2208  * Out of range? If so, nothing more to look at on this page
2209  */
2210  if (hscan->rs_cindex < 0 || hscan->rs_cindex >= hscan->rs_ntuples)
2211  return false;
2212 
2213  targoffset = hscan->rs_vistuples[hscan->rs_cindex];
2214  dp = (Page) BufferGetPage(hscan->rs_cbuf);
2215  lp = PageGetItemId(dp, targoffset);
2216  Assert(ItemIdIsNormal(lp));
2217 
2218  hscan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
2219  hscan->rs_ctup.t_len = ItemIdGetLength(lp);
2220  hscan->rs_ctup.t_tableOid = scan->rs_rd->rd_id;
2221  ItemPointerSet(&hscan->rs_ctup.t_self, hscan->rs_cblock, targoffset);
2222 
2223  pgstat_count_heap_fetch(scan->rs_rd);
2224 
2225  /*
2226  * Set up the result slot to point to this tuple. Note that the slot
2227  * acquires a pin on the buffer.
2228  */
2229  ExecStoreBufferHeapTuple(&hscan->rs_ctup,
2230  slot,
2231  hscan->rs_cbuf);
2232 
2233  hscan->rs_cindex++;
2234 
2235  return true;
2236 }
2237 
2238 static bool
2239 heapam_scan_sample_next_block(TableScanDesc scan, SampleScanState *scanstate)
2240 {
2241  HeapScanDesc hscan = (HeapScanDesc) scan;
2242  TsmRoutine *tsm = scanstate->tsmroutine;
2243  BlockNumber blockno;
2244 
2245  /* return false immediately if relation is empty */
2246  if (hscan->rs_nblocks == 0)
2247  return false;
2248 
2249  if (tsm->NextSampleBlock)
2250  {
2251  blockno = tsm->NextSampleBlock(scanstate, hscan->rs_nblocks);
2252  hscan->rs_cblock = blockno;
2253  }
2254  else
2255  {
2256  /* scanning table sequentially */
2257 
2258  if (hscan->rs_cblock == InvalidBlockNumber)
2259  {
2260  Assert(!hscan->rs_inited);
2261  blockno = hscan->rs_startblock;
2262  }
2263  else
2264  {
2265  Assert(hscan->rs_inited);
2266 
2267  blockno = hscan->rs_cblock + 1;
2268 
2269  if (blockno >= hscan->rs_nblocks)
2270  {
2271  /* wrap to beginning of rel, might not have started at 0 */
2272  blockno = 0;
2273  }
2274 
2275  /*
2276  * Report our new scan position for synchronization purposes.
2277  *
2278  * Note: we do this before checking for end of scan so that the
2279  * final state of the position hint is back at the start of the
2280  * rel. That's not strictly necessary, but otherwise when you run
2281  * the same query multiple times the starting position would shift
2282  * a little bit backwards on every invocation, which is confusing.
2283  * We don't guarantee any specific ordering in general, though.
2284  */
2285  if (scan->rs_flags & SO_ALLOW_SYNC)
2286  ss_report_location(scan->rs_rd, blockno);
2287 
2288  if (blockno == hscan->rs_startblock)
2289  {
2290  blockno = InvalidBlockNumber;
2291  }
2292  }
2293  }
2294 
2295  if (!BlockNumberIsValid(blockno))
2296  {
2297  if (BufferIsValid(hscan->rs_cbuf))
2298  ReleaseBuffer(hscan->rs_cbuf);
2299  hscan->rs_cbuf = InvalidBuffer;
2300  hscan->rs_cblock = InvalidBlockNumber;
2301  hscan->rs_inited = false;
2302 
2303  return false;
2304  }
2305 
2306  heapgetpage(scan, blockno);
2307  hscan->rs_inited = true;
2308 
2309  return true;
2310 }
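
When the sampling method supplies no NextSampleBlock callback, the function above steps through the table sequentially, wrapping to block 0 past the end and stopping once it returns to the starting block. A standalone sketch of just that advance rule (the names and constants are illustrative, not PostgreSQL APIs):

#include <stdio.h>
#include <stdint.h>

#define INVALID_BLOCK	((uint32_t) 0xFFFFFFFF)

/* Advance a sequential sample scan by one block, or report end of scan. */
static uint32_t
next_sample_block(uint32_t cblock, uint32_t startblock, uint32_t nblocks)
{
	uint32_t	next;

	if (cblock == INVALID_BLOCK)		/* scan not started yet */
		return startblock;

	next = cblock + 1;
	if (next >= nblocks)
		next = 0;						/* wrap to the beginning of the rel */
	if (next == startblock)
		return INVALID_BLOCK;			/* back where we began: done */
	return next;
}

int
main(void)
{
	/* hypothetical 5-block table, scan starting at block 3 */
	uint32_t	blk = INVALID_BLOCK;

	while ((blk = next_sample_block(blk, 3, 5)) != INVALID_BLOCK)
		printf("block %u\n", blk);		/* prints 3, 4, 0, 1, 2 */
	return 0;
}
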
2311 
2312 static bool
2313 heapam_scan_sample_next_tuple(TableScanDesc scan, SampleScanState *scanstate,
2314  TupleTableSlot *slot)
2315 {
2316  HeapScanDesc hscan = (HeapScanDesc) scan;
2317  TsmRoutine *tsm = scanstate->tsmroutine;
2318  BlockNumber blockno = hscan->rs_cblock;
2319  bool pagemode = (scan->rs_flags & SO_ALLOW_PAGEMODE) != 0;
2320 
2321  Page page;
2322  bool all_visible;
2323  OffsetNumber maxoffset;
2324 
2325  /*
2326  * When not using pagemode, we must lock the buffer during tuple
2327  * visibility checks.
2328  */
2329  if (!pagemode)
2330  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
2331 
2332  page = (Page) BufferGetPage(hscan->rs_cbuf);
2333  all_visible = PageIsAllVisible(page) &&
2334  !scan->rs_snapshot->takenDuringRecovery;
2335  maxoffset = PageGetMaxOffsetNumber(page);
2336 
2337  for (;;)
2338  {
2339  OffsetNumber tupoffset;
2340 
2341  CHECK_FOR_INTERRUPTS();
2342 
2343  /* Ask the tablesample method which tuples to check on this page. */
2344  tupoffset = tsm->NextSampleTuple(scanstate,
2345  blockno,
2346  maxoffset);
2347 
2348  if (OffsetNumberIsValid(tupoffset))
2349  {
2350  ItemId itemid;
2351  bool visible;
2352  HeapTuple tuple = &(hscan->rs_ctup);
2353 
2354  /* Skip invalid tuple pointers. */
2355  itemid = PageGetItemId(page, tupoffset);
2356  if (!ItemIdIsNormal(itemid))
2357  continue;
2358 
2359  tuple->t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2360  tuple->t_len = ItemIdGetLength(itemid);
2361  ItemPointerSet(&(tuple->t_self), blockno, tupoffset);
2362 
2363 
2364  if (all_visible)
2365  visible = true;
2366  else
2367  visible = SampleHeapTupleVisible(scan, hscan->rs_cbuf,
2368  tuple, tupoffset);
2369 
2370  /* in pagemode, heapgetpage did this for us */
2371  if (!pagemode)
2372  HeapCheckForSerializableConflictOut(visible, scan->rs_rd, tuple,
2373  hscan->rs_cbuf, scan->rs_snapshot);
2374 
2375  /* Try next tuple from same page. */
2376  if (!visible)
2377  continue;
2378 
2379  /* Found visible tuple, return it. */
2380  if (!pagemode)
2381  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
2382 
2383  ExecStoreBufferHeapTuple(tuple, slot, hscan->rs_cbuf);
2384 
2385  /* Count successfully-fetched tuples as heap fetches */
2386  pgstat_count_heap_getnext(scan->rs_rd);
2387 
2388  return true;
2389  }
2390  else
2391  {
2392  /*
2393  * If we get here, it means we've exhausted the items on this page
2394  * and it's time to move to the next.
2395  */
2396  if (!pagemode)
2397  LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
2398 
2399  ExecClearTuple(slot);
2400  return false;
2401  }
2402  }
2403 
2404  Assert(0);
2405 }
2406 
2407 
2408 /* ----------------------------------------------------------------------------
2409  * Helper functions for the above.
2410  * ----------------------------------------------------------------------------
2411  */
2412 
2413 /*
2414  * Reconstruct and rewrite the given tuple
2415  *
2416  * We cannot simply copy the tuple as-is, for several reasons:
2417  *
2418  * 1. We'd like to squeeze out the values of any dropped columns, both
2419  * to save space and to ensure we have no corner-case failures. (It's
2420  * possible for example that the new table hasn't got a TOAST table
2421  * and so is unable to store any large values of dropped cols.)
2422  *
2423  * 2. The tuple might not even be legal for the new table; this is
2424  * currently only known to happen as an after-effect of ALTER TABLE
2425  * SET WITHOUT OIDS.
2426  *
2427  * So, we must reconstruct the tuple from component Datums.
2428  */
2429 static void
2430 reform_and_rewrite_tuple(HeapTuple tuple,
2431  Relation OldHeap, Relation NewHeap,
2432  Datum *values, bool *isnull, RewriteState rwstate)
2433 {
2434  TupleDesc oldTupDesc = RelationGetDescr(OldHeap);
2435  TupleDesc newTupDesc = RelationGetDescr(NewHeap);
2436  HeapTuple copiedTuple;
2437  int i;
2438 
2439  heap_deform_tuple(tuple, oldTupDesc, values, isnull);
2440 
2441  /* Be sure to null out any dropped columns */
2442  for (i = 0; i < newTupDesc->natts; i++)
2443  {
2444  if (TupleDescAttr(newTupDesc, i)->attisdropped)
2445  isnull[i] = true;
2446  }
2447 
2448  copiedTuple = heap_form_tuple(newTupDesc, values, isnull);
2449 
2450  /* The heap rewrite module does the rest */
2451  rewrite_heap_tuple(rwstate, tuple, copiedTuple);
2452 
2453  heap_freetuple(copiedTuple);
2454 }
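
The pattern above is deform, null out the dropped columns, re-form, and hand the result to the rewrite module. A toy standalone version of the same idea over plain arrays (no real TupleDesc or Datum handling) might look like this:

#include <stdbool.h>
#include <stdio.h>

/* Toy model: "reform" a row by blanking the values of dropped columns. */
static void
reform_row(const int *oldvals, const bool *dropped, int ncols,
		   int *newvals, bool *isnull)
{
	for (int i = 0; i < ncols; i++)
	{
		/* dropped columns keep no data in the rewritten tuple */
		isnull[i] = dropped[i];
		newvals[i] = dropped[i] ? 0 : oldvals[i];
	}
}

int
main(void)
{
	int		oldvals[3] = {10, 20, 30};
	bool	dropped[3] = {false, true, false};	/* column 2 was dropped */
	int		newvals[3];
	bool	isnull[3];

	reform_row(oldvals, dropped, 3, newvals, isnull);
	for (int i = 0; i < 3; i++)
		printf("col %d: %s\n", i, isnull[i] ? "NULL" : "kept");
	return 0;
}
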
2455 
2456 /*
2457  * Check visibility of the tuple.
2458  */
2459 static bool
2460 SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer,
2461  HeapTuple tuple,
2462  OffsetNumber tupoffset)
2463 {
2464  HeapScanDesc hscan = (HeapScanDesc) scan;
2465 
2466  if (scan->rs_flags & SO_ALLOW_PAGEMODE)
2467  {
2468  /*
2469  * In page-at-a-time mode, heapgetpage() already did visibility checks,
2470  * so just look at the info it left in rs_vistuples[].
2471  *
2472  * We use a binary search over the known-sorted array. Note: we could
2473  * save some effort if we insisted that NextSampleTuple select tuples
2474  * in increasing order, but it's not clear that there would be enough
2475  * gain to justify the restriction.
2476  */
2477  int start = 0,
2478  end = hscan->rs_ntuples - 1;
2479 
2480  while (start <= end)
2481  {
2482  int mid = (start + end) / 2;
2483  OffsetNumber curoffset = hscan->rs_vistuples[mid];
2484 
2485  if (tupoffset == curoffset)
2486  return true;
2487  else if (tupoffset < curoffset)
2488  end = mid - 1;
2489  else
2490  start = mid + 1;
2491  }
2492 
2493  return false;
2494  }
2495  else
2496  {
2497  /* Otherwise, we have to check the tuple individually. */
2498  return HeapTupleSatisfiesVisibility(tuple, scan->rs_snapshot,
2499  buffer);
2500  }
2501 }
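
The page-at-a-time branch above is a plain binary search over the sorted rs_vistuples[] array. A self-contained version of that search over an ordinary sorted array of offsets:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Binary search for `target` in a sorted array of visible offsets. */
static bool
offset_is_visible(const uint16_t *vistuples, int ntuples, uint16_t target)
{
	int		start = 0,
			end = ntuples - 1;

	while (start <= end)
	{
		int			mid = (start + end) / 2;
		uint16_t	cur = vistuples[mid];

		if (target == cur)
			return true;
		else if (target < cur)
			end = mid - 1;
		else
			start = mid + 1;
	}
	return false;
}

int
main(void)
{
	uint16_t	vis[] = {2, 5, 7, 11};	/* sorted offsets found visible */

	printf("%d %d\n",
		   offset_is_visible(vis, 4, 7),	/* 1: present */
		   offset_is_visible(vis, 4, 8));	/* 0: absent */
	return 0;
}
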
2502 
2503 
2504 /* ------------------------------------------------------------------------
2505  * Definition of the heap table access method.
2506  * ------------------------------------------------------------------------
2507  */
2508 
2509 static const TableAmRoutine heapam_methods = {
2510  .type = T_TableAmRoutine,
2511 
2512  .slot_callbacks = heapam_slot_callbacks,
2513 
2514  .scan_begin = heap_beginscan,
2515  .scan_end = heap_endscan,
2516  .scan_rescan = heap_rescan,
2517  .scan_getnextslot = heap_getnextslot,
2518 
2519  .parallelscan_estimate = table_block_parallelscan_estimate,
2520  .parallelscan_initialize = table_block_parallelscan_initialize,
2521  .parallelscan_reinitialize = table_block_parallelscan_reinitialize,
2522 
2523  .index_fetch_begin = heapam_index_fetch_begin,
2524  .index_fetch_reset = heapam_index_fetch_reset,
2525  .index_fetch_end = heapam_index_fetch_end,
2526  .index_fetch_tuple = heapam_index_fetch_tuple,
2527 
2528  .tuple_insert = heapam_tuple_insert,
2529  .tuple_insert_speculative = heapam_tuple_insert_speculative,
2530  .tuple_complete_speculative = heapam_tuple_complete_speculative,
2531  .multi_insert = heap_multi_insert,
2532  .tuple_delete = heapam_tuple_delete,
2533  .tuple_update = heapam_tuple_update,
2534  .tuple_lock = heapam_tuple_lock,
2535 
2536  .tuple_fetch_row_version = heapam_fetch_row_version,
2537  .tuple_get_latest_tid = heap_get_latest_tid,
2538  .tuple_tid_valid = heapam_tuple_tid_valid,
2539  .tuple_satisfies_snapshot = heapam_tuple_satisfies_snapshot,
2540  .compute_xid_horizon_for_tuples = heap_compute_xid_horizon_for_tuples,
2541 
2542  .relation_set_new_filenode = heapam_relation_set_new_filenode,
2543  .relation_nontransactional_truncate = heapam_relation_nontransactional_truncate,
2544  .relation_copy_data = heapam_relation_copy_data,
2545  .relation_copy_for_cluster = heapam_relation_copy_for_cluster,
2546  .relation_vacuum = heap_vacuum_rel,
2547  .scan_analyze_next_block = heapam_scan_analyze_next_block,
2548  .scan_analyze_next_tuple = heapam_scan_analyze_next_tuple,
2549  .index_build_range_scan = heapam_index_build_range_scan,
2550  .index_validate_scan = heapam_index_validate_scan,
2551 
2552  .relation_size = table_block_relation_size,
2553  .relation_needs_toast_table = heapam_relation_needs_toast_table,
2554  .relation_toast_am = heapam_relation_toast_am,
2555  .relation_fetch_toast_slice = heap_fetch_toast_slice,
2556 
2557  .relation_estimate_size = heapam_estimate_rel_size,
2558 
2559  .scan_bitmap_next_block = heapam_scan_bitmap_next_block,
2560  .scan_bitmap_next_tuple = heapam_scan_bitmap_next_tuple,
2561  .scan_sample_next_block = heapam_scan_sample_next_block,
2562  .scan_sample_next_tuple = heapam_scan_sample_next_tuple
2563 };
2564 
2565 
2566 const TableAmRoutine *
2567 GetHeapamTableAmRoutine(void)
2568 {
2569  return &heapam_methods;
2570 }
2571 
2572 Datum
2573 heap_tableam_handler(PG_FUNCTION_ARGS)
2574 {
2575  PG_RETURN_POINTER(&heapam_methods);
2576 }