PostgreSQL Source Code  git master
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/amapi.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "catalog/index.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "optimizer/paths.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "tcop/tcopprot.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"
Include dependency graph for vacuumlazy.c:

Go to the source code of this file.

Data Structures

struct  LVRelState
 
struct  LVPagePruneState
 
struct  LVSavedErrInfo
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */
 
#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define ParallelVacuumIsActive(vacrel)   ((vacrel)->pvs != NULL)
 
#define FORCE_CHECK_PAGE()    (blkno == nblocks - 1 && should_attempt_truncation(vacrel))
 

Typedefs

typedef struct LVRelState LVRelState
 
typedef struct LVPagePruneState LVPagePruneState
 
typedef struct LVSavedErrInfo LVSavedErrInfo
 

Enumerations

enum  VacErrPhase {
  VACUUM_ERRCB_PHASE_UNKNOWN , VACUUM_ERRCB_PHASE_SCAN_HEAP , VACUUM_ERRCB_PHASE_VACUUM_INDEX , VACUUM_ERRCB_PHASE_VACUUM_HEAP ,
  VACUUM_ERRCB_PHASE_INDEX_CLEANUP , VACUUM_ERRCB_PHASE_TRUNCATE
}
 

Functions

static void lazy_scan_heap (LVRelState *vacrel, VacuumParams *params, bool aggressive)
 
static void lazy_scan_prune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, GlobalVisState *vistest, LVPagePruneState *prunestate)
 
static void lazy_vacuum (LVRelState *vacrel)
 
static bool lazy_vacuum_all_indexes (LVRelState *vacrel)
 
static void lazy_vacuum_heap_rel (LVRelState *vacrel)
 
static int lazy_vacuum_heap_page (LVRelState *vacrel, BlockNumber blkno, Buffer buffer, int index, Buffer *vmbuffer)
 
static bool lazy_check_needs_freeze (Buffer buf, bool *hastup, LVRelState *vacrel)
 
static bool lazy_check_wraparound_failsafe (LVRelState *vacrel)
 
static void lazy_cleanup_all_indexes (LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_vacuum_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_cleanup_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
 
static bool should_attempt_truncation (LVRelState *vacrel)
 
static void lazy_truncate_heap (LVRelState *vacrel)
 
static BlockNumber count_nondeletable_pages (LVRelState *vacrel, bool *lock_waiter_detected)
 
static void dead_items_alloc (LVRelState *vacrel, int nworkers)
 
static void dead_items_cleanup (LVRelState *vacrel)
 
static bool heap_page_is_all_visible (LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
 
static void update_index_statistics (LVRelState *vacrel)
 
static void vacuum_error_callback (void *arg)
 
static void update_vacuum_error_info (LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
 
static void restore_vacuum_error_info (LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
 
void heap_vacuum_rel (Relation rel, VacuumParams *params, BufferAccessStrategy bstrategy)
 
static int dead_items_max_items (LVRelState *vacrel)
 

Macro Definition Documentation

◆ BYPASS_THRESHOLD_PAGES

#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */

Definition at line 93 of file vacuumlazy.c.
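
Worked example: for a table of 100,000 pages, lazy_vacuum() may apply the index-vacuuming bypass optimization only when at most 2,000 pages (2% of rel_pages) contain LP_DEAD item identifiers, among other conditions.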

◆ FAILSAFE_EVERY_PAGES

#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 98 of file vacuumlazy.c.
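
With the default BLCKSZ of 8192, this works out to 524,288 blocks, i.e. the wraparound failsafe is re-evaluated after roughly every 4GB of heap processed.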

◆ FORCE_CHECK_PAGE

#define FORCE_CHECK_PAGE()    (blkno == nblocks - 1 && should_attempt_truncation(vacrel))

◆ ParallelVacuumIsActive

#define ParallelVacuumIsActive(vacrel)    ((vacrel)->pvs != NULL)

Definition at line 126 of file vacuumlazy.c.

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 120 of file vacuumlazy.c.
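
With the default BLCKSZ of 8192, each prefetch chunk issued by count_nondeletable_pages() covers 32 blocks (256kB), read in ascending block order so that OS readahead remains effective even though the truncation scan itself proceeds backwards.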

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 76 of file vacuumlazy.c.

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 75 of file vacuumlazy.c.

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 114 of file vacuumlazy.c.
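
With the default BLCKSZ of 8192, 32 blocks is 256kB: lazy_scan_heap() only skips ahead using the visibility map when at least that many consecutive all-visible (or, for aggressive scans, all-frozen) pages can be bypassed, since smaller skips tend to defeat OS readahead.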

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 107 of file vacuumlazy.c.
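
With the default BLCKSZ of 8192, this is 1,048,576 blocks, i.e. in the one-pass (no indexes) case the free space map is vacuumed incrementally after roughly every 8GB of heap processed.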

◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 85 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 87 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 86 of file vacuumlazy.c.
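
Taken together, these three settings shape lazy_truncate_heap(): the AccessExclusiveLock needed for truncation is retried in VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL (50 ms) naps for at most VACUUM_TRUNCATE_LOCK_TIMEOUT (5000 ms), and once the lock is held, count_nondeletable_pages() checks for conflicting lock requests about every VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL (20 ms) so truncation can be suspended quickly.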

Typedef Documentation

◆ LVPagePruneState

◆ LVRelState

typedef struct LVRelState LVRelState

◆ LVSavedErrInfo

Enumeration Type Documentation

◆ VacErrPhase

Enumerator
VACUUM_ERRCB_PHASE_UNKNOWN 
VACUUM_ERRCB_PHASE_SCAN_HEAP 
VACUUM_ERRCB_PHASE_VACUUM_INDEX 
VACUUM_ERRCB_PHASE_VACUUM_HEAP 
VACUUM_ERRCB_PHASE_INDEX_CLEANUP 
VACUUM_ERRCB_PHASE_TRUNCATE 

Definition at line 129 of file vacuumlazy.c.

130 {
131  VACUUM_ERRCB_PHASE_UNKNOWN,
132  VACUUM_ERRCB_PHASE_SCAN_HEAP,
133  VACUUM_ERRCB_PHASE_VACUUM_INDEX,
134  VACUUM_ERRCB_PHASE_VACUUM_HEAP,
135  VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
136  VACUUM_ERRCB_PHASE_TRUNCATE
137 } VacErrPhase;
VacErrPhase
Definition: vacuumlazy.c:130
@ VACUUM_ERRCB_PHASE_SCAN_HEAP
Definition: vacuumlazy.c:132
@ VACUUM_ERRCB_PHASE_VACUUM_INDEX
Definition: vacuumlazy.c:133
@ VACUUM_ERRCB_PHASE_TRUNCATE
Definition: vacuumlazy.c:136
@ VACUUM_ERRCB_PHASE_INDEX_CLEANUP
Definition: vacuumlazy.c:135
@ VACUUM_ERRCB_PHASE_VACUUM_HEAP
Definition: vacuumlazy.c:134
@ VACUUM_ERRCB_PHASE_UNKNOWN
Definition: vacuumlazy.c:131

Function Documentation

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages ( LVRelState * vacrel,
bool * lock_waiter_detected 
)
static

Definition at line 2705 of file vacuumlazy.c.

2706 {
2707  BlockNumber blkno;
2708  BlockNumber prefetchedUntil;
2709  instr_time starttime;
2710 
2711  /* Initialize the starttime if we check for conflicting lock requests */
2712  INSTR_TIME_SET_CURRENT(starttime);
2713 
2714  /*
2715  * Start checking blocks at what we believe relation end to be and move
2716  * backwards. (Strange coding of loop control is needed because blkno is
2717  * unsigned.) To make the scan faster, we prefetch a few blocks at a time
2718  * in forward direction, so that OS-level readahead can kick in.
2719  */
2720  blkno = vacrel->rel_pages;
2722  "prefetch size must be power of 2");
2723  prefetchedUntil = InvalidBlockNumber;
2724  while (blkno > vacrel->nonempty_pages)
2725  {
2726  Buffer buf;
2727  Page page;
2728  OffsetNumber offnum,
2729  maxoff;
2730  bool hastup;
2731 
2732  /*
2733  * Check if another process requests a lock on our relation. We are
2734  * holding an AccessExclusiveLock here, so they will be waiting. We
2735  * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
2736  * only check if that interval has elapsed once every 32 blocks to
2737  * keep the number of system calls and actual shared lock table
2738  * lookups to a minimum.
2739  */
2740  if ((blkno % 32) == 0)
2741  {
2742  instr_time currenttime;
2743  instr_time elapsed;
2744 
2745  INSTR_TIME_SET_CURRENT(currenttime);
2746  elapsed = currenttime;
2747  INSTR_TIME_SUBTRACT(elapsed, starttime);
2748  if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
2749  >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
2750  {
2751  if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
2752  {
2753  ereport(vacrel->verbose ? INFO : DEBUG2,
2754  (errmsg("table \"%s\": suspending truncate due to conflicting lock request",
2755  vacrel->relname)));
2756 
2757  *lock_waiter_detected = true;
2758  return blkno;
2759  }
2760  starttime = currenttime;
2761  }
2762  }
2763 
2764  /*
2765  * We don't insert a vacuum delay point here, because we have an
2766  * exclusive lock on the table which we want to hold for as short a
2767  * time as possible. We still need to check for interrupts however.
2768  */
2769  CHECK_FOR_INTERRUPTS();
2770 
2771  blkno--;
2772 
2773  /* If we haven't prefetched this lot yet, do so now. */
2774  if (prefetchedUntil > blkno)
2775  {
2776  BlockNumber prefetchStart;
2777  BlockNumber pblkno;
2778 
2779  prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
2780  for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
2781  {
2782  PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
2783  CHECK_FOR_INTERRUPTS();
2784  }
2785  prefetchedUntil = prefetchStart;
2786  }
2787 
2788  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
2789  vacrel->bstrategy);
2790 
2791  /* In this phase we only need shared access to the buffer */
2792  LockBuffer(buf, BUFFER_LOCK_SHARE);
2793 
2794  page = BufferGetPage(buf);
2795 
2796  if (PageIsNew(page) || PageIsEmpty(page))
2797  {
2798  UnlockReleaseBuffer(buf);
2799  continue;
2800  }
2801 
2802  hastup = false;
2803  maxoff = PageGetMaxOffsetNumber(page);
2804  for (offnum = FirstOffsetNumber;
2805  offnum <= maxoff;
2806  offnum = OffsetNumberNext(offnum))
2807  {
2808  ItemId itemid;
2809 
2810  itemid = PageGetItemId(page, offnum);
2811 
2812  /*
2813  * Note: any non-unused item should be taken as a reason to keep
2814  * this page. Even an LP_DEAD item makes truncation unsafe, since
2815  * we must not have cleaned out its index entries.
2816  */
2817  if (ItemIdIsUsed(itemid))
2818  {
2819  hastup = true;
2820  break; /* can stop scanning */
2821  }
2822  } /* scan along page */
2823 
2824  UnlockReleaseBuffer(buf);
2825 
2826  /* Done scanning if we found a tuple here */
2827  if (hastup)
2828  return blkno + 1;
2829  }
2830 
2831  /*
2832  * If we fall out of the loop, all the previously-thought-to-be-empty
2833  * pages still are; we need not bother to look at the last known-nonempty
2834  * page.
2835  */
2836  return vacrel->nonempty_pages;
2837 }
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
int Buffer
Definition: buf.h:23
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:587
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3780
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3996
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:741
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
@ RBM_NORMAL
Definition: bufmgr.h:39
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
Pointer Page
Definition: bufpage.h:78
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:356
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:234
#define PageIsEmpty(page)
Definition: bufpage.h:221
#define PageIsNew(page)
Definition: bufpage.h:228
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:918
int errmsg(const char *fmt,...)
Definition: elog.c:904
#define DEBUG2
Definition: elog.h:23
#define INFO
Definition: elog.h:28
#define ereport(elevel,...)
Definition: elog.h:143
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:156
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:170
struct timeval instr_time
Definition: instr_time.h:150
#define INSTR_TIME_GET_MICROSEC(t)
Definition: instr_time.h:205
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:346
#define AccessExclusiveLock
Definition: lockdefs.h:43
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:120
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define FirstOffsetNumber
Definition: off.h:27
static char * buf
Definition: pg_test_fsync.c:70
@ MAIN_FORKNUM
Definition: relpath.h:43
bool verbose
Definition: vacuumlazy.c:178
BlockNumber nonempty_pages
Definition: vacuumlazy.c:197
Relation rel
Definition: vacuumlazy.c:142
BlockNumber rel_pages
Definition: vacuumlazy.c:190
BufferAccessStrategy bstrategy
Definition: vacuumlazy.c:157
char * relname
Definition: vacuumlazy.c:173
#define PREFETCH_SIZE
Definition: vacuumlazy.c:120
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL
Definition: vacuumlazy.c:85

References AccessExclusiveLock, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BufferGetPage, CHECK_FOR_INTERRUPTS, DEBUG2, ereport, errmsg(), FirstOffsetNumber, INFO, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelState::nonempty_pages, OffsetNumberNext, PageGetItemId, PageGetMaxOffsetNumber, PageIsEmpty, PageIsNew, PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelState::rel, LVRelState::rel_pages, LVRelState::relname, StaticAssertStmt, UnlockReleaseBuffer(), VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and LVRelState::verbose.

Referenced by lazy_truncate_heap().
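
A minimal standalone illustration (not part of vacuumlazy.c) of the prefetch alignment used above; it assumes PREFETCH_SIZE is 32, as defined in this file:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t BlockNumber;
#define PREFETCH_SIZE ((BlockNumber) 32)

int
main(void)
{
    BlockNumber blkno = 1000;
    /* Round blkno down to the nearest multiple of PREFETCH_SIZE (a power of 2) */
    BlockNumber prefetchStart = blkno & ~(PREFETCH_SIZE - 1);

    /* Prints 992; blocks 992..1000 are then prefetched in ascending order so
     * OS readahead helps even though the outer scan moves backwards. */
    printf("%u\n", (unsigned) prefetchStart);
    return 0;
}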

◆ dead_items_alloc()

static void dead_items_alloc ( LVRelState * vacrel,
int  nworkers 
)
static

Definition at line 2887 of file vacuumlazy.c.

2888 {
2889  VacDeadItems *dead_items;
2890  int max_items;
2891 
2892  max_items = dead_items_max_items(vacrel);
2893  Assert(max_items >= MaxHeapTuplesPerPage);
2894 
2895  /*
2896  * Initialize state for a parallel vacuum. As of now, only one worker can
2897  * be used for an index, so we invoke parallelism only if there are at
2898  * least two indexes on a table.
2899  */
2900  if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)
2901  {
2902  /*
2903  * Since parallel workers cannot access data in temporary tables, we
2904  * can't perform parallel vacuum on them.
2905  */
2906  if (RelationUsesLocalBuffers(vacrel->rel))
2907  {
2908  /*
2909  * Give warning only if the user explicitly tries to perform a
2910  * parallel vacuum on the temporary table.
2911  */
2912  if (nworkers > 0)
2913  ereport(WARNING,
2914  (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
2915  vacrel->relname)));
2916  }
2917  else
2918  vacrel->pvs = parallel_vacuum_init(vacrel->rel, vacrel->indrels,
2919  vacrel->nindexes, nworkers,
2920  max_items,
2921  vacrel->verbose ? INFO : DEBUG2,
2922  vacrel->bstrategy);
2923 
2924  /* If parallel mode started, dead_items space is allocated in DSM */
2925  if (ParallelVacuumIsActive(vacrel))
2926  {
2927  vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs);
2928  return;
2929  }
2930  }
2931 
2932  /* Serial VACUUM case */
2933  dead_items = (VacDeadItems *) palloc(vac_max_items_to_alloc_size(max_items));
2934  dead_items->max_items = max_items;
2935  dead_items->num_items = 0;
2936 
2937  vacrel->dead_items = dead_items;
2938 }
#define WARNING
Definition: elog.h:30
#define MaxHeapTuplesPerPage
Definition: htup_details.h:568
Assert(fmt[strlen(fmt) - 1] !='\n')
void * palloc(Size size)
Definition: mcxt.c:1062
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:611
ParallelVacuumState * pvs
Definition: vacuumlazy.c:158
int nindexes
Definition: vacuumlazy.c:144
Relation * indrels
Definition: vacuumlazy.c:143
VacDeadItems * dead_items
Definition: vacuumlazy.c:189
bool do_index_vacuuming
Definition: vacuumlazy.c:152
int max_items
Definition: vacuum.h:243
int num_items
Definition: vacuum.h:244
Size vac_max_items_to_alloc_size(int max_items)
Definition: vacuum.c:2321
#define ParallelVacuumIsActive(vacrel)
Definition: vacuumlazy.c:126
static int dead_items_max_items(LVRelState *vacrel)
Definition: vacuumlazy.c:2848
VacDeadItems * parallel_vacuum_get_dead_items(ParallelVacuumState *pvs)
ParallelVacuumState * parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, int nrequested_workers, int max_items, int elevel, BufferAccessStrategy bstrategy)

References Assert(), LVRelState::bstrategy, LVRelState::dead_items, dead_items_max_items(), DEBUG2, LVRelState::do_index_vacuuming, ereport, errmsg(), LVRelState::indrels, INFO, VacDeadItems::max_items, MaxHeapTuplesPerPage, LVRelState::nindexes, VacDeadItems::num_items, palloc(), parallel_vacuum_get_dead_items(), parallel_vacuum_init(), ParallelVacuumIsActive, LVRelState::pvs, LVRelState::rel, RelationUsesLocalBuffers, LVRelState::relname, vac_max_items_to_alloc_size(), LVRelState::verbose, and WARNING.

Referenced by lazy_scan_heap().

◆ dead_items_cleanup()

static void dead_items_cleanup ( LVRelState * vacrel)
static

Definition at line 2944 of file vacuumlazy.c.

2945 {
2946  if (!ParallelVacuumIsActive(vacrel))
2947  {
2948  /* Don't bother with pfree here */
2949  return;
2950  }
2951 
2952  /* End parallel mode */
2953  parallel_vacuum_end(vacrel->pvs, vacrel->indstats);
2954  vacrel->pvs = NULL;
2955 }
IndexBulkDeleteResult ** indstats
Definition: vacuumlazy.c:203
void parallel_vacuum_end(ParallelVacuumState *pvs, IndexBulkDeleteResult **istats)

References LVRelState::indstats, parallel_vacuum_end(), ParallelVacuumIsActive, and LVRelState::pvs.

Referenced by lazy_scan_heap().

◆ dead_items_max_items()

static int dead_items_max_items ( LVRelState * vacrel)
static

Definition at line 2848 of file vacuumlazy.c.

2849 {
2850  int64 max_items;
2851  int vac_work_mem = IsAutoVacuumWorkerProcess() &&
2852  autovacuum_work_mem != -1 ?
2853  autovacuum_work_mem : maintenance_work_mem;
2854 
2855  if (vacrel->nindexes > 0)
2856  {
2857  BlockNumber rel_pages = vacrel->rel_pages;
2858 
2859  max_items = MAXDEADITEMS(vac_work_mem * 1024L);
2860  max_items = Min(max_items, INT_MAX);
2861  max_items = Min(max_items, MAXDEADITEMS(MaxAllocSize));
2862 
2863  /* curious coding here to ensure the multiplication can't overflow */
2864  if ((BlockNumber) (max_items / MaxHeapTuplesPerPage) > rel_pages)
2865  max_items = rel_pages * MaxHeapTuplesPerPage;
2866 
2867  /* stay sane if small maintenance_work_mem */
2868  max_items = Max(max_items, MaxHeapTuplesPerPage);
2869  }
2870  else
2871  {
2872  /* One-pass case only stores a single heap page's TIDs at a time */
2873  max_items = MaxHeapTuplesPerPage;
2874  }
2875 
2876  return (int) max_items;
2877 }
int autovacuum_work_mem
Definition: autovacuum.c:116
bool IsAutoVacuumWorkerProcess(void)
Definition: autovacuum.c:3411
#define Min(x, y)
Definition: c.h:986
#define Max(x, y)
Definition: c.h:980
int maintenance_work_mem
Definition: globals.c:126
#define MaxAllocSize
Definition: memutils.h:40
#define MAXDEADITEMS(avail_mem)
Definition: vacuum.h:250

References autovacuum_work_mem, IsAutoVacuumWorkerProcess(), maintenance_work_mem, Max, MaxAllocSize, MAXDEADITEMS, MaxHeapTuplesPerPage, Min, LVRelState::nindexes, and LVRelState::rel_pages.

Referenced by dead_items_alloc().
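
As a rough worked example (assuming the default maintenance_work_mem of 64MB and the 6-byte ItemPointerData size): MAXDEADITEMS(64MB) is about 11 million TIDs, so a table with indexes can accumulate roughly 11 million dead item identifiers before an index-vacuum pass is forced; the value is further clamped to rel_pages * MaxHeapTuplesPerPage from above and to MaxHeapTuplesPerPage from below.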

◆ heap_page_is_all_visible()

static bool heap_page_is_all_visible ( LVRelState * vacrel,
Buffer  buf,
TransactionId * visibility_cutoff_xid,
bool * all_frozen 
)
static

Definition at line 2969 of file vacuumlazy.c.

2972 {
2973  Page page = BufferGetPage(buf);
2974  BlockNumber blockno = BufferGetBlockNumber(buf);
2975  OffsetNumber offnum,
2976  maxoff;
2977  bool all_visible = true;
2978 
2979  *visibility_cutoff_xid = InvalidTransactionId;
2980  *all_frozen = true;
2981 
2982  maxoff = PageGetMaxOffsetNumber(page);
2983  for (offnum = FirstOffsetNumber;
2984  offnum <= maxoff && all_visible;
2985  offnum = OffsetNumberNext(offnum))
2986  {
2987  ItemId itemid;
2988  HeapTupleData tuple;
2989 
2990  /*
2991  * Set the offset number so that we can display it along with any
2992  * error that occurred while processing this tuple.
2993  */
2994  vacrel->offnum = offnum;
2995  itemid = PageGetItemId(page, offnum);
2996 
2997  /* Unused or redirect line pointers are of no interest */
2998  if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
2999  continue;
3000 
3001  ItemPointerSet(&(tuple.t_self), blockno, offnum);
3002 
3003  /*
3004  * Dead line pointers can have index pointers pointing to them. So
3005  * they can't be treated as visible
3006  */
3007  if (ItemIdIsDead(itemid))
3008  {
3009  all_visible = false;
3010  *all_frozen = false;
3011  break;
3012  }
3013 
3014  Assert(ItemIdIsNormal(itemid));
3015 
3016  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
3017  tuple.t_len = ItemIdGetLength(itemid);
3018  tuple.t_tableOid = RelationGetRelid(vacrel->rel);
3019 
3020  switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf))
3021  {
3022  case HEAPTUPLE_LIVE:
3023  {
3024  TransactionId xmin;
3025 
3026  /* Check comments in lazy_scan_prune. */
3027  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
3028  {
3029  all_visible = false;
3030  *all_frozen = false;
3031  break;
3032  }
3033 
3034  /*
3035  * The inserter definitely committed. But is it old enough
3036  * that everyone sees it as committed?
3037  */
3038  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
3039  if (!TransactionIdPrecedes(xmin, vacrel->OldestXmin))
3040  {
3041  all_visible = false;
3042  *all_frozen = false;
3043  break;
3044  }
3045 
3046  /* Track newest xmin on page. */
3047  if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
3048  *visibility_cutoff_xid = xmin;
3049 
3050  /* Check whether this tuple is already frozen or not */
3051  if (all_visible && *all_frozen &&
3052  heap_tuple_needs_eventual_freeze(tuple.t_data))
3053  *all_frozen = false;
3054  }
3055  break;
3056 
3057  case HEAPTUPLE_DEAD:
3058  case HEAPTUPLE_RECENTLY_DEAD:
3059  case HEAPTUPLE_INSERT_IN_PROGRESS:
3060  case HEAPTUPLE_DELETE_IN_PROGRESS:
3061  {
3062  all_visible = false;
3063  *all_frozen = false;
3064  break;
3065  }
3066  default:
3067  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
3068  break;
3069  }
3070  } /* scan along page */
3071 
3072  /* Clear the offset information once we have processed the given page. */
3073  vacrel->offnum = InvalidOffsetNumber;
3074 
3075  return all_visible;
3076 }
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2748
#define PageGetItem(page, itemId)
Definition: bufpage.h:339
uint32 TransactionId
Definition: c.h:587
#define ERROR
Definition: elog.h:33
#define elog(elevel,...)
Definition: elog.h:218
bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
Definition: heapam.c:7037
@ HEAPTUPLE_RECENTLY_DEAD
Definition: heapam.h:97
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition: heapam.h:98
@ HEAPTUPLE_LIVE
Definition: heapam.h:96
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition: heapam.h:99
@ HEAPTUPLE_DEAD
Definition: heapam.h:95
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:308
#define HeapTupleHeaderXminCommitted(tup)
Definition: htup_details.h:319
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127
#define InvalidOffsetNumber
Definition: off.h:26
#define RelationGetRelid(relation)
Definition: rel.h:478
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
TransactionId OldestXmin
Definition: vacuumlazy.c:166
OffsetNumber offnum
Definition: vacuumlazy.c:176
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition: transam.c:334
#define InvalidTransactionId
Definition: transam.h:31

References Assert(), buf, BufferGetBlockNumber(), BufferGetPage, elog, ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet, LVRelState::offnum, OffsetNumberNext, LVRelState::OldestXmin, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), and TransactionIdPrecedes().

Referenced by lazy_scan_prune(), and lazy_vacuum_heap_page().

◆ heap_vacuum_rel()

void heap_vacuum_rel ( Relation  rel,
VacuumParams * params,
BufferAccessStrategy  bstrategy 
)

Definition at line 296 of file vacuumlazy.c.

298 {
299  LVRelState *vacrel;
300  bool verbose,
301  instrument;
302  PGRUsage ru0;
303  TimestampTz starttime = 0;
304  WalUsage walusage_start = pgWalUsage;
305  WalUsage walusage = {0, 0, 0};
306  long secs;
307  int usecs;
308  double read_rate,
309  write_rate;
310  bool aggressive; /* should we scan all unfrozen pages? */
311  bool scanned_all_unfrozen; /* actually scanned all such pages? */
312  char **indnames = NULL;
313  TransactionId xidFullScanLimit;
314  MultiXactId mxactFullScanLimit;
315  BlockNumber new_rel_pages;
316  BlockNumber new_rel_allvisible;
317  double new_live_tuples;
318  TransactionId new_frozen_xid;
319  MultiXactId new_min_multi;
320  ErrorContextCallback errcallback;
321  PgStat_Counter startreadtime = 0;
322  PgStat_Counter startwritetime = 0;
323  TransactionId OldestXmin;
324  TransactionId FreezeLimit;
325  MultiXactId MultiXactCutoff;
326 
327  verbose = (params->options & VACOPT_VERBOSE) != 0;
328  instrument = (verbose || (IsAutoVacuumWorkerProcess() &&
329  params->log_min_duration >= 0));
330  if (instrument)
331  {
332  pg_rusage_init(&ru0);
333  starttime = GetCurrentTimestamp();
334  if (track_io_timing)
335  {
336  startreadtime = pgStatBlockReadTime;
337  startwritetime = pgStatBlockWriteTime;
338  }
339  }
340 
341  pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
342  RelationGetRelid(rel));
343 
344  vacuum_set_xid_limits(rel,
345  params->freeze_min_age,
346  params->freeze_table_age,
347  params->multixact_freeze_min_age,
348  params->multixact_freeze_table_age,
349  &OldestXmin, &FreezeLimit, &xidFullScanLimit,
350  &MultiXactCutoff, &mxactFullScanLimit);
351 
352  /*
353  * We request an aggressive scan if the table's frozen Xid is now older
354  * than or equal to the requested Xid full-table scan limit; or if the
355  * table's minimum MultiXactId is older than or equal to the requested
356  * mxid full-table scan limit; or if DISABLE_PAGE_SKIPPING was specified.
357  */
358  aggressive = TransactionIdPrecedesOrEquals(rel->rd_rel->relfrozenxid,
359  xidFullScanLimit);
360  aggressive |= MultiXactIdPrecedesOrEquals(rel->rd_rel->relminmxid,
361  mxactFullScanLimit);
362  if (params->options & VACOPT_DISABLE_PAGE_SKIPPING)
363  aggressive = true;
364 
365  /*
366  * Setup error traceback support for ereport() first. The idea is to set
367  * up an error context callback to display additional information on any
368  * error during a vacuum. During different phases of vacuum, we update
369  * the state so that the error context callback always display current
370  * information.
371  *
372  * Copy the names of heap rel into local memory for error reporting
373  * purposes, too. It isn't always safe to assume that we can get the name
374  * of each rel. It's convenient for code in lazy_scan_heap to always use
375  * these temp copies.
376  */
377  vacrel = (LVRelState *) palloc0(sizeof(LVRelState));
378  vacrel->relnamespace = get_namespace_name(RelationGetNamespace(rel));
379  vacrel->relname = pstrdup(RelationGetRelationName(rel));
380  vacrel->indname = NULL;
381  vacrel->phase = VACUUM_ERRCB_PHASE_UNKNOWN;
382  vacrel->verbose = verbose;
383  errcallback.callback = vacuum_error_callback;
384  errcallback.arg = vacrel;
385  errcallback.previous = error_context_stack;
386  error_context_stack = &errcallback;
387  if (verbose)
388  {
389  Assert(!IsAutoVacuumWorkerProcess());
390  if (aggressive)
391  ereport(INFO,
392  (errmsg("aggressively vacuuming \"%s.%s.%s\"",
393  get_database_name(MyDatabaseId),
394  vacrel->relnamespace, vacrel->relname)));
395  else
396  ereport(INFO,
397  (errmsg("vacuuming \"%s.%s.%s\"",
398  get_database_name(MyDatabaseId),
399  vacrel->relnamespace, vacrel->relname)));
400  }
401 
402  /* Set up high level stuff about rel and its indexes */
403  vacrel->rel = rel;
404  vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes,
405  &vacrel->indrels);
406  if (instrument && vacrel->nindexes > 0)
407  {
408  /* Copy index names used by instrumentation (not error reporting) */
409  indnames = palloc(sizeof(char *) * vacrel->nindexes);
410  for (int i = 0; i < vacrel->nindexes; i++)
411  indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
412  }
413 
414  /*
415  * The index_cleanup param either disables index vacuuming and cleanup or
416  * forces it to go ahead when we would otherwise apply the index bypass
417  * optimization. The default is 'auto', which leaves the final decision
418  * up to lazy_vacuum().
419  *
420  * The truncate param allows user to avoid attempting relation truncation,
421  * though it can't force truncation to happen.
422  */
425  params->truncate != VACOPTVALUE_AUTO);
426  vacrel->failsafe_active = false;
427  vacrel->consider_bypass_optimization = true;
428  vacrel->do_index_vacuuming = true;
429  vacrel->do_index_cleanup = true;
430  vacrel->do_rel_truncate = (params->truncate != VACOPTVALUE_DISABLED);
431  if (params->index_cleanup == VACOPTVALUE_DISABLED)
432  {
433  /* Force disable index vacuuming up-front */
434  vacrel->do_index_vacuuming = false;
435  vacrel->do_index_cleanup = false;
436  }
437  else if (params->index_cleanup == VACOPTVALUE_ENABLED)
438  {
439  /* Force index vacuuming. Note that failsafe can still bypass. */
440  vacrel->consider_bypass_optimization = false;
441  }
442  else
443  {
444  /* Default/auto, make all decisions dynamically */
445  Assert(params->index_cleanup == VACOPTVALUE_AUTO);
446  }
447 
448  vacrel->bstrategy = bstrategy;
449  vacrel->relfrozenxid = rel->rd_rel->relfrozenxid;
450  vacrel->relminmxid = rel->rd_rel->relminmxid;
451  vacrel->old_live_tuples = rel->rd_rel->reltuples;
452 
453  /* Set cutoffs for entire VACUUM */
454  vacrel->OldestXmin = OldestXmin;
455  vacrel->FreezeLimit = FreezeLimit;
456  vacrel->MultiXactCutoff = MultiXactCutoff;
457 
458  /*
459  * Call lazy_scan_heap to perform all required heap pruning, index
460  * vacuuming, and heap vacuuming (plus related processing)
461  */
462  lazy_scan_heap(vacrel, params, aggressive);
463 
464  /* Done with indexes */
465  vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
466 
467  /*
468  * Compute whether we actually scanned the all unfrozen pages. If we did,
469  * we can adjust relfrozenxid and relminmxid.
470  *
471  * NB: We need to check this before truncating the relation, because that
472  * will change ->rel_pages.
473  */
474  if ((vacrel->scanned_pages + vacrel->frozenskipped_pages)
475  < vacrel->rel_pages)
476  {
477  Assert(!aggressive);
478  scanned_all_unfrozen = false;
479  }
480  else
481  scanned_all_unfrozen = true;
482 
483  /*
484  * Optionally truncate the relation.
485  */
486  if (should_attempt_truncation(vacrel))
487  {
488  update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
489  vacrel->nonempty_pages,
490  InvalidOffsetNumber);
491  lazy_truncate_heap(vacrel);
492  }
493 
494  /* Pop the error context stack */
495  error_context_stack = errcallback.previous;
496 
497  /* Report that we are now doing final cleanup */
498  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
499  PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
500 
501  /*
502  * Update statistics in pg_class.
503  *
504  * In principle new_live_tuples could be -1 indicating that we (still)
505  * don't know the tuple count. In practice that probably can't happen,
506  * since we'd surely have scanned some pages if the table is new and
507  * nonempty.
508  *
509  * For safety, clamp relallvisible to be not more than what we're setting
510  * relpages to.
511  *
512  * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
513  * since then we don't know for certain that all tuples have a newer xmin.
514  */
515  new_rel_pages = vacrel->rel_pages;
516  new_live_tuples = vacrel->new_live_tuples;
517 
518  visibilitymap_count(rel, &new_rel_allvisible, NULL);
519  if (new_rel_allvisible > new_rel_pages)
520  new_rel_allvisible = new_rel_pages;
521 
522  new_frozen_xid = scanned_all_unfrozen ? FreezeLimit : InvalidTransactionId;
523  new_min_multi = scanned_all_unfrozen ? MultiXactCutoff : InvalidMultiXactId;
524 
525  vac_update_relstats(rel,
526  new_rel_pages,
527  new_live_tuples,
528  new_rel_allvisible,
529  vacrel->nindexes > 0,
530  new_frozen_xid,
531  new_min_multi,
532  false);
533 
534  /*
535  * Report results to the stats collector, too.
536  *
537  * Deliberately avoid telling the stats collector about LP_DEAD items that
538  * remain in the table due to VACUUM bypassing index and heap vacuuming.
539  * ANALYZE will consider the remaining LP_DEAD items to be dead "tuples".
540  * It seems like a good idea to err on the side of not vacuuming again too
541  * soon in cases where the failsafe prevented significant amounts of heap
542  * vacuuming.
543  */
544  pgstat_report_vacuum(RelationGetRelid(rel),
545  rel->rd_rel->relisshared,
546  Max(new_live_tuples, 0),
547  vacrel->new_dead_tuples);
548  pgstat_progress_end_command();
549 
550  if (instrument)
551  {
552  TimestampTz endtime = GetCurrentTimestamp();
553 
554  if (verbose || params->log_min_duration == 0 ||
555  TimestampDifferenceExceeds(starttime, endtime,
556  params->log_min_duration))
557  {
558  StringInfoData buf;
559  char *msgfmt;
560  BlockNumber orig_rel_pages;
561 
562  TimestampDifference(starttime, endtime, &secs, &usecs);
563 
564  memset(&walusage, 0, sizeof(WalUsage));
565  WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start);
566 
567  read_rate = 0;
568  write_rate = 0;
569  if ((secs > 0) || (usecs > 0))
570  {
571  read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
572  (secs + usecs / 1000000.0);
573  write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
574  (secs + usecs / 1000000.0);
575  }
576 
577  initStringInfo(&buf);
578  if (verbose)
579  {
580  /*
581  * Aggressiveness already reported earlier, in dedicated
582  * VACUUM VERBOSE ereport
583  */
584  Assert(!params->is_wraparound);
585  msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
586  }
587  else if (params->is_wraparound)
588  {
589  /*
590  * While it's possible for a VACUUM to be both is_wraparound
591  * and !aggressive, that's just a corner-case -- is_wraparound
592  * implies aggressive. Produce distinct output for the corner
593  * case all the same, just in case.
594  */
595  if (aggressive)
596  msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
597  else
598  msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
599  }
600  else
601  {
602  if (aggressive)
603  msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
604  else
605  msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
606  }
607  appendStringInfo(&buf, msgfmt,
608  get_database_name(MyDatabaseId),
609  vacrel->relnamespace,
610  vacrel->relname,
611  vacrel->num_index_scans);
612  appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
613  vacrel->pages_removed,
614  vacrel->rel_pages,
615  vacrel->pinskipped_pages,
616  vacrel->frozenskipped_pages);
617  appendStringInfo(&buf,
618  _("tuples: %lld removed, %lld remain, %lld are dead but not yet removable, oldest xmin: %u\n"),
619  (long long) vacrel->tuples_deleted,
620  (long long) vacrel->new_rel_tuples,
621  (long long) vacrel->new_dead_tuples,
622  OldestXmin);
623  orig_rel_pages = vacrel->rel_pages + vacrel->pages_removed;
624  if (orig_rel_pages > 0)
625  {
626  if (vacrel->do_index_vacuuming)
627  {
628  if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
629  appendStringInfoString(&buf, _("index scan not needed: "));
630  else
631  appendStringInfoString(&buf, _("index scan needed: "));
632 
633  msgfmt = _("%u pages from table (%.2f%% of total) had %lld dead item identifiers removed\n");
634  }
635  else
636  {
637  if (!vacrel->failsafe_active)
638  appendStringInfoString(&buf, _("index scan bypassed: "));
639  else
640  appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
641 
642  msgfmt = _("%u pages from table (%.2f%% of total) have %lld dead item identifiers\n");
643  }
644  appendStringInfo(&buf, msgfmt,
645  vacrel->lpdead_item_pages,
646  100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
647  (long long) vacrel->lpdead_items);
648  }
649  for (int i = 0; i < vacrel->nindexes; i++)
650  {
651  IndexBulkDeleteResult *istat = vacrel->indstats[i];
652 
653  if (!istat)
654  continue;
655 
656  appendStringInfo(&buf,
657  _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
658  indnames[i],
659  istat->num_pages,
660  istat->pages_newly_deleted,
661  istat->pages_deleted,
662  istat->pages_free);
663  }
664  if (track_io_timing)
665  {
666  double read_ms = (double) (pgStatBlockReadTime - startreadtime) / 1000;
667  double write_ms = (double) (pgStatBlockWriteTime - startwritetime) / 1000;
668 
669  appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
670  read_ms, write_ms);
671  }
672  appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
673  read_rate, write_rate);
674  appendStringInfo(&buf,
675  _("buffer usage: %lld hits, %lld misses, %lld dirtied\n"),
676  (long long) VacuumPageHit,
677  (long long) VacuumPageMiss,
678  (long long) VacuumPageDirty);
679  appendStringInfo(&buf,
680  _("WAL usage: %lld records, %lld full page images, %llu bytes\n"),
681  (long long) walusage.wal_records,
682  (long long) walusage.wal_fpi,
683  (unsigned long long) walusage.wal_bytes);
684  appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
685 
686  ereport(verbose ? INFO : LOG,
687  (errmsg_internal("%s", buf.data)));
688  pfree(buf.data);
689  }
690  }
691 
692  /* Cleanup index statistics and index names */
693  for (int i = 0; i < vacrel->nindexes; i++)
694  {
695  if (vacrel->indstats[i])
696  pfree(vacrel->indstats[i]);
697 
698  if (instrument)
699  pfree(indnames[i]);
700  }
701 }
void TimestampDifference(TimestampTz start_time, TimestampTz stop_time, long *secs, int *microsecs)
Definition: timestamp.c:1656
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1711
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1580
void pgstat_progress_start_command(ProgressCommandType cmdtype, Oid relid)
void pgstat_progress_update_param(int index, int64 val)
void pgstat_progress_end_command(void)
@ PROGRESS_COMMAND_VACUUM
bool track_io_timing
Definition: bufmgr.c:135
TransactionId MultiXactId
Definition: c.h:597
int64 TimestampTz
Definition: timestamp.h:39
char * get_database_name(Oid dbid)
Definition: dbcommands.c:2113
int errmsg_internal(const char *fmt,...)
Definition: elog.c:991
ErrorContextCallback * error_context_stack
Definition: elog.c:93
#define _(x)
Definition: elog.c:89
#define LOG
Definition: elog.h:25
int64 VacuumPageHit
Definition: globals.c:147
int64 VacuumPageMiss
Definition: globals.c:148
int64 VacuumPageDirty
Definition: globals.c:149
Oid MyDatabaseId
Definition: globals.c:88
WalUsage pgWalUsage
Definition: instrument.c:22
void WalUsageAccumDiff(WalUsage *dst, const WalUsage *add, const WalUsage *sub)
Definition: instrument.c:274
int i
Definition: isn.c:73
#define NoLock
Definition: lockdefs.h:34
#define RowExclusiveLock
Definition: lockdefs.h:38
char * get_namespace_name(Oid nspid)
Definition: lsyscache.c:3316
char * pstrdup(const char *in)
Definition: mcxt.c:1299
void pfree(void *pointer)
Definition: mcxt.c:1169
void * palloc0(Size size)
Definition: mcxt.c:1093
bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3173
#define InvalidMultiXactId
Definition: multixact.h:24
static int verbose
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
PgStat_Counter pgStatBlockReadTime
Definition: pgstat.c:243
PgStat_Counter pgStatBlockWriteTime
Definition: pgstat.c:244
void pgstat_report_vacuum(Oid tableoid, bool shared, PgStat_Counter livetuples, PgStat_Counter deadtuples)
Definition: pgstat.c:1651
int64 PgStat_Counter
Definition: pgstat.h:95
#define PROGRESS_VACUUM_PHASE_FINAL_CLEANUP
Definition: progress.h:35
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
#define RelationGetRelationName(relation)
Definition: rel.h:512
#define RelationGetNamespace(relation)
Definition: rel.h:519
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:91
void appendStringInfoString(StringInfo str, const char *s)
Definition: stringinfo.c:176
void initStringInfo(StringInfo str)
Definition: stringinfo.c:59
struct ErrorContextCallback * previous
Definition: elog.h:232
void(* callback)(void *arg)
Definition: elog.h:233
BlockNumber pages_deleted
Definition: genam.h:81
BlockNumber pages_newly_deleted
Definition: genam.h:80
BlockNumber pages_free
Definition: genam.h:82
BlockNumber num_pages
Definition: genam.h:76
MultiXactId relminmxid
Definition: vacuumlazy.c:162
int64 tuples_deleted
Definition: vacuumlazy.c:207
MultiXactId MultiXactCutoff
Definition: vacuumlazy.c:169
double old_live_tuples
Definition: vacuumlazy.c:163
bool do_rel_truncate
Definition: vacuumlazy.c:154
BlockNumber scanned_pages
Definition: vacuumlazy.c:191
bool failsafe_active
Definition: vacuumlazy.c:147
int num_index_scans
Definition: vacuumlazy.c:206
double new_live_tuples
Definition: vacuumlazy.c:201
double new_rel_tuples
Definition: vacuumlazy.c:200
int64 new_dead_tuples
Definition: vacuumlazy.c:209
bool consider_bypass_optimization
Definition: vacuumlazy.c:149
TransactionId FreezeLimit
Definition: vacuumlazy.c:168
BlockNumber pinskipped_pages
Definition: vacuumlazy.c:192
BlockNumber pages_removed
Definition: vacuumlazy.c:195
char * relnamespace
Definition: vacuumlazy.c:172
BlockNumber frozenskipped_pages
Definition: vacuumlazy.c:193
int64 lpdead_items
Definition: vacuumlazy.c:208
BlockNumber lpdead_item_pages
Definition: vacuumlazy.c:196
bool do_index_cleanup
Definition: vacuumlazy.c:153
TransactionId relfrozenxid
Definition: vacuumlazy.c:161
VacErrPhase phase
Definition: vacuumlazy.c:177
char * indname
Definition: vacuumlazy.c:174
Form_pg_class rd_rel
Definition: rel.h:109
int freeze_table_age
Definition: vacuum.h:218
VacOptValue truncate
Definition: vacuum.h:228
bits32 options
Definition: vacuum.h:216
int freeze_min_age
Definition: vacuum.h:217
bool is_wraparound
Definition: vacuum.h:223
int multixact_freeze_min_age
Definition: vacuum.h:219
int multixact_freeze_table_age
Definition: vacuum.h:221
int log_min_duration
Definition: vacuum.h:224
VacOptValue index_cleanup
Definition: vacuum.h:227
uint64 wal_bytes
Definition: instrument.h:51
int64 wal_fpi
Definition: instrument.h:50
int64 wal_records
Definition: instrument.h:49
bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
Definition: transam.c:319
void vac_update_relstats(Relation relation, BlockNumber num_pages, double num_tuples, BlockNumber num_all_visible_pages, bool hasindex, TransactionId frozenxid, MultiXactId minmulti, bool in_outer_xact)
Definition: vacuum.c:1313
void vac_open_indexes(Relation relation, LOCKMODE lockmode, int *nindexes, Relation **Irel)
Definition: vacuum.c:2092
void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
Definition: vacuum.c:2135
void vacuum_set_xid_limits(Relation rel, int freeze_min_age, int freeze_table_age, int multixact_freeze_min_age, int multixact_freeze_table_age, TransactionId *oldestXmin, TransactionId *freezeLimit, TransactionId *xidFullScanLimit, MultiXactId *multiXactCutoff, MultiXactId *mxactFullScanLimit)
Definition: vacuum.c:964
#define VACOPT_VERBOSE
Definition: vacuum.h:185
@ VACOPTVALUE_AUTO
Definition: vacuum.h:203
@ VACOPTVALUE_ENABLED
Definition: vacuum.h:205
@ VACOPTVALUE_UNSPECIFIED
Definition: vacuum.h:202
@ VACOPTVALUE_DISABLED
Definition: vacuum.h:204
#define VACOPT_DISABLE_PAGE_SKIPPING
Definition: vacuum.h:190
static void vacuum_error_callback(void *arg)
Definition: vacuumlazy.c:3117
static void lazy_truncate_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:2578
static bool should_attempt_truncation(LVRelState *vacrel)
Definition: vacuumlazy.c:2557
static void lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
Definition: vacuumlazy.c:740
static void update_vacuum_error_info(LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
Definition: vacuumlazy.c:3181
void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)

References _, appendStringInfo(), appendStringInfoString(), ErrorContextCallback::arg, Assert(), LVRelState::bstrategy, buf, ErrorContextCallback::callback, LVRelState::consider_bypass_optimization, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errmsg(), errmsg_internal(), error_context_stack, LVRelState::failsafe_active, VacuumParams::freeze_min_age, VacuumParams::freeze_table_age, LVRelState::FreezeLimit, LVRelState::frozenskipped_pages, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), i, VacuumParams::index_cleanup, LVRelState::indname, LVRelState::indrels, LVRelState::indstats, INFO, initStringInfo(), InvalidMultiXactId, InvalidOffsetNumber, InvalidTransactionId, VacuumParams::is_wraparound, IsAutoVacuumWorkerProcess(), lazy_scan_heap(), lazy_truncate_heap(), LOG, VacuumParams::log_min_duration, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, Max, VacuumParams::multixact_freeze_min_age, VacuumParams::multixact_freeze_table_age, LVRelState::MultiXactCutoff, MultiXactIdPrecedesOrEquals(), MyDatabaseId, LVRelState::new_dead_tuples, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::nindexes, NoLock, LVRelState::nonempty_pages, LVRelState::num_index_scans, IndexBulkDeleteResult::num_pages, LVRelState::old_live_tuples, LVRelState::OldestXmin, VacuumParams::options, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, IndexBulkDeleteResult::pages_newly_deleted, LVRelState::pages_removed, palloc(), palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), pgStatBlockReadTime, pgStatBlockWriteTime, pgWalUsage, LVRelState::phase, LVRelState::pinskipped_pages, ErrorContextCallback::previous, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, pstrdup(), RelationData::rd_rel, LVRelState::rel, LVRelState::rel_pages, RelationGetNamespace, RelationGetRelationName, RelationGetRelid, LVRelState::relfrozenxid, LVRelState::relminmxid, LVRelState::relname, LVRelState::relnamespace, RowExclusiveLock, LVRelState::scanned_pages, should_attempt_truncation(), TimestampDifference(), TimestampDifferenceExceeds(), track_io_timing, TransactionIdPrecedesOrEquals(), VacuumParams::truncate, LVRelState::tuples_deleted, update_vacuum_error_info(), vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_VERBOSE, VACOPTVALUE_AUTO, VACOPTVALUE_DISABLED, VACOPTVALUE_ENABLED, VACOPTVALUE_UNSPECIFIED, VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_ERRCB_PHASE_UNKNOWN, vacuum_error_callback(), vacuum_set_xid_limits(), VacuumPageDirty, VacuumPageHit, VacuumPageMiss, LVRelState::verbose, verbose, visibilitymap_count(), WalUsage::wal_bytes, WalUsage::wal_fpi, WalUsage::wal_records, and WalUsageAccumDiff().
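
For orientation, a sketch of how VACUUM reaches heap_vacuum_rel (paraphrasing the table AM dispatch in tableam.h; the heap access method installs heap_vacuum_rel as its relation_vacuum callback):

static inline void
table_relation_vacuum(Relation rel, struct VacuumParams *params,
                      BufferAccessStrategy bstrategy)
{
    rel->rd_tableam->relation_vacuum(rel, params, bstrategy);
}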

◆ lazy_check_needs_freeze()

static bool lazy_check_needs_freeze ( Buffer  buf,
bool * hastup,
LVRelState * vacrel 
)
static

Definition at line 2297 of file vacuumlazy.c.

2298 {
2299  Page page = BufferGetPage(buf);
2300  OffsetNumber offnum,
2301  maxoff;
2302  HeapTupleHeader tupleheader;
2303 
2304  *hastup = false;
2305 
2306  /*
2307  * New and empty pages, obviously, don't contain tuples. We could make
2308  * sure that the page is registered in the FSM, but it doesn't seem worth
2309  * waiting for a cleanup lock just for that, especially because it's
2310  * likely that the pin holder will do so.
2311  */
2312  if (PageIsNew(page) || PageIsEmpty(page))
2313  return false;
2314 
2315  maxoff = PageGetMaxOffsetNumber(page);
2316  for (offnum = FirstOffsetNumber;
2317  offnum <= maxoff;
2318  offnum = OffsetNumberNext(offnum))
2319  {
2320  ItemId itemid;
2321 
2322  /*
2323  * Set the offset number so that we can display it along with any
2324  * error that occurred while processing this tuple.
2325  */
2326  vacrel->offnum = offnum;
2327  itemid = PageGetItemId(page, offnum);
2328 
2329  /* this should match hastup test in count_nondeletable_pages() */
2330  if (ItemIdIsUsed(itemid))
2331  *hastup = true;
2332 
2333  /* dead and redirect items never need freezing */
2334  if (!ItemIdIsNormal(itemid))
2335  continue;
2336 
2337  tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
2338 
2339  if (heap_tuple_needs_freeze(tupleheader, vacrel->FreezeLimit,
2340  vacrel->MultiXactCutoff, buf))
2341  break;
2342  } /* scan along page */
2343 
2344  /* Clear the offset information once we have processed the given page. */
2345  vacrel->offnum = InvalidOffsetNumber;
2346 
2347  return (offnum <= maxoff);
2348 }
bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf)
Definition: heapam.c:7090

References buf, BufferGetPage, FirstOffsetNumber, LVRelState::FreezeLimit, heap_tuple_needs_freeze(), InvalidOffsetNumber, ItemIdIsNormal, ItemIdIsUsed, LVRelState::MultiXactCutoff, LVRelState::offnum, OffsetNumberNext, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageIsEmpty, and PageIsNew.

Referenced by lazy_scan_heap().

◆ lazy_check_wraparound_failsafe()

static bool lazy_check_wraparound_failsafe ( LVRelState * vacrel)
static

Definition at line 2363 of file vacuumlazy.c.

2364 {
2365  /* Don't warn more than once per VACUUM */
2366  if (vacrel->failsafe_active)
2367  return true;
2368 
2370  vacrel->relminmxid)))
2371  {
2372  vacrel->failsafe_active = true;
2373 
2374  /* Disable index vacuuming, index cleanup, and heap rel truncation */
2375  vacrel->do_index_vacuuming = false;
2376  vacrel->do_index_cleanup = false;
2377  vacrel->do_rel_truncate = false;
2378 
2379  ereport(WARNING,
2380  (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
2381  get_database_name(MyDatabaseId),
2382  vacrel->relnamespace,
2383  vacrel->relname,
2384  vacrel->num_index_scans),
2385  errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
2386  errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
2387  "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
2388 
2389  /* Stop applying cost limits from this point on */
2390  VacuumCostActive = false;
2391  VacuumCostBalance = 0;
2392 
2393  return true;
2394  }
2395 
2396  return false;
2397 }
#define unlikely(x)
Definition: c.h:273
int errdetail(const char *fmt,...)
Definition: elog.c:1037
int errhint(const char *fmt,...)
Definition: elog.c:1151
bool VacuumCostActive
Definition: globals.c:152
int VacuumCostBalance
Definition: globals.c:151
bool vacuum_xid_failsafe_check(TransactionId relfrozenxid, MultiXactId relminmxid)
Definition: vacuum.c:1170

References LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errdetail(), errhint(), errmsg(), LVRelState::failsafe_active, get_database_name(), MyDatabaseId, LVRelState::num_index_scans, LVRelState::relfrozenxid, LVRelState::relminmxid, LVRelState::relname, LVRelState::relnamespace, unlikely, vacuum_xid_failsafe_check(), VacuumCostActive, VacuumCostBalance, and WARNING.

Referenced by lazy_scan_heap(), and lazy_vacuum_all_indexes().
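
The actual trigger lives in vacuum_xid_failsafe_check() (vacuum.c), which compares the table's relfrozenxid and relminmxid ages against the vacuum_failsafe_age and vacuum_multixact_failsafe_age settings (1.6 billion by default); lazy_scan_heap() re-checks it every FAILSAFE_EVERY_PAGES blocks, and lazy_vacuum_all_indexes() re-checks it between index scans.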

◆ lazy_cleanup_all_indexes()

static void lazy_cleanup_all_indexes ( LVRelState * vacrel)
static

Definition at line 2403 of file vacuumlazy.c.

2404 {
2405  Assert(vacrel->nindexes > 0);
2406 
2407  /* Report that we are now cleaning up indexes */
2408  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2409  PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);
2410 
2411  if (!ParallelVacuumIsActive(vacrel))
2412  {
2413  double reltuples = vacrel->new_rel_tuples;
2414  bool estimated_count =
2415  vacrel->tupcount_pages < vacrel->rel_pages;
2416 
2417  for (int idx = 0; idx < vacrel->nindexes; idx++)
2418  {
2419  Relation indrel = vacrel->indrels[idx];
2420  IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2421 
2422  vacrel->indstats[idx] =
2423  lazy_cleanup_one_index(indrel, istat, reltuples,
2424  estimated_count, vacrel);
2425  }
2426  }
2427  else
2428  {
2429  /* Outsource everything to parallel variant */
2430  parallel_vacuum_cleanup_all_indexes(vacrel->pvs, vacrel->new_rel_tuples,
2431  vacrel->num_index_scans,
2432  (vacrel->tupcount_pages < vacrel->rel_pages));
2433  }
2434 }
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
#define PROGRESS_VACUUM_PHASE_INDEX_CLEANUP
Definition: progress.h:33
BlockNumber tupcount_pages
Definition: vacuumlazy.c:194
static IndexBulkDeleteResult * lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
Definition: vacuumlazy.c:2497
void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, bool estimated_count)

References Assert(), idx(), LVRelState::indrels, LVRelState::indstats, lazy_cleanup_one_index(), LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::num_index_scans, parallel_vacuum_cleanup_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, LVRelState::pvs, LVRelState::rel_pages, and LVRelState::tupcount_pages.

Referenced by lazy_scan_heap().

◆ lazy_cleanup_one_index()

static IndexBulkDeleteResult * lazy_cleanup_one_index ( Relation  indrel,
IndexBulkDeleteResult * istat,
double  reltuples,
bool  estimated_count,
LVRelState * vacrel 
)
static

Definition at line 2497 of file vacuumlazy.c.

2500 {
2501  IndexVacuumInfo ivinfo;
2502  LVSavedErrInfo saved_err_info;
2503 
2504  ivinfo.index = indrel;
2505  ivinfo.analyze_only = false;
2506  ivinfo.report_progress = false;
2507  ivinfo.estimated_count = estimated_count;
2508  ivinfo.message_level = DEBUG2;
2509 
2510  ivinfo.num_heap_tuples = reltuples;
2511  ivinfo.strategy = vacrel->bstrategy;
2512 
2513  /*
2514  * Update error traceback information.
2515  *
2516  * The index name is saved during this phase and restored immediately
2517  * after this phase. See vacuum_error_callback.
2518  */
2519  Assert(vacrel->indname == NULL);
2520  vacrel->indname = pstrdup(RelationGetRelationName(indrel));
2521  update_vacuum_error_info(vacrel, &saved_err_info,
2522  VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
2523  InvalidBlockNumber, InvalidOffsetNumber);
2524 
2525  istat = vac_cleanup_one_index(&ivinfo, istat);
2526 
2527  /* Revert to the previous phase information for error traceback */
2528  restore_vacuum_error_info(vacrel, &saved_err_info);
2529  pfree(vacrel->indname);
2530  vacrel->indname = NULL;
2531 
2532  return istat;
2533 }
Relation index
Definition: genam.h:46
double num_heap_tuples
Definition: genam.h:51
bool analyze_only
Definition: genam.h:47
BufferAccessStrategy strategy
Definition: genam.h:52
bool report_progress
Definition: genam.h:48
int message_level
Definition: genam.h:50
bool estimated_count
Definition: genam.h:49
IndexBulkDeleteResult * vac_cleanup_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat)
Definition: vacuum.c:2296
static void restore_vacuum_error_info(LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
Definition: vacuumlazy.c:3200

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_cleanup_one_index(), and VACUUM_ERRCB_PHASE_INDEX_CLEANUP.

Referenced by lazy_cleanup_all_indexes().

◆ lazy_scan_heap()

static void lazy_scan_heap ( LVRelState * vacrel,
VacuumParams * params,
bool  aggressive 
)
static

Definition at line 740 of file vacuumlazy.c.

741 {
742  VacDeadItems *dead_items;
743  BlockNumber nblocks,
744  blkno,
745  next_unskippable_block,
746  next_failsafe_block,
747  next_fsm_block_to_vacuum;
748  Buffer vmbuffer = InvalidBuffer;
749  bool skipping_blocks;
750  const int initprog_index[] = {
751  PROGRESS_VACUUM_PHASE,
752  PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
753  PROGRESS_VACUUM_MAX_DEAD_TUPLES
754  };
755  int64 initprog_val[3];
756  GlobalVisState *vistest;
757 
758  nblocks = RelationGetNumberOfBlocks(vacrel->rel);
759  next_unskippable_block = 0;
760  next_failsafe_block = 0;
761  next_fsm_block_to_vacuum = 0;
762  vacrel->rel_pages = nblocks;
763  vacrel->scanned_pages = 0;
764  vacrel->pinskipped_pages = 0;
765  vacrel->frozenskipped_pages = 0;
766  vacrel->tupcount_pages = 0;
767  vacrel->pages_removed = 0;
768  vacrel->lpdead_item_pages = 0;
769  vacrel->nonempty_pages = 0;
770 
771  /* Initialize instrumentation counters */
772  vacrel->num_index_scans = 0;
773  vacrel->tuples_deleted = 0;
774  vacrel->lpdead_items = 0;
775  vacrel->new_dead_tuples = 0;
776  vacrel->num_tuples = 0;
777  vacrel->live_tuples = 0;
778 
779  vistest = GlobalVisTestFor(vacrel->rel);
780 
781  vacrel->indstats = (IndexBulkDeleteResult **)
782  palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
783 
784  /*
785  * Do failsafe precheck before calling dead_items_alloc. This ensures
786  * that parallel VACUUM won't be attempted when relfrozenxid is already
787  * dangerously old.
788  */
789  lazy_check_wraparound_failsafe(vacrel);
790 
791  /*
792  * Allocate the space for dead_items. Note that this handles parallel
793  * VACUUM initialization as part of allocating shared memory space used
794  * for dead_items.
795  */
796  dead_items_alloc(vacrel, params->nworkers);
797  dead_items = vacrel->dead_items;
798 
799  /* Report that we're scanning the heap, advertising total # of blocks */
800  initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
801  initprog_val[1] = nblocks;
802  initprog_val[2] = dead_items->max_items;
803  pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
804 
805  /*
806  * Except when aggressive is set, we want to skip pages that are
807  * all-visible according to the visibility map, but only when we can skip
808  * at least SKIP_PAGES_THRESHOLD consecutive pages. Since we're reading
809  * sequentially, the OS should be doing readahead for us, so there's no
810  * gain in skipping a page now and then; that's likely to disable
811  * readahead and so be counterproductive. Also, skipping even a single
812  * page means that we can't update relfrozenxid, so we only want to do it
813  * if we can skip a goodly number of pages.
814  *
815  * When aggressive is set, we can't skip pages just because they are
816  * all-visible, but we can still skip pages that are all-frozen, since
817  * such pages do not need freezing and do not affect the value that we can
818  * safely set for relfrozenxid or relminmxid.
819  *
820  * Before entering the main loop, establish the invariant that
821  * next_unskippable_block is the next block number >= blkno that we can't
822  * skip based on the visibility map, either all-visible for a regular scan
823  * or all-frozen for an aggressive scan. We set it to nblocks if there's
824  * no such block. We also set up the skipping_blocks flag correctly at
825  * this stage.
826  *
827  * Note: The value returned by visibilitymap_get_status could be slightly
828  * out-of-date, since we make this test before reading the corresponding
829  * heap page or locking the buffer. This is OK. If we mistakenly think
830  * that the page is all-visible or all-frozen when in fact the flag's just
831  * been cleared, we might fail to vacuum the page. It's easy to see that
832  * skipping a page when aggressive is not set is not a very big deal; we
833  * might leave some dead tuples lying around, but the next vacuum will
834  * find them. But even when aggressive *is* set, it's still OK if we miss
835  * a page whose all-frozen marking has just been cleared. Any new XIDs
836  * just added to that page are necessarily newer than the GlobalXmin we
837  * computed, so they'll have no effect on the value to which we can safely
838  * set relfrozenxid. A similar argument applies for MXIDs and relminmxid.
839  *
840  * We will scan the table's last page, at least to the extent of
841  * determining whether it has tuples or not, even if it should be skipped
842  * according to the above rules; except when we've already determined that
843  * it's not worth trying to truncate the table. This avoids having
844  * lazy_truncate_heap() take access-exclusive lock on the table to attempt
845  * a truncation that just fails immediately because there are tuples in
846  * the last page. This is worth avoiding mainly because such a lock must
847  * be replayed on any hot standby, where it can be disruptive.
848  */
849  if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
850  {
851  while (next_unskippable_block < nblocks)
852  {
853  uint8 vmstatus;
854 
855  vmstatus = visibilitymap_get_status(vacrel->rel,
856  next_unskippable_block,
857  &vmbuffer);
858  if (aggressive)
859  {
860  if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
861  break;
862  }
863  else
864  {
865  if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
866  break;
867  }
868  vacuum_delay_point();
869  next_unskippable_block++;
870  }
871  }
872 
873  if (next_unskippable_block >= SKIP_PAGES_THRESHOLD)
874  skipping_blocks = true;
875  else
876  skipping_blocks = false;
877 
878  for (blkno = 0; blkno < nblocks; blkno++)
879  {
880  Buffer buf;
881  Page page;
882  bool all_visible_according_to_vm = false;
883  LVPagePruneState prunestate;
884 
885  /*
886  * Consider need to skip blocks. See note above about forcing
887  * scanning of last page.
888  */
889 #define FORCE_CHECK_PAGE() \
890  (blkno == nblocks - 1 && should_attempt_truncation(vacrel))
891 
892  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
893 
894  update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_SCAN_HEAP,
895  blkno, InvalidOffsetNumber);
896 
897  if (blkno == next_unskippable_block)
898  {
899  /* Time to advance next_unskippable_block */
900  next_unskippable_block++;
901  if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
902  {
903  while (next_unskippable_block < nblocks)
904  {
905  uint8 vmskipflags;
906 
907  vmskipflags = visibilitymap_get_status(vacrel->rel,
908  next_unskippable_block,
909  &vmbuffer);
910  if (aggressive)
911  {
912  if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
913  break;
914  }
915  else
916  {
917  if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
918  break;
919  }
920  vacuum_delay_point();
921  next_unskippable_block++;
922  }
923  }
924 
925  /*
926  * We know we can't skip the current block. But set up
927  * skipping_blocks to do the right thing at the following blocks.
928  */
929  if (next_unskippable_block - blkno > SKIP_PAGES_THRESHOLD)
930  skipping_blocks = true;
931  else
932  skipping_blocks = false;
933 
934  /*
935  * Normally, the fact that we can't skip this block must mean that
936  * it's not all-visible. But in an aggressive vacuum we know only
937  * that it's not all-frozen, so it might still be all-visible.
938  */
939  if (aggressive && VM_ALL_VISIBLE(vacrel->rel, blkno, &vmbuffer))
940  all_visible_according_to_vm = true;
941  }
942  else
943  {
944  /*
945  * The current block is potentially skippable; if we've seen a
946  * long enough run of skippable blocks to justify skipping it, and
947  * we're not forced to check it, then go ahead and skip.
948  * Otherwise, the page must be at least all-visible if not
949  * all-frozen, so we can set all_visible_according_to_vm = true.
950  */
951  if (skipping_blocks && !FORCE_CHECK_PAGE())
952  {
953  /*
954  * Tricky, tricky. If this is in aggressive vacuum, the page
955  * must have been all-frozen at the time we checked whether it
956  * was skippable, but it might not be any more. We must be
957  * careful to count it as a skipped all-frozen page in that
958  * case, or else we'll think we can't update relfrozenxid and
959  * relminmxid. If it's not an aggressive vacuum, we don't
960  * know whether it was all-frozen, so we have to recheck; but
961  * in this case an approximate answer is OK.
962  */
963  if (aggressive || VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
964  vacrel->frozenskipped_pages++;
965  continue;
966  }
967  all_visible_according_to_vm = true;
968  }
969 
969 
970  vacuum_delay_point();
971 
972  /*
973  * Regularly check if wraparound failsafe should trigger.
974  *
975  * There is a similar check inside lazy_vacuum_all_indexes(), but
976  * relfrozenxid might start to look dangerously old before we reach
977  * that point. This check also provides failsafe coverage for the
978  * one-pass strategy, and the two-pass strategy with the index_cleanup
979  * param set to 'off'.
980  */
981  if (blkno - next_failsafe_block >= FAILSAFE_EVERY_PAGES)
982  {
983  lazy_check_wraparound_failsafe(vacrel);
984  next_failsafe_block = blkno;
985  }
986 
987  /*
988  * Consider if we definitely have enough space to process TIDs on page
989  * already. If we are close to overrunning the available space for
990  * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
991  * this page.
992  */
993  Assert(dead_items->max_items >= MaxHeapTuplesPerPage);
994  if (dead_items->max_items - dead_items->num_items < MaxHeapTuplesPerPage)
995  {
996  /*
997  * Before beginning index vacuuming, we release any pin we may
998  * hold on the visibility map page. This isn't necessary for
999  * correctness, but we do it anyway to avoid holding the pin
1000  * across a lengthy, unrelated operation.
1001  */
1002  if (BufferIsValid(vmbuffer))
1003  {
1004  ReleaseBuffer(vmbuffer);
1005  vmbuffer = InvalidBuffer;
1006  }
1007 
1008  /* Perform a round of index and heap vacuuming */
1009  vacrel->consider_bypass_optimization = false;
1010  lazy_vacuum(vacrel);
1011 
1012  /*
1013  * Vacuum the Free Space Map to make newly-freed space visible on
1014  * upper-level FSM pages. Note we have not yet processed blkno.
1015  */
1016  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1017  blkno);
1018  next_fsm_block_to_vacuum = blkno;
1019 
1020  /* Report that we are once again scanning the heap */
1021  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1022  PROGRESS_VACUUM_PHASE_SCAN_HEAP);
1023  }
1024 
1025  /*
1026  * Set up visibility map page as needed.
1027  *
1028  * Pin the visibility map page in case we need to mark the page
1029  * all-visible. In most cases this will be very cheap, because we'll
1030  * already have the correct page pinned anyway. However, it's
1031  * possible that (a) next_unskippable_block is covered by a different
1032  * VM page than the current block or (b) we released our pin and did a
1033  * cycle of index vacuuming.
1034  */
1035  visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
1036 
1037  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno,
1038  RBM_NORMAL, vacrel->bstrategy);
1039 
1040  /*
1041  * We need buffer cleanup lock so that we can prune HOT chains and
1042  * defragment the page.
1043  */
1044  if (!ConditionalLockBufferForCleanup(buf))
1045  {
1046  bool hastup;
1047 
1048  /*
1049  * If we're not performing an aggressive scan to guard against XID
1050  * wraparound, and we don't want to forcibly check the page, then
1051  * it's OK to skip vacuuming pages we get a lock conflict on. They
1052  * will be dealt with in some future vacuum.
1053  */
1054  if (!aggressive && !FORCE_CHECK_PAGE())
1055  {
1056  ReleaseBuffer(buf);
1057  vacrel->pinskipped_pages++;
1058  continue;
1059  }
1060 
1061  /*
1062  * Read the page with share lock to see if any xids on it need to
1063  * be frozen. If not we just skip the page, after updating our
1064  * scan statistics. If there are some, we wait for cleanup lock.
1065  *
1066  * We could defer the lock request further by remembering the page
1067  * and coming back to it later, or we could even register
1068  * ourselves for multiple buffers and then service whichever one
1069  * is received first. For now, this seems good enough.
1070  *
1071  * If we get here with aggressive false, then we're just forcibly
1072  * checking the page, and so we don't want to insist on getting
1073  * the lock; we only need to know if the page contains tuples, so
1074  * that we can update nonempty_pages correctly. It's convenient
1075  * to use lazy_check_needs_freeze() for both situations, though.
1076  */
1077  LockBuffer(buf, BUFFER_LOCK_SHARE);
1078  if (!lazy_check_needs_freeze(buf, &hastup, vacrel))
1079  {
1080  UnlockReleaseBuffer(buf);
1081  vacrel->scanned_pages++;
1082  vacrel->pinskipped_pages++;
1083  if (hastup)
1084  vacrel->nonempty_pages = blkno + 1;
1085  continue;
1086  }
1087  if (!aggressive)
1088  {
1089  /*
1090  * Here, we must not advance scanned_pages; that would amount
1091  * to claiming that the page contains no freezable tuples.
1092  */
1093  UnlockReleaseBuffer(buf);
1094  vacrel->pinskipped_pages++;
1095  if (hastup)
1096  vacrel->nonempty_pages = blkno + 1;
1097  continue;
1098  }
1099  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1100  LockBufferForCleanup(buf);
1101  /* drop through to normal processing */
1102  }
1103 
1104  /*
1105  * By here we definitely have enough dead_items space for whatever
1106  * LP_DEAD tids are on this page, we have the visibility map page set
1107  * up in case we need to set this page's all_visible/all_frozen bit,
1108  * and we have a cleanup lock. Any tuples on this page are now sure
1109  * to be "counted" by this VACUUM.
1110  *
1111  * One last piece of preamble needs to take place before we can prune:
1112  * we need to consider new and empty pages.
1113  */
1114  vacrel->scanned_pages++;
1115  vacrel->tupcount_pages++;
1116 
1117  page = BufferGetPage(buf);
1118 
1119  if (PageIsNew(page))
1120  {
1121  /*
1122  * All-zeroes pages can be left over if either a backend extends
1123  * the relation by a single page, but crashes before the newly
1124  * initialized page has been written out, or when bulk-extending
1125  * the relation (which creates a number of empty pages at the tail
1126  * end of the relation, but enters them into the FSM).
1127  *
1128  * Note we do not enter the page into the visibilitymap. That has
1129  * the downside that we repeatedly visit this page in subsequent
1130  * vacuums, but otherwise we'll never not discover the space on a
1131  * promoted standby. The harm of repeated checking ought to
1132  * normally not be too bad - the space usually should be used at
1133  * some point, otherwise there wouldn't be any regular vacuums.
1134  *
1135  * Make sure these pages are in the FSM, to ensure they can be
1136  * reused. Do that by testing if there's any space recorded for
1137  * the page. If not, enter it. We do so after releasing the lock
1138  * on the heap page, the FSM is approximate, after all.
1139  */
1140  UnlockReleaseBuffer(buf);
1141 
1142  if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
1143  {
1144  Size freespace = BLCKSZ - SizeOfPageHeaderData;
1145 
1146  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1147  }
1148  continue;
1149  }
1150 
1151  if (PageIsEmpty(page))
1152  {
1153  Size freespace = PageGetHeapFreeSpace(page);
1154 
1155  /*
1156  * Empty pages are always all-visible and all-frozen (note that
1157  * the same is currently not true for new pages, see above).
1158  */
1159  if (!PageIsAllVisible(page))
1160  {
1161  START_CRIT_SECTION();
1162 
1163  /* mark buffer dirty before writing a WAL record */
1164  MarkBufferDirty(buf);
1165 
1166  /*
1167  * It's possible that another backend has extended the heap,
1168  * initialized the page, and then failed to WAL-log the page
1169  * due to an ERROR. Since heap extension is not WAL-logged,
1170  * recovery might try to replay our record setting the page
1171  * all-visible and find that the page isn't initialized, which
1172  * will cause a PANIC. To prevent that, check whether the
1173  * page has been previously WAL-logged, and if not, do that
1174  * now.
1175  */
1176  if (RelationNeedsWAL(vacrel->rel) &&
1177  PageGetLSN(page) == InvalidXLogRecPtr)
1178  log_newpage_buffer(buf, true);
1179 
1180  PageSetAllVisible(page);
1181  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1182  vmbuffer, InvalidTransactionId,
1183  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
1184  END_CRIT_SECTION();
1185  }
1186 
1187  UnlockReleaseBuffer(buf);
1188  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1189  continue;
1190  }
1191 
1192  /*
1193  * Prune and freeze tuples.
1194  *
1195  * Accumulates details of remaining LP_DEAD line pointers on page in
1196  * dead_items array. This includes LP_DEAD line pointers that we
1197  * pruned ourselves, as well as existing LP_DEAD line pointers that
1198  * were pruned some time earlier. Also considers freezing XIDs in the
1199  * tuple headers of remaining items with storage.
1200  */
1201  lazy_scan_prune(vacrel, buf, blkno, page, vistest, &prunestate);
1202 
1203  Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);
1204 
1205  /* Remember the location of the last page with nonremovable tuples */
1206  if (prunestate.hastup)
1207  vacrel->nonempty_pages = blkno + 1;
1208 
1209  if (vacrel->nindexes == 0)
1210  {
1211  /*
1212  * Consider the need to do page-at-a-time heap vacuuming when
1213  * using the one-pass strategy now.
1214  *
1215  * The one-pass strategy will never call lazy_vacuum(). The steps
1216  * performed here can be thought of as the one-pass equivalent of
1217  * a call to lazy_vacuum().
1218  */
1219  if (prunestate.has_lpdead_items)
1220  {
1221  Size freespace;
1222 
1223  lazy_vacuum_heap_page(vacrel, blkno, buf, 0, &vmbuffer);
1224 
1225  /* Forget the LP_DEAD items that we just vacuumed */
1226  dead_items->num_items = 0;
1227 
1228  /*
1229  * Periodically perform FSM vacuuming to make newly-freed
1230  * space visible on upper FSM pages. Note we have not yet
1231  * performed FSM processing for blkno.
1232  */
1233  if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1234  {
1235  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1236  blkno);
1237  next_fsm_block_to_vacuum = blkno;
1238  }
1239 
1240  /*
1241  * Now perform FSM processing for blkno, and move on to next
1242  * page.
1243  *
1244  * Our call to lazy_vacuum_heap_page() will have considered if
1245  * it's possible to set all_visible/all_frozen independently
1246  * of lazy_scan_prune(). Note that prunestate was invalidated
1247  * by lazy_vacuum_heap_page() call.
1248  */
1249  freespace = PageGetHeapFreeSpace(page);
1250 
1251  UnlockReleaseBuffer(buf);
1252  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1253  continue;
1254  }
1255 
1256  /*
1257  * There was no call to lazy_vacuum_heap_page() because pruning
1258  * didn't encounter/create any LP_DEAD items that needed to be
1259  * vacuumed. Prune state has not been invalidated, so proceed
1260  * with prunestate-driven visibility map and FSM steps (just like
1261  * the two-pass strategy).
1262  */
1263  Assert(dead_items->num_items == 0);
1264  }
1265 
1266  /*
1267  * Handle setting visibility map bit based on what the VM said about
1268  * the page before pruning started, and using prunestate
1269  */
1270  if (!all_visible_according_to_vm && prunestate.all_visible)
1271  {
1272  uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
1273 
1274  if (prunestate.all_frozen)
1275  flags |= VISIBILITYMAP_ALL_FROZEN;
1276 
1277  /*
1278  * It should never be the case that the visibility map page is set
1279  * while the page-level bit is clear, but the reverse is allowed
1280  * (if checksums are not enabled). Regardless, set both bits so
1281  * that we get back in sync.
1282  *
1283  * NB: If the heap page is all-visible but the VM bit is not set,
1284  * we don't need to dirty the heap page. However, if checksums
1285  * are enabled, we do need to make sure that the heap page is
1286  * dirtied before passing it to visibilitymap_set(), because it
1287  * may be logged. Given that this situation should only happen in
1288  * rare cases after a crash, it is not worth optimizing.
1289  */
1290  PageSetAllVisible(page);
1291  MarkBufferDirty(buf);
1292  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1293  vmbuffer, prunestate.visibility_cutoff_xid,
1294  flags);
1295  }
1296 
1297  /*
1298  * As of PostgreSQL 9.2, the visibility map bit should never be set if
1299  * the page-level bit is clear. However, it's possible that the bit
1300  * got cleared after we checked it and before we took the buffer
1301  * content lock, so we must recheck before jumping to the conclusion
1302  * that something bad has happened.
1303  */
1304  else if (all_visible_according_to_vm && !PageIsAllVisible(page)
1305  && VM_ALL_VISIBLE(vacrel->rel, blkno, &vmbuffer))
1306  {
1307  elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
1308  vacrel->relname, blkno);
1309  visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
1310  VISIBILITYMAP_VALID_BITS);
1311  }
1312 
1313  /*
1314  * It's possible for the value returned by
1315  * GetOldestNonRemovableTransactionId() to move backwards, so it's not
1316  * wrong for us to see tuples that appear to not be visible to
1317  * everyone yet, while PD_ALL_VISIBLE is already set. The real safe
1318  * xmin value never moves backwards, but
1319  * GetOldestNonRemovableTransactionId() is conservative and sometimes
1320  * returns a value that's unnecessarily small, so if we see that
1321  * contradiction it just means that the tuples that we think are not
1322  * visible to everyone yet actually are, and the PD_ALL_VISIBLE flag
1323  * is correct.
1324  *
1325  * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE
1326  * set, however.
1327  */
1328  else if (prunestate.has_lpdead_items && PageIsAllVisible(page))
1329  {
1330  elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
1331  vacrel->relname, blkno);
1332  PageClearAllVisible(page);
1333  MarkBufferDirty(buf);
1334  visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
1335  VISIBILITYMAP_VALID_BITS);
1336  }
1337 
1338  /*
1339  * If the all-visible page is all-frozen but not marked as such yet,
1340  * mark it as all-frozen. Note that all_frozen is only valid if
1341  * all_visible is true, so we must check both.
1342  */
1343  else if (all_visible_according_to_vm && prunestate.all_visible &&
1344  prunestate.all_frozen &&
1345  !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
1346  {
1347  /*
1348  * We can pass InvalidTransactionId as the cutoff XID here,
1349  * because setting the all-frozen bit doesn't cause recovery
1350  * conflicts.
1351  */
1352  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1353  vmbuffer, InvalidTransactionId,
1354  VISIBILITYMAP_ALL_FROZEN);
1355  }
1356 
1357  /*
1358  * Final steps for block: drop cleanup lock, record free space in the
1359  * FSM
1360  */
1361  if (prunestate.has_lpdead_items && vacrel->do_index_vacuuming)
1362  {
1363  /*
1364  * Wait until lazy_vacuum_heap_rel() to save free space. This
1365  * doesn't just save us some cycles; it also allows us to record
1366  * any additional free space that lazy_vacuum_heap_page() will
1367  * make available in cases where it's possible to truncate the
1368  * page's line pointer array.
1369  *
1370  * Note: It's not in fact 100% certain that we really will call
1371  * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip
1372  * index vacuuming (and so must skip heap vacuuming). This is
1373  * deemed okay because it only happens in emergencies, or when
1374  * there is very little free space anyway. (Besides, we start
1375  * recording free space in the FSM once index vacuuming has been
1376  * abandoned.)
1377  *
1378  * Note: The one-pass (no indexes) case is only supposed to make
1379  * it this far when there were no LP_DEAD items during pruning.
1380  */
1381  Assert(vacrel->nindexes > 0);
1382  UnlockReleaseBuffer(buf);
1383  }
1384  else
1385  {
1386  Size freespace = PageGetHeapFreeSpace(page);
1387 
1388  UnlockReleaseBuffer(buf);
1389  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1390  }
1391  }
1392 
1393  /* report that everything is now scanned */
1394  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1395 
1396  /* Clear the block number information */
1397  vacrel->blkno = InvalidBlockNumber;
1398 
1399  /* now we can compute the new value for pg_class.reltuples */
1400  vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, nblocks,
1401  vacrel->tupcount_pages,
1402  vacrel->live_tuples);
1403 
1404  /*
1405  * Also compute the total number of surviving heap entries. In the
1406  * (unlikely) scenario that new_live_tuples is -1, take it as zero.
1407  */
1408  vacrel->new_rel_tuples =
1409  Max(vacrel->new_live_tuples, 0) + vacrel->new_dead_tuples;
1410 
1411  /*
1412  * Release any remaining pin on visibility map page.
1413  */
1414  if (BufferIsValid(vmbuffer))
1415  {
1416  ReleaseBuffer(vmbuffer);
1417  vmbuffer = InvalidBuffer;
1418  }
1419 
1420  /* Perform a final round of index and heap vacuuming */
1421  if (dead_items->num_items > 0)
1422  lazy_vacuum(vacrel);
1423 
1424  /*
1425  * Vacuum the remainder of the Free Space Map. We must do this whether or
1426  * not there were indexes, and whether or not we bypassed index vacuuming.
1427  */
1428  if (blkno > next_fsm_block_to_vacuum)
1429  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno);
1430 
1431  /* report all blocks vacuumed */
1432  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1433 
1434  /* Do post-vacuum cleanup */
1435  if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
1436  lazy_cleanup_all_indexes(vacrel);
1437 
1438  /*
1439  * Free resources managed by dead_items_alloc. This will end parallel
1440  * mode when needed (it must end before updating index statistics as we
1441  * can't write in parallel mode).
1442  */
1443  dead_items_cleanup(vacrel);
1444 
1445  /* Update index statistics */
1446  if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
1447  update_index_statistics(vacrel);
1448 }
void pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val)
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3757
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1565
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:4053
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:4230
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:212
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
Size PageGetHeapFreeSpace(Page page)
Definition: bufpage.c:984
#define PageIsAllVisible(page)
Definition: bufpage.h:384
#define SizeOfPageHeaderData
Definition: bufpage.h:215
#define PageClearAllVisible(page)
Definition: bufpage.h:388
#define PageSetAllVisible(page)
Definition: bufpage.h:386
#define PageGetLSN(page)
Definition: bufpage.h:365
unsigned char uint8
Definition: c.h:439
size_t Size
Definition: c.h:540
void FreeSpaceMapVacuumRange(Relation rel, BlockNumber start, BlockNumber end)
Definition: freespace.c:352
Size GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk)
Definition: freespace.c:230
void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
Definition: freespace.c:181
#define START_CRIT_SECTION()
Definition: miscadmin.h:147
#define END_CRIT_SECTION()
Definition: miscadmin.h:149
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4042
#define PROGRESS_VACUUM_PHASE_SCAN_HEAP
Definition: progress.h:30
#define PROGRESS_VACUUM_TOTAL_HEAP_BLKS
Definition: progress.h:22
#define PROGRESS_VACUUM_HEAP_BLKS_SCANNED
Definition: progress.h:23
#define PROGRESS_VACUUM_MAX_DEAD_TUPLES
Definition: progress.h:26
#define PROGRESS_VACUUM_HEAP_BLKS_VACUUMED
Definition: progress.h:24
#define RelationNeedsWAL(relation)
Definition: rel.h:602
TransactionId visibility_cutoff_xid
Definition: vacuumlazy.c:230
int64 live_tuples
Definition: vacuumlazy.c:212
BlockNumber blkno
Definition: vacuumlazy.c:175
int64 num_tuples
Definition: vacuumlazy.c:211
int nworkers
Definition: vacuum.h:235
void vacuum_delay_point(void)
Definition: vacuum.c:2156
double vac_estimate_reltuples(Relation relation, BlockNumber total_pages, BlockNumber scanned_pages, double scanned_tuples)
Definition: vacuum.c:1230
static void dead_items_cleanup(LVRelState *vacrel)
Definition: vacuumlazy.c:2944
#define FORCE_CHECK_PAGE()
static void update_index_statistics(LVRelState *vacrel)
Definition: vacuumlazy.c:3082
static void lazy_scan_prune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, GlobalVisState *vistest, LVPagePruneState *prunestate)
Definition: vacuumlazy.c:1471
static void lazy_vacuum(LVRelState *vacrel)
Definition: vacuumlazy.c:1860
static void lazy_cleanup_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2403
static int lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, int index, Buffer *vmbuffer)
Definition: vacuumlazy.c:2175
static bool lazy_check_wraparound_failsafe(LVRelState *vacrel)
Definition: vacuumlazy.c:2363
static void dead_items_alloc(LVRelState *vacrel, int nworkers)
Definition: vacuumlazy.c:2887
#define SKIP_PAGES_THRESHOLD
Definition: vacuumlazy.c:114
#define FAILSAFE_EVERY_PAGES
Definition: vacuumlazy.c:98
static bool lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelState *vacrel)
Definition: vacuumlazy.c:2297
#define VACUUM_FSM_EVERY_PAGES
Definition: vacuumlazy.c:107
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *buf)
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
#define VM_ALL_VISIBLE(r, b, v)
Definition: visibilitymap.h:24
#define VM_ALL_FROZEN(r, b, v)
Definition: visibilitymap.h:26
#define VISIBILITYMAP_VALID_BITS
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
Definition: xloginsert.c:1144

References LVPagePruneState::all_frozen, LVPagePruneState::all_visible, Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, ConditionalLockBufferForCleanup(), LVRelState::consider_bypass_optimization, LVRelState::dead_items, dead_items_alloc(), dead_items_cleanup(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, elog, END_CRIT_SECTION, FAILSAFE_EVERY_PAGES, FORCE_CHECK_PAGE, FreeSpaceMapVacuumRange(), LVRelState::frozenskipped_pages, GetRecordedFreeSpace(), GlobalVisTestFor(), LVPagePruneState::has_lpdead_items, LVPagePruneState::hastup, LVRelState::indstats, InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, InvalidTransactionId, InvalidXLogRecPtr, lazy_check_needs_freeze(), lazy_check_wraparound_failsafe(), lazy_cleanup_all_indexes(), lazy_scan_prune(), lazy_vacuum(), lazy_vacuum_heap_page(), LVRelState::live_tuples, LockBuffer(), LockBufferForCleanup(), log_newpage_buffer(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAIN_FORKNUM, MarkBufferDirty(), Max, VacDeadItems::max_items, MaxHeapTuplesPerPage, LVRelState::new_dead_tuples, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::num_index_scans, VacDeadItems::num_items, LVRelState::num_tuples, VacuumParams::nworkers, VacuumParams::options, PageClearAllVisible, PageGetHeapFreeSpace(), PageGetLSN, PageIsAllVisible, PageIsEmpty, PageIsNew, LVRelState::pages_removed, PageSetAllVisible, palloc0(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), LVRelState::pinskipped_pages, PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLES, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, RBM_NORMAL, ReadBufferExtended(), RecordPageWithFreeSpace(), LVRelState::rel, LVRelState::rel_pages, RelationGetNumberOfBlocks, RelationNeedsWAL, ReleaseBuffer(), LVRelState::relname, LVRelState::scanned_pages, SizeOfPageHeaderData, SKIP_PAGES_THRESHOLD, START_CRIT_SECTION, LVRelState::tupcount_pages, LVRelState::tuples_deleted, UnlockReleaseBuffer(), update_index_statistics(), update_vacuum_error_info(), vac_estimate_reltuples(), VACOPT_DISABLE_PAGE_SKIPPING, vacuum_delay_point(), VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_FSM_EVERY_PAGES, LVPagePruneState::visibility_cutoff_xid, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, VM_ALL_FROZEN, VM_ALL_VISIBLE, and WARNING.

Referenced by heap_vacuum_rel().
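
The block-skipping heuristic described in the comments above (only skip runs of at least SKIP_PAGES_THRESHOLD consecutive skippable pages, and treat all-visible versus all-frozen differently for aggressive scans) can be modelled in isolation. The following sketch is a simplified, self-contained illustration: vmflags[] stands in for visibilitymap_get_status(), and nothing here is the actual PostgreSQL buffer or visibility-map API.

#include <stdbool.h>
#include <stdio.h>

#define SKIP_PAGES_THRESHOLD 32     /* same constant as vacuumlazy.c */
#define ALL_VISIBLE 0x01
#define ALL_FROZEN  0x02

/* find the first block >= start that the scan is not allowed to skip */
static unsigned int
next_unskippable(const unsigned char *vmflags, unsigned int start,
                 unsigned int nblocks, bool aggressive)
{
    unsigned int blk = start;

    while (blk < nblocks)
    {
        unsigned char flags = vmflags[blk];

        /* aggressive scans may only skip all-frozen pages */
        if (aggressive ? (flags & ALL_FROZEN) == 0
                       : (flags & ALL_VISIBLE) == 0)
            break;
        blk++;
    }
    return blk;
}

int
main(void)
{
    unsigned char vm[100];
    unsigned int  nblocks = 100;

    /* mark the first 40 blocks all-visible, the rest not */
    for (unsigned int i = 0; i < nblocks; i++)
        vm[i] = (i < 40) ? ALL_VISIBLE : 0;

    unsigned int next = next_unskippable(vm, 0, nblocks, false);
    bool skipping = (next >= SKIP_PAGES_THRESHOLD);

    /* a 40-block skippable run exceeds the threshold, so skipping engages */
    printf("next_unskippable_block=%u skipping_blocks=%d\n", next, skipping);
    return 0;
}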

◆ lazy_scan_prune()

static void lazy_scan_prune ( LVRelState *  vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
GlobalVisState *  vistest,
LVPagePruneState *  prunestate 
)
static

Definition at line 1471 of file vacuumlazy.c.

1477 {
1478  Relation rel = vacrel->rel;
1479  OffsetNumber offnum,
1480  maxoff;
1481  ItemId itemid;
1482  HeapTupleData tuple;
1483  HTSV_Result res;
1484  int tuples_deleted,
1485  lpdead_items,
1486  new_dead_tuples,
1487  num_tuples,
1488  live_tuples;
1489  int nnewlpdead;
1490  int nfrozen;
1491  OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
1492  xl_heap_freeze_tuple frozen[MaxHeapTuplesPerPage];
1493 
1494  maxoff = PageGetMaxOffsetNumber(page);
1495 
1496 retry:
1497 
1498  /* Initialize (or reset) page-level counters */
1499  tuples_deleted = 0;
1500  lpdead_items = 0;
1501  new_dead_tuples = 0;
1502  num_tuples = 0;
1503  live_tuples = 0;
1504 
1505  /*
1506  * Prune all HOT-update chains in this page.
1507  *
1508  * We count tuples removed by the pruning step as tuples_deleted. Its
1509  * final value can be thought of as the number of tuples that have been
1510  * deleted from the table. It should not be confused with lpdead_items;
1511  * lpdead_items's final value can be thought of as the number of tuples
1512  * that were deleted from indexes.
1513  */
1514  tuples_deleted = heap_page_prune(rel, buf, vistest,
1515  InvalidTransactionId, 0, &nnewlpdead,
1516  &vacrel->offnum);
1517 
1518  /*
1519  * Now scan the page to collect LP_DEAD items and check for tuples
1520  * requiring freezing among remaining tuples with storage
1521  */
1522  prunestate->hastup = false;
1523  prunestate->has_lpdead_items = false;
1524  prunestate->all_visible = true;
1525  prunestate->all_frozen = true;
1526  prunestate->visibility_cutoff_xid = InvalidTransactionId;
1527  nfrozen = 0;
1528 
1529  for (offnum = FirstOffsetNumber;
1530  offnum <= maxoff;
1531  offnum = OffsetNumberNext(offnum))
1532  {
1533  bool tuple_totally_frozen;
1534 
1535  /*
1536  * Set the offset number so that we can display it along with any
1537  * error that occurred while processing this tuple.
1538  */
1539  vacrel->offnum = offnum;
1540  itemid = PageGetItemId(page, offnum);
1541 
1542  if (!ItemIdIsUsed(itemid))
1543  continue;
1544 
1545  /* Redirect items mustn't be touched */
1546  if (ItemIdIsRedirected(itemid))
1547  {
1548  prunestate->hastup = true; /* page won't be truncatable */
1549  continue;
1550  }
1551 
1552  /*
1553  * LP_DEAD items are processed outside of the loop.
1554  *
1555  * Note that we deliberately don't set hastup=true in the case of an
1556  * LP_DEAD item here, which is not how lazy_check_needs_freeze() or
1557  * count_nondeletable_pages() do it -- they only consider pages empty
1558  * when they only have LP_UNUSED items, which is important for
1559  * correctness.
1560  *
1561  * Our assumption is that any LP_DEAD items we encounter here will
1562  * become LP_UNUSED inside lazy_vacuum_heap_page() before we actually
1563  * call count_nondeletable_pages(). In any case our opinion of
1564  * whether or not a page 'hastup' (which is how our caller sets its
1565  * vacrel->nonempty_pages value) is inherently race-prone. It must be
1566  * treated as advisory/unreliable, so we might as well be slightly
1567  * optimistic.
1568  */
1569  if (ItemIdIsDead(itemid))
1570  {
1571  deadoffsets[lpdead_items++] = offnum;
1572  prunestate->all_visible = false;
1573  prunestate->has_lpdead_items = true;
1574  continue;
1575  }
1576 
1577  Assert(ItemIdIsNormal(itemid));
1578 
1579  ItemPointerSet(&(tuple.t_self), blkno, offnum);
1580  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
1581  tuple.t_len = ItemIdGetLength(itemid);
1582  tuple.t_tableOid = RelationGetRelid(rel);
1583 
1584  /*
1585  * DEAD tuples are almost always pruned into LP_DEAD line pointers by
1586  * heap_page_prune(), but it's possible that the tuple state changed
1587  * since heap_page_prune() looked. Handle that here by restarting.
1588  * (See comments at the top of function for a full explanation.)
1589  */
1590  res = HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf);
1591 
1592  if (unlikely(res == HEAPTUPLE_DEAD))
1593  goto retry;
1594 
1595  /*
1596  * The criteria for counting a tuple as live in this block need to
1597  * match what analyze.c's acquire_sample_rows() does, otherwise VACUUM
1598  * and ANALYZE may produce wildly different reltuples values, e.g.
1599  * when there are many recently-dead tuples.
1600  *
1601  * The logic here is a bit simpler than acquire_sample_rows(), as
1602  * VACUUM can't run inside a transaction block, which makes some cases
1603  * impossible (e.g. in-progress insert from the same transaction).
1604  *
1605  * We treat LP_DEAD items (which are the closest thing to DEAD tuples
1606  * that might be seen here) differently, too: we assume that they'll
1607  * become LP_UNUSED before VACUUM finishes. This difference is only
1608  * superficial. VACUUM effectively agrees with ANALYZE about DEAD
1609  * items, in the end. VACUUM won't remember LP_DEAD items, but only
1610  * because they're not supposed to be left behind when it is done.
1611  * (Cases where we bypass index vacuuming will violate this optimistic
1612  * assumption, but the overall impact of that should be negligible.)
1613  */
1614  switch (res)
1615  {
1616  case HEAPTUPLE_LIVE:
1617 
1618  /*
1619  * Count it as live. Not only is this natural, but it's also
1620  * what acquire_sample_rows() does.
1621  */
1622  live_tuples++;
1623 
1624  /*
1625  * Is the tuple definitely visible to all transactions?
1626  *
1627  * NB: Like with per-tuple hint bits, we can't set the
1628  * PD_ALL_VISIBLE flag if the inserter committed
1629  * asynchronously. See SetHintBits for more info. Check that
1630  * the tuple is hinted xmin-committed because of that.
1631  */
1632  if (prunestate->all_visible)
1633  {
1634  TransactionId xmin;
1635 
1636  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
1637  {
1638  prunestate->all_visible = false;
1639  break;
1640  }
1641 
1642  /*
1643  * The inserter definitely committed. But is it old enough
1644  * that everyone sees it as committed?
1645  */
1646  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
1647  if (!TransactionIdPrecedes(xmin, vacrel->OldestXmin))
1648  {
1649  prunestate->all_visible = false;
1650  break;
1651  }
1652 
1653  /* Track newest xmin on page. */
1654  if (TransactionIdFollows(xmin, prunestate->visibility_cutoff_xid))
1655  prunestate->visibility_cutoff_xid = xmin;
1656  }
1657  break;
1658  case HEAPTUPLE_RECENTLY_DEAD:
1659 
1660  /*
1661  * If tuple is recently deleted then we must not remove it
1662  * from relation. (We only remove items that are LP_DEAD from
1663  * pruning.)
1664  */
1665  new_dead_tuples++;
1666  prunestate->all_visible = false;
1667  break;
1668  case HEAPTUPLE_INSERT_IN_PROGRESS:
1669 
1670  /*
1671  * We do not count these rows as live, because we expect the
1672  * inserting transaction to update the counters at commit, and
1673  * we assume that will happen only after we report our
1674  * results. This assumption is a bit shaky, but it is what
1675  * acquire_sample_rows() does, so be consistent.
1676  */
1677  prunestate->all_visible = false;
1678  break;
1679  case HEAPTUPLE_DELETE_IN_PROGRESS:
1680  /* This is an expected case during concurrent vacuum */
1681  prunestate->all_visible = false;
1682 
1683  /*
1684  * Count such rows as live. As above, we assume the deleting
1685  * transaction will commit and update the counters after we
1686  * report.
1687  */
1688  live_tuples++;
1689  break;
1690  default:
1691  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1692  break;
1693  }
1694 
1695  /*
1696  * Non-removable tuple (i.e. tuple with storage).
1697  *
1698  * Check tuple left behind after pruning to see if needs to be frozen
1699  * now.
1700  */
1701  num_tuples++;
1702  prunestate->hastup = true;
1703  if (heap_prepare_freeze_tuple(tuple.t_data,
1704  vacrel->relfrozenxid,
1705  vacrel->relminmxid,
1706  vacrel->FreezeLimit,
1707  vacrel->MultiXactCutoff,
1708  &frozen[nfrozen],
1709  &tuple_totally_frozen))
1710  {
1711  /* Will execute freeze below */
1712  frozen[nfrozen++].offset = offnum;
1713  }
1714 
1715  /*
1716  * If tuple is not frozen (and not about to become frozen) then caller
1717  * had better not go on to set this page's VM bit
1718  */
1719  if (!tuple_totally_frozen)
1720  prunestate->all_frozen = false;
1721  }
1722 
1723  /*
1724  * We have now divided every item on the page into either an LP_DEAD item
1725  * that will need to be vacuumed in indexes later, or a LP_NORMAL tuple
1726  * that remains and needs to be considered for freezing now (LP_UNUSED and
1727  * LP_REDIRECT items also remain, but are of no further interest to us).
1728  */
1729  vacrel->offnum = InvalidOffsetNumber;
1730 
1731  /*
1732  * Consider the need to freeze any items with tuple storage from the page
1733  * first (arbitrary)
1734  */
1735  if (nfrozen > 0)
1736  {
1737  Assert(prunestate->hastup);
1738 
1739  /*
1740  * At least one tuple with storage needs to be frozen -- execute that
1741  * now.
1742  *
1743  * If we need to freeze any tuples we'll mark the buffer dirty, and
1744  * write a WAL record recording the changes. We must log the changes
1745  * to be crash-safe against future truncation of CLOG.
1746  */
1747  START_CRIT_SECTION();
1748 
1749  MarkBufferDirty(buf);
1750 
1751  /* execute collected freezes */
1752  for (int i = 0; i < nfrozen; i++)
1753  {
1754  HeapTupleHeader htup;
1755 
1756  itemid = PageGetItemId(page, frozen[i].offset);
1757  htup = (HeapTupleHeader) PageGetItem(page, itemid);
1758 
1759  heap_execute_freeze_tuple(htup, &frozen[i]);
1760  }
1761 
1762  /* Now WAL-log freezing if necessary */
1763  if (RelationNeedsWAL(vacrel->rel))
1764  {
1765  XLogRecPtr recptr;
1766 
1767  recptr = log_heap_freeze(vacrel->rel, buf, vacrel->FreezeLimit,
1768  frozen, nfrozen);
1769  PageSetLSN(page, recptr);
1770  }
1771 
1772  END_CRIT_SECTION();
1773  }
1774 
1775  /*
1776  * The second pass over the heap can also set visibility map bits, using
1777  * the same approach. This is important when the table frequently has a
1778  * few old LP_DEAD items on each page by the time we get to it (typically
1779  * because past opportunistic pruning operations freed some non-HOT
1780  * tuples).
1781  *
1782  * VACUUM will call heap_page_is_all_visible() during the second pass over
1783  * the heap to determine all_visible and all_frozen for the page -- this
1784  * is a specialized version of the logic from this function. Now that
1785  * we've finished pruning and freezing, make sure that we're in total
1786  * agreement with heap_page_is_all_visible() using an assertion.
1787  */
1788 #ifdef USE_ASSERT_CHECKING
1789  /* Note that all_frozen value does not matter when !all_visible */
1790  if (prunestate->all_visible)
1791  {
1792  TransactionId cutoff;
1793  bool all_frozen;
1794 
1795  if (!heap_page_is_all_visible(vacrel, buf, &cutoff, &all_frozen))
1796  Assert(false);
1797 
1798  Assert(lpdead_items == 0);
1799  Assert(prunestate->all_frozen == all_frozen);
1800 
1801  /*
1802  * It's possible that we froze tuples and made the page's XID cutoff
1803  * (for recovery conflict purposes) FrozenTransactionId. This is okay
1804  * because visibility_cutoff_xid will be logged by our caller in a
1805  * moment.
1806  */
1807  Assert(cutoff == FrozenTransactionId ||
1808  cutoff == prunestate->visibility_cutoff_xid);
1809  }
1810 #endif
1811 
1812  /*
1813  * Now save details of the LP_DEAD items from the page in vacrel
1814  */
1815  if (lpdead_items > 0)
1816  {
1817  VacDeadItems *dead_items = vacrel->dead_items;
1818  ItemPointerData tmp;
1819 
1820  Assert(!prunestate->all_visible);
1821  Assert(prunestate->has_lpdead_items);
1822 
1823  vacrel->lpdead_item_pages++;
1824 
1825  ItemPointerSetBlockNumber(&tmp, blkno);
1826 
1827  for (int i = 0; i < lpdead_items; i++)
1828  {
1829  ItemPointerSetOffsetNumber(&tmp, deadoffsets[i]);
1830  dead_items->items[dead_items->num_items++] = tmp;
1831  }
1832 
1833  Assert(dead_items->num_items <= dead_items->max_items);
1834  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
1835  dead_items->num_items);
1836  }
1837 
1838  /* Finally, add page-local counts to whole-VACUUM counts */
1839  vacrel->tuples_deleted += tuples_deleted;
1840  vacrel->lpdead_items += lpdead_items;
1841  vacrel->new_dead_tuples += new_dead_tuples;
1842  vacrel->num_tuples += num_tuples;
1843  vacrel->live_tuples += live_tuples;
1844 }
#define PageSetLSN(page, lsn)
Definition: bufpage.h:367
void heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
Definition: heapam.c:6621
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
Definition: heapam.c:6392
XLogRecPtr log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples)
Definition: heapam.c:7992
HTSV_Result
Definition: heapam.h:94
#define ItemPointerSetOffsetNumber(pointer, offsetNumber)
Definition: itemptr.h:148
#define ItemPointerSetBlockNumber(pointer, blockNumber)
Definition: itemptr.h:138
#define PROGRESS_VACUUM_NUM_DEAD_TUPLES
Definition: progress.h:27
int heap_page_prune(Relation relation, Buffer buffer, GlobalVisState *vistest, TransactionId old_snap_xmin, TimestampTz old_snap_ts, int *nnewlpdead, OffsetNumber *off_loc)
Definition: pruneheap.c:263
ItemPointerData items[FLEXIBLE_ARRAY_MEMBER]
Definition: vacuum.h:247
OffsetNumber offset
Definition: heapam_xlog.h:327
#define FrozenTransactionId
Definition: transam.h:33
static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
Definition: vacuumlazy.c:2969
uint64 XLogRecPtr
Definition: xlogdefs.h:21

References LVPagePruneState::all_frozen, LVPagePruneState::all_visible, Assert(), buf, LVRelState::dead_items, elog, END_CRIT_SECTION, ERROR, FirstOffsetNumber, LVRelState::FreezeLimit, FrozenTransactionId, LVPagePruneState::has_lpdead_items, LVPagePruneState::hastup, heap_execute_freeze_tuple(), heap_page_is_all_visible(), heap_page_prune(), heap_prepare_freeze_tuple(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleSatisfiesVacuum(), i, InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet, ItemPointerSetBlockNumber, ItemPointerSetOffsetNumber, VacDeadItems::items, LVRelState::live_tuples, log_heap_freeze(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MarkBufferDirty(), VacDeadItems::max_items, MaxHeapTuplesPerPage, LVRelState::MultiXactCutoff, LVRelState::new_dead_tuples, VacDeadItems::num_items, LVRelState::num_tuples, LVRelState::offnum, xl_heap_freeze_tuple::offset, OffsetNumberNext, LVRelState::OldestXmin, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, pgstat_progress_update_param(), PROGRESS_VACUUM_NUM_DEAD_TUPLES, LVRelState::rel, RelationGetRelid, RelationNeedsWAL, LVRelState::relfrozenxid, LVRelState::relminmxid, res, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdPrecedes(), LVRelState::tuples_deleted, unlikely, and LVPagePruneState::visibility_cutoff_xid.

Referenced by lazy_scan_heap().
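
One subtlety above is the retry: if HeapTupleSatisfiesVacuum() reports a tuple as DEAD after heap_page_prune() already ran (for example because its inserter aborted in the meantime), the whole page scan restarts so that the tuple ends up as an LP_DEAD item rather than being left behind. The sketch below models that control flow only, with invented item/visibility enums and placeholder prune and visibility functions; it is not the real pruning code.

#include <stdio.h>
#include <stdbool.h>

typedef enum { ITEM_UNUSED, ITEM_DEAD, ITEM_NORMAL } ItemState;
typedef enum { TUPLE_LIVE, TUPLE_DEAD } Visibility;

static bool aborted_after_prune = false;

/* placeholder visibility check: item 3 becomes removable once its
 * inserter "aborts" */
static Visibility
check_visibility(int offnum)
{
    if (offnum == 3 && aborted_after_prune)
        return TUPLE_DEAD;
    return TUPLE_LIVE;
}

/* placeholder pruning: turn every removable tuple into an LP_DEAD item */
static void
prune_page(ItemState *items, int maxoff)
{
    for (int i = 0; i < maxoff; i++)
        if (items[i] == ITEM_NORMAL && check_visibility(i) == TUPLE_DEAD)
            items[i] = ITEM_DEAD;
}

int
main(void)
{
    ItemState items[8] = {ITEM_NORMAL, ITEM_NORMAL, ITEM_NORMAL, ITEM_NORMAL,
                          ITEM_DEAD, ITEM_UNUSED, ITEM_NORMAL, ITEM_NORMAL};
    int       deadoffsets[8];
    int       lpdead_items;
    int       passes = 0;

retry:
    passes++;
    lpdead_items = 0;
    prune_page(items, 8);

    /* simulate an inserter aborting between pruning and the item scan */
    if (passes == 1)
        aborted_after_prune = true;

    for (int offnum = 0; offnum < 8; offnum++)
    {
        if (items[offnum] == ITEM_UNUSED)
            continue;
        if (items[offnum] == ITEM_DEAD)
        {
            deadoffsets[lpdead_items++] = offnum;
            continue;
        }
        /* DEAD found after pruning means pruning is stale: start over */
        if (check_visibility(offnum) == TUPLE_DEAD)
            goto retry;
    }

    printf("passes=%d lpdead_items=%d first=%d\n",
           passes, lpdead_items, deadoffsets[0]);
    return 0;
}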

◆ lazy_truncate_heap()

static void lazy_truncate_heap ( LVRelState *  vacrel)
static

Definition at line 2578 of file vacuumlazy.c.

2579 {
2580  BlockNumber orig_rel_pages = vacrel->rel_pages;
2581  BlockNumber new_rel_pages;
2582  bool lock_waiter_detected;
2583  int lock_retry;
2584 
2585  /* Report that we are now truncating */
2586  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2587  PROGRESS_VACUUM_PHASE_TRUNCATE);
2588 
2589  /*
2590  * Loop until no more truncating can be done.
2591  */
2592  do
2593  {
2594  /*
2595  * We need full exclusive lock on the relation in order to do
2596  * truncation. If we can't get it, give up rather than waiting --- we
2597  * don't want to block other backends, and we don't want to deadlock
2598  * (which is quite possible considering we already hold a lower-grade
2599  * lock).
2600  */
2601  lock_waiter_detected = false;
2602  lock_retry = 0;
2603  while (true)
2604  {
2605  if (ConditionalLockRelation(vacrel->rel, AccessExclusiveLock))
2606  break;
2607 
2608  /*
2609  * Check for interrupts while trying to (re-)acquire the exclusive
2610  * lock.
2611  */
2612  CHECK_FOR_INTERRUPTS();
2613 
2614  if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
2615  VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
2616  {
2617  /*
2618  * We failed to establish the lock in the specified number of
2619  * retries. This means we give up truncating.
2620  */
2621  ereport(vacrel->verbose ? INFO : DEBUG2,
2622  (errmsg("\"%s\": stopping truncate due to conflicting lock request",
2623  vacrel->relname)));
2624  return;
2625  }
2626 
2627  (void) WaitLatch(MyLatch,
2628  WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
2629  VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL,
2630  WAIT_EVENT_VACUUM_TRUNCATE);
2631  ResetLatch(MyLatch);
2632  }
2633 
2634  /*
2635  * Now that we have exclusive lock, look to see if the rel has grown
2636  * whilst we were vacuuming with non-exclusive lock. If so, give up;
2637  * the newly added pages presumably contain non-deletable tuples.
2638  */
2639  new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
2640  if (new_rel_pages != orig_rel_pages)
2641  {
2642  /*
2643  * Note: we intentionally don't update vacrel->rel_pages with the
2644  * new rel size here. If we did, it would amount to assuming that
2645  * the new pages are empty, which is unlikely. Leaving the numbers
2646  * alone amounts to assuming that the new pages have the same
2647  * tuple density as existing ones, which is less unlikely.
2648  */
2649  UnlockRelation(vacrel->rel, AccessExclusiveLock);
2650 
2651  }
2652 
2653  /*
2654  * Scan backwards from the end to verify that the end pages actually
2655  * contain no tuples. This is *necessary*, not optional, because
2656  * other backends could have added tuples to these pages whilst we
2657  * were vacuuming.
2658  */
2659  new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
2660  vacrel->blkno = new_rel_pages;
2661 
2662  if (new_rel_pages >= orig_rel_pages)
2663  {
2664  /* can't do anything after all */
2665  UnlockRelation(vacrel->rel, AccessExclusiveLock);
2666 
2667  }
2668 
2669  /*
2670  * Okay to truncate.
2671  */
2672  RelationTruncate(vacrel->rel, new_rel_pages);
2673 
2674  /*
2675  * We can release the exclusive lock as soon as we have truncated.
2676  * Other backends can't safely access the relation until they have
2677  * processed the smgr invalidation that smgrtruncate sent out ... but
2678  * that should happen as part of standard invalidation processing once
2679  * they acquire lock on the relation.
2680  */
2681  UnlockRelation(vacrel->rel, AccessExclusiveLock);
2682 
2683  /*
2684  * Update statistics. Here, it *is* correct to adjust rel_pages
2685  * without also touching reltuples, since the tuple count wasn't
2686  * changed by the truncation.
2687  */
2688  vacrel->pages_removed += orig_rel_pages - new_rel_pages;
2689  vacrel->rel_pages = new_rel_pages;
2690 
2691  ereport(vacrel->verbose ? INFO : DEBUG2,
2692  (errmsg("table \"%s\": truncated %u to %u pages",
2693  vacrel->relname,
2694  orig_rel_pages, new_rel_pages)));
2695  orig_rel_pages = new_rel_pages;
2696  } while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
2697 }
struct Latch * MyLatch
Definition: globals.c:57
void ResetLatch(Latch *latch)
Definition: latch.c:660
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:452
#define WL_TIMEOUT
Definition: latch.h:128
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:130
#define WL_LATCH_SET
Definition: latch.h:125
void UnlockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:283
bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:248
#define PROGRESS_VACUUM_PHASE_TRUNCATE
Definition: progress.h:34
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:277
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL
Definition: vacuumlazy.c:86
#define VACUUM_TRUNCATE_LOCK_TIMEOUT
Definition: vacuumlazy.c:87
static BlockNumber count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
Definition: vacuumlazy.c:2705
@ WAIT_EVENT_VACUUM_TRUNCATE
Definition: wait_event.h:148

References AccessExclusiveLock, LVRelState::blkno, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), DEBUG2, ereport, errmsg(), INFO, MyLatch, LVRelState::nonempty_pages, LVRelState::pages_removed, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelState::rel, LVRelState::rel_pages, RelationGetNumberOfBlocks, RelationTruncate(), LVRelState::relname, ResetLatch(), UnlockRelation(), VACUUM_TRUNCATE_LOCK_TIMEOUT, VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL, LVRelState::verbose, WAIT_EVENT_VACUUM_TRUNCATE, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

Referenced by heap_vacuum_rel().
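
The lock-acquisition loop above gives up after VACUUM_TRUNCATE_LOCK_TIMEOUT milliseconds of polling in VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL steps, i.e. 5000 / 50 = 100 attempts with the shipped settings. The sketch below is a self-contained model of just that retry arithmetic; conditional_lock() is a placeholder for ConditionalLockRelation(), and no real waiting or locking is performed.

#include <stdbool.h>
#include <stdio.h>

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50      /* ms */
#define VACUUM_TRUNCATE_LOCK_TIMEOUT       5000    /* ms */

/* stand-in for ConditionalLockRelation(): succeeds on the Nth attempt */
static bool
conditional_lock(int attempt, int succeeds_on)
{
    return attempt >= succeeds_on;
}

static bool
acquire_with_retries(int succeeds_on)
{
    int lock_retry = 0;

    while (true)
    {
        if (conditional_lock(lock_retry, succeeds_on))
            return true;        /* got the lock, truncation can proceed */

        if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
                            VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
            return false;       /* stop truncating: conflicting lock requests */

        /* the real code waits on its latch for the wait interval here */
    }
}

int
main(void)
{
    printf("lock on 10th try: %s\n",
           acquire_with_retries(10) ? "truncate" : "give up");
    printf("never available:  %s\n",
           acquire_with_retries(1000) ? "truncate" : "give up");
    return 0;
}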

◆ lazy_vacuum()

static void lazy_vacuum ( LVRelState *  vacrel)
static

Definition at line 1860 of file vacuumlazy.c.

1861 {
1862  bool bypass;
1863 
1864  /* Should not end up here with no indexes */
1865  Assert(vacrel->nindexes > 0);
1866  Assert(vacrel->lpdead_item_pages > 0);
1867 
1868  if (!vacrel->do_index_vacuuming)
1869  {
1870  Assert(!vacrel->do_index_cleanup);
1871  vacrel->dead_items->num_items = 0;
1872  return;
1873  }
1874 
1875  /*
1876  * Consider bypassing index vacuuming (and heap vacuuming) entirely.
1877  *
1878  * We currently only do this in cases where the number of LP_DEAD items
1879  * for the entire VACUUM operation is close to zero. This avoids sharp
1880  * discontinuities in the duration and overhead of successive VACUUM
1881  * operations that run against the same table with a fixed workload.
1882  * Ideally, successive VACUUM operations will behave as if there are
1883  * exactly zero LP_DEAD items in cases where there are close to zero.
1884  *
1885  * This is likely to be helpful with a table that is continually affected
1886  * by UPDATEs that can mostly apply the HOT optimization, but occasionally
1887  * have small aberrations that lead to just a few heap pages retaining
1888  * only one or two LP_DEAD items. This is pretty common; even when the
1889  * DBA goes out of their way to make UPDATEs use HOT, it is practically
1890  * impossible to predict whether HOT will be applied in 100% of cases.
1891  * It's far easier to ensure that 99%+ of all UPDATEs against a table use
1892  * HOT through careful tuning.
1893  */
1894  bypass = false;
1895  if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
1896  {
1897  BlockNumber threshold;
1898 
1899  Assert(vacrel->num_index_scans == 0);
1900  Assert(vacrel->lpdead_items == vacrel->dead_items->num_items);
1901  Assert(vacrel->do_index_vacuuming);
1902  Assert(vacrel->do_index_cleanup);
1903 
1904  /*
1905  * This crossover point at which we'll start to do index vacuuming is
1906  * expressed as a percentage of the total number of heap pages in the
1907  * table that are known to have at least one LP_DEAD item. This is
1908  * much more important than the total number of LP_DEAD items, since
1909  * it's a proxy for the number of heap pages whose visibility map bits
1910  * cannot be set on account of bypassing index and heap vacuuming.
1911  *
1912  * We apply one further precautionary test: the space currently used
1913  * to store the TIDs (TIDs that now all point to LP_DEAD items) must
1914  * not exceed 32MB. This limits the risk that we will bypass index
1915  * vacuuming again and again until eventually there is a VACUUM whose
1916  * dead_items space is not CPU cache resident.
1917  *
1918  * We don't take any special steps to remember the LP_DEAD items (such
1919  * as counting them in new_dead_tuples report to the stats collector)
1920  * when the optimization is applied. Though the accounting used in
1921  * analyze.c's acquire_sample_rows() will recognize the same LP_DEAD
1922  * items as dead rows in its own stats collector report, that's okay.
1923  * The discrepancy should be negligible. If this optimization is ever
1924  * expanded to cover more cases then this may need to be reconsidered.
1925  */
1926  threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
1927  bypass = (vacrel->lpdead_item_pages < threshold &&
1928  vacrel->lpdead_items < MAXDEADITEMS(32L * 1024L * 1024L));
1929  }
1930 
1931  if (bypass)
1932  {
1933  /*
1934  * There are almost zero TIDs. Behave as if there were precisely
1935  * zero: bypass index vacuuming, but do index cleanup.
1936  *
1937  * We expect that the ongoing VACUUM operation will finish very
1938  * quickly, so there is no point in considering speeding up as a
1939  * failsafe against wraparound failure. (Index cleanup is expected to
1940  * finish very quickly in cases where there were no ambulkdelete()
1941  * calls.)
1942  */
1943  vacrel->do_index_vacuuming = false;
1944  }
1945  else if (lazy_vacuum_all_indexes(vacrel))
1946  {
1947  /*
1948  * We successfully completed a round of index vacuuming. Do related
1949  * heap vacuuming now.
1950  */
1951  lazy_vacuum_heap_rel(vacrel);
1952  }
1953  else
1954  {
1955  /*
1956  * Failsafe case.
1957  *
1958  * We attempted index vacuuming, but didn't finish a full round/full
1959  * index scan. This happens when relfrozenxid or relminmxid is too
1960  * far in the past.
1961  *
1962  * From this point on the VACUUM operation will do no further index
1963  * vacuuming or heap vacuuming. This VACUUM operation won't end up
1964  * back here again.
1965  */
1966  Assert(vacrel->failsafe_active);
1967  }
1968 
1969  /*
1970  * Forget the LP_DEAD items that we just vacuumed (or just decided to not
1971  * vacuum)
1972  */
1973  vacrel->dead_items->num_items = 0;
1974 }

References Assert(), BYPASS_THRESHOLD_PAGES, LVRelState::consider_bypass_optimization, LVRelState::dead_items, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::failsafe_active, lazy_vacuum_all_indexes(), lazy_vacuum_heap_rel(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAXDEADITEMS, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItems::num_items, and LVRelState::rel_pages.

Referenced by lazy_scan_heap().

◆ lazy_vacuum_all_indexes()

static bool lazy_vacuum_all_indexes ( LVRelState *  vacrel)
static

Definition at line 1985 of file vacuumlazy.c.

1986 {
1987  bool allindexes = true;
1988 
1989  Assert(vacrel->nindexes > 0);
1990  Assert(vacrel->do_index_vacuuming);
1991  Assert(vacrel->do_index_cleanup);
1992  Assert(TransactionIdIsNormal(vacrel->relfrozenxid));
1993  Assert(MultiXactIdIsValid(vacrel->relminmxid));
1994 
1995  /* Precheck for XID wraparound emergencies */
1996  if (lazy_check_wraparound_failsafe(vacrel))
1997  {
1998  /* Wraparound emergency -- don't even start an index scan */
1999  return false;
2000  }
2001 
2002  /* Report that we are now vacuuming indexes */
2003  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2004  PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
2005 
2006  if (!ParallelVacuumIsActive(vacrel))
2007  {
2008  for (int idx = 0; idx < vacrel->nindexes; idx++)
2009  {
2010  Relation indrel = vacrel->indrels[idx];
2011  IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2012 
2013  vacrel->indstats[idx] =
2014  lazy_vacuum_one_index(indrel, istat, vacrel->old_live_tuples,
2015  vacrel);
2016 
2017  if (lazy_check_wraparound_failsafe(vacrel))
2018  {
2019  /* Wraparound emergency -- end current index scan */
2020  allindexes = false;
2021  break;
2022  }
2023  }
2024  }
2025  else
2026  {
2027  /* Outsource everything to parallel variant */
2028  parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, vacrel->old_live_tuples,
2029  vacrel->num_index_scans);
2030 
2031  /*
2032  * Do a postcheck to consider applying wraparound failsafe now. Note
2033  * that parallel VACUUM only gets the precheck and this postcheck.
2034  */
2035  if (lazy_check_wraparound_failsafe(vacrel))
2036  allindexes = false;
2037  }
2038 
2039  /*
2040  * We delete all LP_DEAD items from the first heap pass in all indexes on
2041  * each call here (except calls where we choose to do the failsafe). This
2042  * makes the next call to lazy_vacuum_heap_rel() safe (except in the event
2043  * of the failsafe triggering, which prevents the next call from taking
2044  * place).
2045  */
2046  Assert(vacrel->num_index_scans > 0 ||
2047  vacrel->dead_items->num_items == vacrel->lpdead_items);
2048  Assert(allindexes || vacrel->failsafe_active);
2049 
2050  /*
2051  * Increase and report the number of index scans.
2052  *
2053  * We deliberately include the case where we started a round of bulk
2054  * deletes that we weren't able to finish due to the failsafe triggering.
2055  */
2056  vacrel->num_index_scans++;
2057  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_INDEX_VACUUMS,
2058  vacrel->num_index_scans);
2059 
2060  return allindexes;
2061 }
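The structure of the routine is a failsafe precheck, one ambulkdelete() pass per index, and a recheck after every index so that an emergency can cut the round short. The following standalone sketch shows only that control-flow skeleton; check_failsafe() and bulkdel_index() are hypothetical stand-ins, not PostgreSQL functions.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for lazy_check_wraparound_failsafe() and the
 * per-index bulk delete; they are not PostgreSQL APIs. */
static bool
check_failsafe(void)
{
    return false;               /* pretend no wraparound emergency */
}

static void
bulkdel_index(int idx)
{
    printf("bulk delete in index %d\n", idx);
}

/* Returns true only if every index completed its bulk-delete pass */
static bool
vacuum_all_indexes_sketch(int nindexes)
{
    bool        allindexes = true;

    if (check_failsafe())
        return false;           /* precheck: skip the whole round */

    for (int idx = 0; idx < nindexes; idx++)
    {
        bulkdel_index(idx);

        if (check_failsafe())
        {
            allindexes = false; /* abandon the remaining indexes */
            break;
        }
    }
    return allindexes;
}

int
main(void)
{
    printf("completed all indexes: %s\n",
           vacuum_all_indexes_sketch(3) ? "yes" : "no");
    return 0;
}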

References Assert(), LVRelState::dead_items, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::failsafe_active, idx(), LVRelState::indrels, LVRelState::indstats, lazy_check_wraparound_failsafe(), lazy_vacuum_one_index(), LVRelState::lpdead_items, MultiXactIdIsValid, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItems::num_items, LVRelState::old_live_tuples, parallel_vacuum_bulkdel_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_param(), PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, LVRelState::pvs, LVRelState::relfrozenxid, LVRelState::relminmxid, and TransactionIdIsNormal.

Referenced by lazy_vacuum().

◆ lazy_vacuum_heap_page()

static int lazy_vacuum_heap_page ( LVRelState *  vacrel,
BlockNumber  blkno,
Buffer  buffer,
int  index,
Buffer *  vmbuffer 
)
static

Definition at line 2175 of file vacuumlazy.c.

2177 {
2178  VacDeadItems *dead_items = vacrel->dead_items;
2179  Page page = BufferGetPage(buffer);
2180  OffsetNumber unused[MaxHeapTuplesPerPage];
2181  int uncnt = 0;
2182  TransactionId visibility_cutoff_xid;
2183  bool all_frozen;
2184  LVSavedErrInfo saved_err_info;
2185 
2186  Assert(vacrel->nindexes == 0 || vacrel->do_index_vacuuming);
2187 
2188  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
2189 
2190  /* Update error traceback information */
2191  update_vacuum_error_info(vacrel, &saved_err_info,
2192  VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
2193  InvalidOffsetNumber);
2194 
2195  START_CRIT_SECTION();
2196 
2197  for (; index < dead_items->num_items; index++)
2198  {
2199  BlockNumber tblk;
2200  OffsetNumber toff;
2201  ItemId itemid;
2202 
2203  tblk = ItemPointerGetBlockNumber(&dead_items->items[index]);
2204  if (tblk != blkno)
2205  break; /* past end of tuples for this block */
2206  toff = ItemPointerGetOffsetNumber(&dead_items->items[index]);
2207  itemid = PageGetItemId(page, toff);
2208 
2209  Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
2210  ItemIdSetUnused(itemid);
2211  unused[uncnt++] = toff;
2212  }
2213 
2214  Assert(uncnt > 0);
2215 
2216  /* Attempt to truncate line pointer array now */
2217  PageTruncateLinePointerArray(page);
2218 
2219  /*
2220  * Mark buffer dirty before we write WAL.
2221  */
2222  MarkBufferDirty(buffer);
2223 
2224  /* XLOG stuff */
2225  if (RelationNeedsWAL(vacrel->rel))
2226  {
2227  xl_heap_vacuum xlrec;
2228  XLogRecPtr recptr;
2229 
2230  xlrec.nunused = uncnt;
2231 
2232  XLogBeginInsert();
2233  XLogRegisterData((char *) &xlrec, SizeOfHeapVacuum);
2234 
2235  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
2236  XLogRegisterBufData(0, (char *) unused, uncnt * sizeof(OffsetNumber));
2237 
2238  recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VACUUM);
2239 
2240  PageSetLSN(page, recptr);
2241  }
2242 
2243  /*
2244  * End critical section, so we safely can do visibility tests (which
2245  * possibly need to perform IO and allocate memory!). If we crash now the
2246  * page (including the corresponding vm bit) might not be marked all
2247  * visible, but that's fine. A later vacuum will fix that.
2248  */
2249  END_CRIT_SECTION();
2250 
2251  /*
2252  * Now that we have removed the LP_DEAD items from the page, once again
2253  * check if the page has become all-visible. The page is already marked
2254  * dirty, exclusively locked, and, if needed, a full page image has been
2255  * emitted.
2256  */
2257  if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
2258  &all_frozen))
2259  PageSetAllVisible(page);
2260 
2261  /*
2262  * All the changes to the heap page have been done. If the all-visible
2263  * flag is now set, also set the VM all-visible bit (and, if possible, the
2264  * all-frozen bit) unless this has already been done previously.
2265  */
2266  if (PageIsAllVisible(page))
2267  {
2268  uint8 flags = 0;
2269  uint8 vm_status = visibilitymap_get_status(vacrel->rel,
2270  blkno, vmbuffer);
2271 
2272  /* Set the VM all-frozen bit to flag, if needed */
2273  if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
2274  flags |= VISIBILITYMAP_ALL_VISIBLE;
2275  if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0 && all_frozen)
2276  flags |= VISIBILITYMAP_ALL_FROZEN;
2277 
2278  Assert(BufferIsValid(*vmbuffer));
2279  if (flags != 0)
2280  visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr,
2281  *vmbuffer, visibility_cutoff_xid, flags);
2282  }
2283 
2284  /* Revert to the previous phase information for error traceback */
2285  restore_vacuum_error_info(vacrel, &saved_err_info);
2286  return index;
2287 }
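The page modification above follows the standard WAL protocol: make the changes and mark the buffer dirty inside a critical section, register the buffer plus payload, insert the record, then stamp the page LSN. The sketch below restates that protocol for the same XLOG_HEAP2_VACUUM record; it assumes backend context and that the caller holds an exclusive lock on buf, and it is an illustration of the pattern rather than a replacement for the function above.

#include "postgres.h"

#include "access/heapam_xlog.h"
#include "access/xloginsert.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"

/*
 * Set each line pointer in unused[] to LP_UNUSED on the page in buf and
 * WAL-log the change, following the same protocol as lazy_vacuum_heap_page().
 * Caller must hold a pin and an exclusive lock on buf.
 */
static void
vacuum_page_items_sketch(Relation rel, Buffer buf,
                         OffsetNumber *unused, int nunused)
{
    Page        page = BufferGetPage(buf);

    START_CRIT_SECTION();

    /* Apply the page changes */
    for (int i = 0; i < nunused; i++)
        ItemIdSetUnused(PageGetItemId(page, unused[i]));
    PageTruncateLinePointerArray(page);

    /* Dirty the buffer before writing WAL */
    MarkBufferDirty(buf);

    if (RelationNeedsWAL(rel))
    {
        xl_heap_vacuum xlrec;
        XLogRecPtr  recptr;

        xlrec.nunused = nunused;

        XLogBeginInsert();
        XLogRegisterData((char *) &xlrec, SizeOfHeapVacuum);
        XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
        XLogRegisterBufData(0, (char *) unused, nunused * sizeof(OffsetNumber));

        recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VACUUM);
        PageSetLSN(page, recptr);
    }

    END_CRIT_SECTION();
}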

References Assert(), BufferGetPage, BufferIsValid, LVRelState::dead_items, LVRelState::do_index_vacuuming, END_CRIT_SECTION, heap_page_is_all_visible(), InvalidOffsetNumber, InvalidXLogRecPtr, ItemIdHasStorage, ItemIdIsDead, ItemIdSetUnused, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, VacDeadItems::items, MarkBufferDirty(), MaxHeapTuplesPerPage, LVRelState::nindexes, VacDeadItems::num_items, xl_heap_vacuum::nunused, PageGetItemId, PageIsAllVisible, PageSetAllVisible, PageSetLSN, PageTruncateLinePointerArray(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, REGBUF_STANDARD, LVRelState::rel, RelationNeedsWAL, restore_vacuum_error_info(), SizeOfHeapVacuum, START_CRIT_SECTION, update_vacuum_error_info(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_get_status(), visibilitymap_set(), XLOG_HEAP2_VACUUM, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by lazy_scan_heap(), and lazy_vacuum_heap_rel().

◆ lazy_vacuum_heap_rel()

static void lazy_vacuum_heap_rel ( LVRelState *  vacrel)
static

Definition at line 2082 of file vacuumlazy.c.

2083 {
2084  int index;
2085  BlockNumber vacuumed_pages;
2086  Buffer vmbuffer = InvalidBuffer;
2087  LVSavedErrInfo saved_err_info;
2088 
2089  Assert(vacrel->do_index_vacuuming);
2090  Assert(vacrel->do_index_cleanup);
2091  Assert(vacrel->num_index_scans > 0);
2092 
2093  /* Report that we are now vacuuming the heap */
2094  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2095  PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
2096 
2097  /* Update error traceback information */
2098  update_vacuum_error_info(vacrel, &saved_err_info,
2099  VACUUM_ERRCB_PHASE_VACUUM_HEAP,
2100  InvalidBlockNumber, InvalidOffsetNumber);
2101 
2102  vacuumed_pages = 0;
2103 
2104  index = 0;
2105  while (index < vacrel->dead_items->num_items)
2106  {
2107  BlockNumber tblk;
2108  Buffer buf;
2109  Page page;
2110  Size freespace;
2111 
2112  vacuum_delay_point();
2113 
2114  tblk = ItemPointerGetBlockNumber(&vacrel->dead_items->items[index]);
2115  vacrel->blkno = tblk;
2116  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, tblk, RBM_NORMAL,
2117  vacrel->bstrategy);
2118  LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2119  index = lazy_vacuum_heap_page(vacrel, tblk, buf, index, &vmbuffer);
2120 
2121  /* Now that we've vacuumed the page, record its available space */
2122  page = BufferGetPage(buf);
2123  freespace = PageGetHeapFreeSpace(page);
2124 
2125  UnlockReleaseBuffer(buf);
2126  RecordPageWithFreeSpace(vacrel->rel, tblk, freespace);
2127  vacuumed_pages++;
2128  }
2129 
2130  /* Clear the block number information */
2131  vacrel->blkno = InvalidBlockNumber;
2132 
2133  if (BufferIsValid(vmbuffer))
2134  {
2135  ReleaseBuffer(vmbuffer);
2136  vmbuffer = InvalidBuffer;
2137  }
2138 
2139  /*
2140  * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
2141  * the second heap pass. No more, no less.
2142  */
2143  Assert(index > 0);
2144  Assert(vacrel->num_index_scans > 1 ||
2145  (index == vacrel->lpdead_items &&
2146  vacuumed_pages == vacrel->lpdead_item_pages));
2147 
2148  ereport(DEBUG2,
2149  (errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
2150  vacrel->relname, (long long) index, vacuumed_pages)));
2151 
2152  /* Revert to the previous phase information for error traceback */
2153  restore_vacuum_error_info(vacrel, &saved_err_info);
2154 }
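Because dead_items is kept in heap order, the two loops form a simple protocol: the outer loop picks the block of the next unprocessed TID, and the per-page routine consumes every TID for that block and returns the next array index. A standalone sketch of that protocol, with a simplified TID struct standing in for ItemPointerData:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for ItemPointerData: (block, offset), sorted by block */
typedef struct
{
    uint32_t    block;
    uint16_t    offset;
} DeadTid;

/* Consume all TIDs for tids[index].block; return index of the next block's TIDs */
static int
vacuum_one_block(const DeadTid *tids, int ntids, int index)
{
    uint32_t    blkno = tids[index].block;

    for (; index < ntids; index++)
    {
        if (tids[index].block != blkno)
            break;              /* past the end of this block's TIDs */
        printf("  unset item %u on block %u\n", tids[index].offset, blkno);
    }
    return index;
}

int
main(void)
{
    DeadTid     tids[] = {{3, 1}, {3, 7}, {8, 2}, {8, 5}, {8, 9}, {12, 4}};
    int         ntids = sizeof(tids) / sizeof(tids[0]);
    int         index = 0;

    while (index < ntids)
    {
        printf("block %u:\n", tids[index].block);
        index = vacuum_one_block(tids, ntids, index);
    }
    return 0;
}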

References Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, BufferIsValid, LVRelState::dead_items, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, ereport, errmsg(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, ItemPointerGetBlockNumber, VacDeadItems::items, lazy_vacuum_heap_page(), LockBuffer(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAIN_FORKNUM, LVRelState::num_index_scans, PageGetHeapFreeSpace(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, RBM_NORMAL, ReadBufferExtended(), RecordPageWithFreeSpace(), LVRelState::rel, ReleaseBuffer(), LVRelState::relname, restore_vacuum_error_info(), UnlockReleaseBuffer(), update_vacuum_error_info(), vacuum_delay_point(), and VACUUM_ERRCB_PHASE_VACUUM_HEAP.

Referenced by lazy_vacuum().

◆ lazy_vacuum_one_index()

static IndexBulkDeleteResult * lazy_vacuum_one_index ( Relation  indrel,
IndexBulkDeleteResult *  istat,
double  reltuples,
LVRelState *  vacrel 
)
static

Definition at line 2450 of file vacuumlazy.c.

2452 {
2453  IndexVacuumInfo ivinfo;
2454  LVSavedErrInfo saved_err_info;
2455 
2456  ivinfo.index = indrel;
2457  ivinfo.analyze_only = false;
2458  ivinfo.report_progress = false;
2459  ivinfo.estimated_count = true;
2460  ivinfo.message_level = DEBUG2;
2461  ivinfo.num_heap_tuples = reltuples;
2462  ivinfo.strategy = vacrel->bstrategy;
2463 
2464  /*
2465  * Update error traceback information.
2466  *
2467  * The index name is saved during this phase and restored immediately
2468  * after this phase. See vacuum_error_callback.
2469  */
2470  Assert(vacrel->indname == NULL);
2471  vacrel->indname = pstrdup(RelationGetRelationName(indrel));
2472  update_vacuum_error_info(vacrel, &saved_err_info,
2473  VACUUM_ERRCB_PHASE_VACUUM_INDEX,
2474  InvalidBlockNumber, InvalidOffsetNumber);
2475 
2476  /* Do bulk deletion */
2477  istat = vac_bulkdel_one_index(&ivinfo, istat, (void *) vacrel->dead_items);
2478 
2479  /* Revert to the previous phase information for error traceback */
2480  restore_vacuum_error_info(vacrel, &saved_err_info);
2481  pfree(vacrel->indname);
2482  vacrel->indname = NULL;
2483 
2484  return istat;
2485 }
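All the per-index state that ambulkdelete() needs travels in the IndexVacuumInfo. The sketch below shows how a caller in backend code might fill the same fields before handing off to vac_bulkdel_one_index(); the function and parameter names here are illustrative, not part of vacuumlazy.c.

#include "postgres.h"

#include "access/genam.h"
#include "commands/vacuum.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"

/*
 * Bulk-delete the TIDs in 'dead_items' from one index, setting up the call
 * the same way lazy_vacuum_one_index() does.  'reltuples' is the caller's
 * estimate of live heap tuples; 'strategy' may be NULL.
 */
static IndexBulkDeleteResult *
bulkdel_index_sketch(Relation indrel, IndexBulkDeleteResult *istat,
                     double reltuples, BufferAccessStrategy strategy,
                     VacDeadItems *dead_items)
{
    IndexVacuumInfo ivinfo;

    ivinfo.index = indrel;
    ivinfo.analyze_only = false;
    ivinfo.report_progress = false;
    ivinfo.estimated_count = true;  /* reltuples is only an estimate */
    ivinfo.message_level = DEBUG2;
    ivinfo.num_heap_tuples = reltuples;
    ivinfo.strategy = strategy;

    return vac_bulkdel_one_index(&ivinfo, istat, dead_items);
}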

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, LVRelState::dead_items, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_bulkdel_one_index(), and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by lazy_vacuum_all_indexes().

◆ restore_vacuum_error_info()

static void restore_vacuum_error_info ( LVRelState *  vacrel,
const LVSavedErrInfo *  saved_vacrel 
)
static

Definition at line 3200 of file vacuumlazy.c.

3202 {
3203  vacrel->blkno = saved_vacrel->blkno;
3204  vacrel->offnum = saved_vacrel->offnum;
3205  vacrel->phase = saved_vacrel->phase;
3206 }

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().

◆ should_attempt_truncation()

static bool should_attempt_truncation ( LVRelState *  vacrel)
static

Definition at line 2557 of file vacuumlazy.c.

2558 {
2559  BlockNumber possibly_freeable;
2560 
2561  if (!vacrel->do_rel_truncate || vacrel->failsafe_active)
2562  return false;
2563 
2564  possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
2565  if (possibly_freeable > 0 &&
2566  (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
2567  possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION) &&
2568  old_snapshot_threshold < 0)
2569  return true;
2570  else
2571  return false;
2572 }
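With REL_TRUNCATE_MINIMUM at 1000 pages and REL_TRUNCATE_FRACTION at 16, truncation is only worth attempting when the empty tail is at least 1,000 pages or at least 1/16 of the table (and old_snapshot_threshold is disabled). A quick standalone worked example with hypothetical page counts:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror the thresholds from vacuumlazy.c */
#define REL_TRUNCATE_MINIMUM 1000
#define REL_TRUNCATE_FRACTION 16

int
main(void)
{
    /* Hypothetical relation: 10,000 pages, of which the last 700 are empty */
    uint32_t    rel_pages = 10000;
    uint32_t    nonempty_pages = 9300;
    uint32_t    possibly_freeable = rel_pages - nonempty_pages;

    bool        attempt = possibly_freeable > 0 &&
                          (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
                           possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);

    /* 700 >= 10000/16 (= 625), so truncation would be attempted */
    printf("possibly_freeable = %u, attempt truncation = %s\n",
           possibly_freeable, attempt ? "yes" : "no");
    return 0;
}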

References LVRelState::do_rel_truncate, LVRelState::failsafe_active, LVRelState::nonempty_pages, old_snapshot_threshold, LVRelState::rel_pages, REL_TRUNCATE_FRACTION, and REL_TRUNCATE_MINIMUM.

Referenced by heap_vacuum_rel().

◆ update_index_statistics()

static void update_index_statistics ( LVRelState *  vacrel)
static

Definition at line 3082 of file vacuumlazy.c.

3083 {
3084  Relation *indrels = vacrel->indrels;
3085  int nindexes = vacrel->nindexes;
3086  IndexBulkDeleteResult **indstats = vacrel->indstats;
3087 
3088  Assert(!IsInParallelMode());
3089 
3090  for (int idx = 0; idx < nindexes; idx++)
3091  {
3092  Relation indrel = indrels[idx];
3093  IndexBulkDeleteResult *istat = indstats[idx];
3094 
3095  if (istat == NULL || istat->estimated_count)
3096  continue;
3097 
3098  /* Update index statistics */
3099  vac_update_relstats(indrel,
3100  istat->num_pages,
3101  istat->num_index_tuples,
3102  0,
3103  false,
3104  InvalidTransactionId,
3105  InvalidMultiXactId,
3106  false);
3107  }
3108 }

References Assert(), IndexBulkDeleteResult::estimated_count, idx(), LVRelState::indrels, LVRelState::indstats, InvalidMultiXactId, InvalidTransactionId, IsInParallelMode(), LVRelState::nindexes, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, and vac_update_relstats().

Referenced by lazy_scan_heap().

◆ update_vacuum_error_info()

static void update_vacuum_error_info ( LVRelState *  vacrel,
LVSavedErrInfo *  saved_vacrel,
int  phase,
BlockNumber  blkno,
OffsetNumber  offnum 
)
static

Definition at line 3181 of file vacuumlazy.c.

3183 {
3184  if (saved_vacrel)
3185  {
3186  saved_vacrel->offnum = vacrel->offnum;
3187  saved_vacrel->blkno = vacrel->blkno;
3188  saved_vacrel->phase = vacrel->phase;
3189  }
3190 
3191  vacrel->blkno = blkno;
3192  vacrel->offnum = offnum;
3193  vacrel->phase = phase;
3194 }
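Together with restore_vacuum_error_info(), this forms a save/set/restore bracket: a nested phase (for example, vacuuming one heap page in the middle of a heap pass) temporarily overwrites the reported block, offset and phase, then puts the caller's values back. A standalone sketch of the same idea, with hypothetical names:

#include <stdio.h>

typedef enum { PHASE_UNKNOWN, PHASE_SCAN_HEAP, PHASE_VACUUM_HEAP } Phase;

typedef struct
{
    unsigned    blkno;
    Phase       phase;
} ErrInfo;

/* Stand-in for the error-reporting state carried in LVRelState */
static ErrInfo current = {0, PHASE_UNKNOWN};

/* Save the current context, then point it at the new phase/block */
static void
push_err_info(ErrInfo *saved, Phase phase, unsigned blkno)
{
    *saved = current;
    current.phase = phase;
    current.blkno = blkno;
}

/* Restore whatever context the caller had */
static void
pop_err_info(const ErrInfo *saved)
{
    current = *saved;
}

int
main(void)
{
    ErrInfo     saved;

    push_err_info(&saved, PHASE_VACUUM_HEAP, 42);
    printf("working in phase %d, block %u\n", current.phase, current.blkno);
    pop_err_info(&saved);
    printf("back to phase %d\n", current.phase);
    return 0;
}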

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by heap_vacuum_rel(), lazy_cleanup_one_index(), lazy_scan_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().

◆ vacuum_error_callback()

static void vacuum_error_callback ( void *  arg)
static

Definition at line 3117 of file vacuumlazy.c.

3118 {
3119  LVRelState *errinfo = arg;
3120 
3121  switch (errinfo->phase)
3122  {
3123  case VACUUM_ERRCB_PHASE_SCAN_HEAP:
3124  if (BlockNumberIsValid(errinfo->blkno))
3125  {
3126  if (OffsetNumberIsValid(errinfo->offnum))
3127  errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
3128  errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3129  else
3130  errcontext("while scanning block %u of relation \"%s.%s\"",
3131  errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3132  }
3133  else
3134  errcontext("while scanning relation \"%s.%s\"",
3135  errinfo->relnamespace, errinfo->relname);
3136  break;
3137 
3138  case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
3139  if (BlockNumberIsValid(errinfo->blkno))
3140  {
3141  if (OffsetNumberIsValid(errinfo->offnum))
3142  errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
3143  errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3144  else
3145  errcontext("while vacuuming block %u of relation \"%s.%s\"",
3146  errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3147  }
3148  else
3149  errcontext("while vacuuming relation \"%s.%s\"",
3150  errinfo->relnamespace, errinfo->relname);
3151  break;
3152 
3153  case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
3154  errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
3155  errinfo->indname, errinfo->relnamespace, errinfo->relname);
3156  break;
3157 
3158  case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
3159  errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
3160  errinfo->indname, errinfo->relnamespace, errinfo->relname);
3161  break;
3162 
3163  case VACUUM_ERRCB_PHASE_TRUNCATE:
3164  if (BlockNumberIsValid(errinfo->blkno))
3165  errcontext("while truncating relation \"%s.%s\" to %u blocks",
3166  errinfo->relnamespace, errinfo->relname, errinfo->blkno);
3167  break;
3168 
3169  case VACUUM_ERRCB_PHASE_UNKNOWN:
3170  default:
3171  return; /* do nothing; the errinfo may not be
3172  * initialized */
3173  }
3174 }
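This callback only takes effect once it is pushed onto error_context_stack, which heap_vacuum_rel() (the function listed under "Referenced by" below) is responsible for. The sketch shows the usual ErrorContextCallback push/pop sequence such a caller would use so that ereport() output picks up the phase information; the wrapper function name is hypothetical and vacrel is assumed to be the caller's LVRelState.

#include "postgres.h"

/* The callback defined above in vacuumlazy.c; 'arg' points at an LVRelState */
static void vacuum_error_callback(void *arg);

static void
run_with_vacuum_error_context(void *vacrel)
{
    ErrorContextCallback errcallback;

    /* Push our callback onto the error context stack */
    errcallback.callback = vacuum_error_callback;
    errcallback.arg = vacrel;
    errcallback.previous = error_context_stack;
    error_context_stack = &errcallback;

    /*
     * ... scan, vacuum and truncate the relation here; any ereport() raised
     * in this window gets a "while scanning block ..." style context line ...
     */

    /* Pop the error context stack before returning */
    error_context_stack = errcallback.previous;
}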

References arg, LVRelState::blkno, BlockNumberIsValid, errcontext, LVRelState::indname, LVRelState::offnum, OffsetNumberIsValid, LVRelState::phase, LVRelState::relname, LVRelState::relnamespace, VACUUM_ERRCB_PHASE_INDEX_CLEANUP, VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_ERRCB_PHASE_UNKNOWN, VACUUM_ERRCB_PHASE_VACUUM_HEAP, and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by heap_vacuum_rel().