PostgreSQL Source Code  git master
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/amapi.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "catalog/index.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "optimizer/paths.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "tcop/tcopprot.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"
Include dependency graph for vacuumlazy.c:

Go to the source code of this file.

Data Structures

struct  LVRelState
 
struct  LVPagePruneState
 
struct  LVSavedErrInfo
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */
 
#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define ParallelVacuumIsActive(vacrel)   ((vacrel)->pvs != NULL)
 

Typedefs

typedef struct LVRelState LVRelState
 
typedef struct LVPagePruneState LVPagePruneState
 
typedef struct LVSavedErrInfo LVSavedErrInfo
 

Enumerations

enum  VacErrPhase {
  VACUUM_ERRCB_PHASE_UNKNOWN , VACUUM_ERRCB_PHASE_SCAN_HEAP , VACUUM_ERRCB_PHASE_VACUUM_INDEX , VACUUM_ERRCB_PHASE_VACUUM_HEAP ,
  VACUUM_ERRCB_PHASE_INDEX_CLEANUP , VACUUM_ERRCB_PHASE_TRUNCATE
}
 

Functions

static void lazy_scan_heap (LVRelState *vacrel)
 
static BlockNumber lazy_scan_skip (LVRelState *vacrel, Buffer *vmbuffer, BlockNumber next_block, bool *next_unskippable_allvis, bool *skipping_current_range)
 
static bool lazy_scan_new_or_empty (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
 
static void lazy_scan_prune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, LVPagePruneState *prunestate)
 
static bool lazy_scan_noprune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *hastup, bool *recordfreespace)
 
static void lazy_vacuum (LVRelState *vacrel)
 
static bool lazy_vacuum_all_indexes (LVRelState *vacrel)
 
static void lazy_vacuum_heap_rel (LVRelState *vacrel)
 
static int lazy_vacuum_heap_page (LVRelState *vacrel, BlockNumber blkno, Buffer buffer, int index, Buffer vmbuffer)
 
static bool lazy_check_wraparound_failsafe (LVRelState *vacrel)
 
static void lazy_cleanup_all_indexes (LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_vacuum_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_cleanup_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
 
static bool should_attempt_truncation (LVRelState *vacrel)
 
static void lazy_truncate_heap (LVRelState *vacrel)
 
static BlockNumber count_nondeletable_pages (LVRelState *vacrel, bool *lock_waiter_detected)
 
static void dead_items_alloc (LVRelState *vacrel, int nworkers)
 
static void dead_items_cleanup (LVRelState *vacrel)
 
static bool heap_page_is_all_visible (LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
 
static void update_relstats_all_indexes (LVRelState *vacrel)
 
static void vacuum_error_callback (void *arg)
 
static void update_vacuum_error_info (LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
 
static void restore_vacuum_error_info (LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
 
void heap_vacuum_rel (Relation rel, VacuumParams *params, BufferAccessStrategy bstrategy)
 
static int dead_items_max_items (LVRelState *vacrel)
 

Macro Definition Documentation

◆ BYPASS_THRESHOLD_PAGES

#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */

Definition at line 94 of file vacuumlazy.c.
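As a quick illustration of this threshold (a standalone sketch with made-up numbers, not code from vacuumlazy.c; the authoritative check lives in lazy_vacuum()): index and heap vacuuming can only be bypassed while fewer than 2% of rel_pages contain LP_DEAD items, and lazy_vacuum() additionally caps the absolute number of LP_DEAD items.

    #include <stdio.h>

    /* Standalone illustration: the 2% bypass threshold for a hypothetical
     * 100,000-page table. */
    int
    main(void)
    {
        double      rel_pages = 100000.0;
        double      threshold = rel_pages * 0.02;   /* BYPASS_THRESHOLD_PAGES */

        printf("index vacuuming can be bypassed while lpdead_item_pages < %.0f\n",
               threshold);
        return 0;
    }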

◆ FAILSAFE_EVERY_PAGES

#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 100 of file vacuumlazy.c.
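A worked evaluation of the expression (standalone sketch; assumes the default 8 kB BLCKSZ): the failsafe is rechecked roughly every 4 GB of heap scanned, and VACUUM_FSM_EVERY_PAGES below works the same way at 8 GB.

    #include <stdio.h>
    #include <stdint.h>

    /* Standalone illustration of FAILSAFE_EVERY_PAGES / VACUUM_FSM_EVERY_PAGES
     * with the default 8192-byte block size. */
    int
    main(void)
    {
        uint64_t    blcksz = 8192;
        uint64_t    failsafe_every = (uint64_t) 4 * 1024 * 1024 * 1024 / blcksz;
        uint64_t    fsm_every = (uint64_t) 8 * 1024 * 1024 * 1024 / blcksz;

        printf("FAILSAFE_EVERY_PAGES = %llu blocks (4 GB of heap)\n",
               (unsigned long long) failsafe_every);
        printf("VACUUM_FSM_EVERY_PAGES = %llu blocks (8 GB of heap)\n",
               (unsigned long long) fsm_every);
        return 0;
    }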

◆ ParallelVacuumIsActive

#define ParallelVacuumIsActive (   vacrel)    ((vacrel)->pvs != NULL)

Definition at line 128 of file vacuumlazy.c.

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 122 of file vacuumlazy.c.

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 77 of file vacuumlazy.c.

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 76 of file vacuumlazy.c.
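To see how the two REL_TRUNCATE_* settings combine (standalone sketch with made-up table sizes; the real test lives in should_attempt_truncation()): truncation is attempted once the empty tail reaches REL_TRUNCATE_MINIMUM pages, or 1/REL_TRUNCATE_FRACTION of the table, whichever is smaller.

    #include <stdio.h>

    /* Standalone illustration of the truncation heuristic for two table sizes. */
    int
    main(void)
    {
        unsigned    rel_pages[] = {800, 64000};

        for (int i = 0; i < 2; i++)
        {
            unsigned    by_fraction = rel_pages[i] / 16;    /* REL_TRUNCATE_FRACTION */
            unsigned    threshold = by_fraction < 1000 ? by_fraction : 1000; /* REL_TRUNCATE_MINIMUM */

            printf("rel_pages = %u: try truncation once >= %u trailing pages are empty\n",
                   rel_pages[i], threshold);
        }
        return 0;
    }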

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 116 of file vacuumlazy.c.

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 109 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 86 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 88 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 87 of file vacuumlazy.c.
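Taken together, these three intervals bound how politely the truncation phase behaves (standalone arithmetic sketch; lazy_truncate_heap() and count_nondeletable_pages() are where they are actually applied): the AccessExclusiveLock is re-attempted every 50 ms for at most 5 s, and once it is held, conflicting lock waiters are checked for every 20 ms.

    #include <stdio.h>

    /* Standalone illustration of the truncation lock/retry budget. */
    int
    main(void)
    {
        int         wait_ms = 50;       /* VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL */
        int         timeout_ms = 5000;  /* VACUUM_TRUNCATE_LOCK_TIMEOUT */
        int         check_ms = 20;      /* VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL */

        printf("at most ~%d lock attempts before giving up on truncation\n",
               timeout_ms / wait_ms);
        printf("while truncating, lock waiters are checked for every %d ms\n",
               check_ms);
        return 0;
    }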

Typedef Documentation

◆ LVPagePruneState

typedef struct LVPagePruneState LVPagePruneState

◆ LVRelState

typedef struct LVRelState LVRelState

◆ LVSavedErrInfo

typedef struct LVSavedErrInfo LVSavedErrInfo

Enumeration Type Documentation

◆ VacErrPhase

Enumerator
VACUUM_ERRCB_PHASE_UNKNOWN 
VACUUM_ERRCB_PHASE_SCAN_HEAP 
VACUUM_ERRCB_PHASE_VACUUM_INDEX 
VACUUM_ERRCB_PHASE_VACUUM_HEAP 
VACUUM_ERRCB_PHASE_INDEX_CLEANUP 
VACUUM_ERRCB_PHASE_TRUNCATE 

Definition at line 131 of file vacuumlazy.c.

132 {
133  VACUUM_ERRCB_PHASE_UNKNOWN,
134  VACUUM_ERRCB_PHASE_SCAN_HEAP,
135  VACUUM_ERRCB_PHASE_VACUUM_INDEX,
136  VACUUM_ERRCB_PHASE_VACUUM_HEAP,
137  VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
138  VACUUM_ERRCB_PHASE_TRUNCATE
139 } VacErrPhase;

Function Documentation

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages ( LVRelState *  vacrel,
bool *  lock_waiter_detected 
)
static

Definition at line 3016 of file vacuumlazy.c.

3017 {
3018  BlockNumber blkno;
3019  BlockNumber prefetchedUntil;
3020  instr_time starttime;
3021 
3022  /* Initialize the starttime if we check for conflicting lock requests */
3023  INSTR_TIME_SET_CURRENT(starttime);
3024 
3025  /*
3026  * Start checking blocks at what we believe relation end to be and move
3027  * backwards. (Strange coding of loop control is needed because blkno is
3028  * unsigned.) To make the scan faster, we prefetch a few blocks at a time
3029  * in forward direction, so that OS-level readahead can kick in.
3030  */
3031  blkno = vacrel->rel_pages;
3032  StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
3033  "prefetch size must be power of 2");
3034  prefetchedUntil = InvalidBlockNumber;
3035  while (blkno > vacrel->nonempty_pages)
3036  {
3037  Buffer buf;
3038  Page page;
3039  OffsetNumber offnum,
3040  maxoff;
3041  bool hastup;
3042 
3043  /*
3044  * Check if another process requests a lock on our relation. We are
3045  * holding an AccessExclusiveLock here, so they will be waiting. We
3046  * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
3047  * only check if that interval has elapsed once every 32 blocks to
3048  * keep the number of system calls and actual shared lock table
3049  * lookups to a minimum.
3050  */
3051  if ((blkno % 32) == 0)
3052  {
3053  instr_time currenttime;
3054  instr_time elapsed;
3055 
3056  INSTR_TIME_SET_CURRENT(currenttime);
3057  elapsed = currenttime;
3058  INSTR_TIME_SUBTRACT(elapsed, starttime);
3059  if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
3060  >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
3061  {
3062  if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
3063  {
3064  ereport(vacrel->verbose ? INFO : DEBUG2,
3065  (errmsg("table \"%s\": suspending truncate due to conflicting lock request",
3066  vacrel->relname)));
3067 
3068  *lock_waiter_detected = true;
3069  return blkno;
3070  }
3071  starttime = currenttime;
3072  }
3073  }
3074 
3075  /*
3076  * We don't insert a vacuum delay point here, because we have an
3077  * exclusive lock on the table which we want to hold for as short a
3078  * time as possible. We still need to check for interrupts however.
3079  */
3080  CHECK_FOR_INTERRUPTS();
3081 
3082  blkno--;
3083 
3084  /* If we haven't prefetched this lot yet, do so now. */
3085  if (prefetchedUntil > blkno)
3086  {
3087  BlockNumber prefetchStart;
3088  BlockNumber pblkno;
3089 
3090  prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
3091  for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
3092  {
3093  PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
3094  CHECK_FOR_INTERRUPTS();
3095  }
3096  prefetchedUntil = prefetchStart;
3097  }
3098 
3099  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
3100  vacrel->bstrategy);
3101 
3102  /* In this phase we only need shared access to the buffer */
3103  LockBuffer(buf, BUFFER_LOCK_SHARE);
3104 
3105  page = BufferGetPage(buf);
3106 
3107  if (PageIsNew(page) || PageIsEmpty(page))
3108  {
3109  UnlockReleaseBuffer(buf);
3110  continue;
3111  }
3112 
3113  hastup = false;
3114  maxoff = PageGetMaxOffsetNumber(page);
3115  for (offnum = FirstOffsetNumber;
3116  offnum <= maxoff;
3117  offnum = OffsetNumberNext(offnum))
3118  {
3119  ItemId itemid;
3120 
3121  itemid = PageGetItemId(page, offnum);
3122 
3123  /*
3124  * Note: any non-unused item should be taken as a reason to keep
3125  * this page. Even an LP_DEAD item makes truncation unsafe, since
3126  * we must not have cleaned out its index entries.
3127  */
3128  if (ItemIdIsUsed(itemid))
3129  {
3130  hastup = true;
3131  break; /* can stop scanning */
3132  }
3133  } /* scan along page */
3134 
3135  UnlockReleaseBuffer(buf);
3136 
3137  /* Done scanning if we found a tuple here */
3138  if (hastup)
3139  return blkno + 1;
3140  }
3141 
3142  /*
3143  * If we fall out of the loop, all the previously-thought-to-be-empty
3144  * pages still are; we need not bother to look at the last known-nonempty
3145  * page.
3146  */
3147  return vacrel->nonempty_pages;
3148 }

References AccessExclusiveLock, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BufferGetPage(), CHECK_FOR_INTERRUPTS, DEBUG2, ereport, errmsg(), FirstOffsetNumber, INFO, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelState::nonempty_pages, OffsetNumberNext, PageGetItemId(), PageGetMaxOffsetNumber(), PageIsEmpty(), PageIsNew(), PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelState::rel, LVRelState::rel_pages, LVRelState::relname, StaticAssertStmt, UnlockReleaseBuffer(), VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and LVRelState::verbose.

Referenced by lazy_truncate_heap().
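The prefetch arithmetic above relies on PREFETCH_SIZE being a power of two: blkno & ~(PREFETCH_SIZE - 1) rounds the current block down to a 32-block boundary, so each backward step prefetches the whole aligned window ahead of it. A standalone sketch of that rounding (illustrative values only):

    #include <stdio.h>
    #include <stdint.h>

    /* Standalone illustration of the prefetch-window rounding used above. */
    int
    main(void)
    {
        uint32_t    prefetch_size = 32;     /* PREFETCH_SIZE */
        uint32_t    blkno = 1000;           /* hypothetical current block */
        uint32_t    prefetchStart = blkno & ~(prefetch_size - 1);

        printf("block %u prefetches the aligned range %u..%u\n",
               blkno, prefetchStart, blkno);
        return 0;
    }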

◆ dead_items_alloc()

static void dead_items_alloc ( LVRelState *  vacrel,
int  nworkers 
)
static

Definition at line 3198 of file vacuumlazy.c.

3199 {
3200  VacDeadItems *dead_items;
3201  int max_items;
3202 
3203  max_items = dead_items_max_items(vacrel);
3204  Assert(max_items >= MaxHeapTuplesPerPage);
3205 
3206  /*
3207  * Initialize state for a parallel vacuum. As of now, only one worker can
3208  * be used for an index, so we invoke parallelism only if there are at
3209  * least two indexes on a table.
3210  */
3211  if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)
3212  {
3213  /*
3214  * Since parallel workers cannot access data in temporary tables, we
3215  * can't perform parallel vacuum on them.
3216  */
3217  if (RelationUsesLocalBuffers(vacrel->rel))
3218  {
3219  /*
3220  * Give warning only if the user explicitly tries to perform a
3221  * parallel vacuum on the temporary table.
3222  */
3223  if (nworkers > 0)
3224  ereport(WARNING,
3225  (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
3226  vacrel->relname)));
3227  }
3228  else
3229  vacrel->pvs = parallel_vacuum_init(vacrel->rel, vacrel->indrels,
3230  vacrel->nindexes, nworkers,
3231  max_items,
3232  vacrel->verbose ? INFO : DEBUG2,
3233  vacrel->bstrategy);
3234 
3235  /* If parallel mode started, dead_items space is allocated in DSM */
3236  if (ParallelVacuumIsActive(vacrel))
3237  {
3238  vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs);
3239  return;
3240  }
3241  }
3242 
3243  /* Serial VACUUM case */
3244  dead_items = (VacDeadItems *) palloc(vac_max_items_to_alloc_size(max_items));
3245  dead_items->max_items = max_items;
3246  dead_items->num_items = 0;
3247 
3248  vacrel->dead_items = dead_items;
3249 }

References Assert(), LVRelState::bstrategy, LVRelState::dead_items, dead_items_max_items(), DEBUG2, LVRelState::do_index_vacuuming, ereport, errmsg(), LVRelState::indrels, INFO, VacDeadItems::max_items, MaxHeapTuplesPerPage, LVRelState::nindexes, VacDeadItems::num_items, palloc(), parallel_vacuum_get_dead_items(), parallel_vacuum_init(), ParallelVacuumIsActive, LVRelState::pvs, LVRelState::rel, RelationUsesLocalBuffers, LVRelState::relname, vac_max_items_to_alloc_size(), LVRelState::verbose, and WARNING.

Referenced by heap_vacuum_rel().

◆ dead_items_cleanup()

static void dead_items_cleanup ( LVRelState *  vacrel )
static

Definition at line 3255 of file vacuumlazy.c.

3256 {
3257  if (!ParallelVacuumIsActive(vacrel))
3258  {
3259  /* Don't bother with pfree here */
3260  return;
3261  }
3262 
3263  /* End parallel mode */
3264  parallel_vacuum_end(vacrel->pvs, vacrel->indstats);
3265  vacrel->pvs = NULL;
3266 }

References LVRelState::indstats, parallel_vacuum_end(), ParallelVacuumIsActive, and LVRelState::pvs.

Referenced by heap_vacuum_rel().

◆ dead_items_max_items()

static int dead_items_max_items ( LVRelState *  vacrel )
static

Definition at line 3159 of file vacuumlazy.c.

3160 {
3161  int64 max_items;
3162  int vac_work_mem = IsAutoVacuumWorkerProcess() &&
3163  autovacuum_work_mem != -1 ?
3164  autovacuum_work_mem : maintenance_work_mem;
3165 
3166  if (vacrel->nindexes > 0)
3167  {
3168  BlockNumber rel_pages = vacrel->rel_pages;
3169 
3170  max_items = MAXDEADITEMS(vac_work_mem * 1024L);
3171  max_items = Min(max_items, INT_MAX);
3172  max_items = Min(max_items, MAXDEADITEMS(MaxAllocSize));
3173 
3174  /* curious coding here to ensure the multiplication can't overflow */
3175  if ((BlockNumber) (max_items / MaxHeapTuplesPerPage) > rel_pages)
3176  max_items = rel_pages * MaxHeapTuplesPerPage;
3177 
3178  /* stay sane if small maintenance_work_mem */
3179  max_items = Max(max_items, MaxHeapTuplesPerPage);
3180  }
3181  else
3182  {
3183  /* One-pass case only stores a single heap page's TIDs at a time */
3184  max_items = MaxHeapTuplesPerPage;
3185  }
3186 
3187  return (int) max_items;
3188 }

References autovacuum_work_mem, IsAutoVacuumWorkerProcess(), maintenance_work_mem, Max, MaxAllocSize, MAXDEADITEMS, MaxHeapTuplesPerPage, Min, LVRelState::nindexes, and LVRelState::rel_pages.

Referenced by dead_items_alloc().
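As a rough sense of scale (standalone sketch; the real function also clamps to INT_MAX, MaxAllocSize, and the table size): each dead item pointer is a 6-byte ItemPointerData, so a 64 MB maintenance_work_mem budget holds on the order of 11 million dead TIDs.

    #include <stdio.h>

    /* Standalone illustration of the dead-items memory budget. */
    int
    main(void)
    {
        long        vac_work_mem_kb = 64L * 1024;   /* e.g. maintenance_work_mem = 64MB */
        long        tid_size = 6;                   /* sizeof(ItemPointerData) */
        long        approx_max_items = vac_work_mem_kb * 1024 / tid_size;

        printf("~%ld dead item pointers fit in 64MB (ignoring the VacDeadItems header)\n",
               approx_max_items);
        return 0;
    }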

◆ heap_page_is_all_visible()

static bool heap_page_is_all_visible ( LVRelState *  vacrel,
Buffer  buf,
TransactionId *  visibility_cutoff_xid,
bool *  all_frozen 
)
static

Definition at line 3280 of file vacuumlazy.c.

3283 {
3284  Page page = BufferGetPage(buf);
3285  BlockNumber blockno = BufferGetBlockNumber(buf);
3286  OffsetNumber offnum,
3287  maxoff;
3288  bool all_visible = true;
3289 
3290  *visibility_cutoff_xid = InvalidTransactionId;
3291  *all_frozen = true;
3292 
3293  maxoff = PageGetMaxOffsetNumber(page);
3294  for (offnum = FirstOffsetNumber;
3295  offnum <= maxoff && all_visible;
3296  offnum = OffsetNumberNext(offnum))
3297  {
3298  ItemId itemid;
3299  HeapTupleData tuple;
3300 
3301  /*
3302  * Set the offset number so that we can display it along with any
3303  * error that occurred while processing this tuple.
3304  */
3305  vacrel->offnum = offnum;
3306  itemid = PageGetItemId(page, offnum);
3307 
3308  /* Unused or redirect line pointers are of no interest */
3309  if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
3310  continue;
3311 
3312  ItemPointerSet(&(tuple.t_self), blockno, offnum);
3313 
3314  /*
3315  * Dead line pointers can have index pointers pointing to them. So
3316  * they can't be treated as visible
3317  */
3318  if (ItemIdIsDead(itemid))
3319  {
3320  all_visible = false;
3321  *all_frozen = false;
3322  break;
3323  }
3324 
3325  Assert(ItemIdIsNormal(itemid));
3326 
3327  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
3328  tuple.t_len = ItemIdGetLength(itemid);
3329  tuple.t_tableOid = RelationGetRelid(vacrel->rel);
3330 
3331  switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
3332  buf))
3333  {
3334  case HEAPTUPLE_LIVE:
3335  {
3336  TransactionId xmin;
3337 
3338  /* Check comments in lazy_scan_prune. */
3339  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
3340  {
3341  all_visible = false;
3342  *all_frozen = false;
3343  break;
3344  }
3345 
3346  /*
3347  * The inserter definitely committed. But is it old enough
3348  * that everyone sees it as committed?
3349  */
3350  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
3351  if (!TransactionIdPrecedes(xmin,
3352  vacrel->cutoffs.OldestXmin))
3353  {
3354  all_visible = false;
3355  *all_frozen = false;
3356  break;
3357  }
3358 
3359  /* Track newest xmin on page. */
3360  if (TransactionIdFollows(xmin, *visibility_cutoff_xid) &&
3361  TransactionIdIsNormal(xmin))
3362  *visibility_cutoff_xid = xmin;
3363 
3364  /* Check whether this tuple is already frozen or not */
3365  if (all_visible && *all_frozen &&
3366  heap_tuple_needs_eventual_freeze(tuple.t_data))
3367  *all_frozen = false;
3368  }
3369  break;
3370 
3371  case HEAPTUPLE_DEAD:
3372  case HEAPTUPLE_RECENTLY_DEAD:
3373  case HEAPTUPLE_INSERT_IN_PROGRESS:
3374  case HEAPTUPLE_DELETE_IN_PROGRESS:
3375  {
3376  all_visible = false;
3377  *all_frozen = false;
3378  break;
3379  }
3380  default:
3381  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
3382  break;
3383  }
3384  } /* scan along page */
3385 
3386  /* Clear the offset information once we have processed the given page. */
3387  vacrel->offnum = InvalidOffsetNumber;
3388 
3389  return all_visible;
3390 }

References Assert(), buf, BufferGetBlockNumber(), BufferGetPage(), LVRelState::cutoffs, elog(), ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdIsNormal, and TransactionIdPrecedes().

Referenced by lazy_scan_prune(), and lazy_vacuum_heap_page().

◆ heap_vacuum_rel()

void heap_vacuum_rel ( Relation  rel,
VacuumParams *  params,
BufferAccessStrategy  bstrategy 
)

Definition at line 303 of file vacuumlazy.c.

305 {
306  LVRelState *vacrel;
307  bool verbose,
308  instrument,
309  skipwithvm,
310  frozenxid_updated,
311  minmulti_updated;
312  BlockNumber orig_rel_pages,
313  new_rel_pages,
314  new_rel_allvisible;
315  PGRUsage ru0;
316  TimestampTz starttime = 0;
317  PgStat_Counter startreadtime = 0,
318  startwritetime = 0;
319  WalUsage startwalusage = pgWalUsage;
320  int64 StartPageHit = VacuumPageHit,
321  StartPageMiss = VacuumPageMiss,
322  StartPageDirty = VacuumPageDirty;
323  ErrorContextCallback errcallback;
324  char **indnames = NULL;
325 
326  verbose = (params->options & VACOPT_VERBOSE) != 0;
327  instrument = (verbose || (IsAutoVacuumWorkerProcess() &&
328  params->log_min_duration >= 0));
329  if (instrument)
330  {
331  pg_rusage_init(&ru0);
332  starttime = GetCurrentTimestamp();
333  if (track_io_timing)
334  {
335  startreadtime = pgStatBlockReadTime;
336  startwritetime = pgStatBlockWriteTime;
337  }
338  }
339 
340  pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
341  RelationGetRelid(rel));
342 
343  /*
344  * Setup error traceback support for ereport() first. The idea is to set
345  * up an error context callback to display additional information on any
346  * error during a vacuum. During different phases of vacuum, we update
347  * the state so that the error context callback always display current
348  * information.
349  *
350  * Copy the names of heap rel into local memory for error reporting
351  * purposes, too. It isn't always safe to assume that we can get the name
352  * of each rel. It's convenient for code in lazy_scan_heap to always use
353  * these temp copies.
354  */
355  vacrel = (LVRelState *) palloc0(sizeof(LVRelState));
356  vacrel->dbname = get_database_name(MyDatabaseId);
357  vacrel->relnamespace = get_namespace_name(RelationGetNamespace(rel));
358  vacrel->relname = pstrdup(RelationGetRelationName(rel));
359  vacrel->indname = NULL;
360  vacrel->phase = VACUUM_ERRCB_PHASE_UNKNOWN;
361  vacrel->verbose = verbose;
362  errcallback.callback = vacuum_error_callback;
363  errcallback.arg = vacrel;
364  errcallback.previous = error_context_stack;
365  error_context_stack = &errcallback;
366 
367  /* Set up high level stuff about rel and its indexes */
368  vacrel->rel = rel;
369  vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes,
370  &vacrel->indrels);
371  vacrel->bstrategy = bstrategy;
372  if (instrument && vacrel->nindexes > 0)
373  {
374  /* Copy index names used by instrumentation (not error reporting) */
375  indnames = palloc(sizeof(char *) * vacrel->nindexes);
376  for (int i = 0; i < vacrel->nindexes; i++)
377  indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
378  }
379 
380  /*
381  * The index_cleanup param either disables index vacuuming and cleanup or
382  * forces it to go ahead when we would otherwise apply the index bypass
383  * optimization. The default is 'auto', which leaves the final decision
384  * up to lazy_vacuum().
385  *
386  * The truncate param allows user to avoid attempting relation truncation,
387  * though it can't force truncation to happen.
388  */
391  params->truncate != VACOPTVALUE_AUTO);
392 
393  /*
394  * While VacuumFailSafeActive is reset to false before calling this, we
395  * still need to reset it here due to recursive calls.
396  */
397  VacuumFailsafeActive = false;
398  vacrel->consider_bypass_optimization = true;
399  vacrel->do_index_vacuuming = true;
400  vacrel->do_index_cleanup = true;
401  vacrel->do_rel_truncate = (params->truncate != VACOPTVALUE_DISABLED);
402  if (params->index_cleanup == VACOPTVALUE_DISABLED)
403  {
404  /* Force disable index vacuuming up-front */
405  vacrel->do_index_vacuuming = false;
406  vacrel->do_index_cleanup = false;
407  }
408  else if (params->index_cleanup == VACOPTVALUE_ENABLED)
409  {
410  /* Force index vacuuming. Note that failsafe can still bypass. */
411  vacrel->consider_bypass_optimization = false;
412  }
413  else
414  {
415  /* Default/auto, make all decisions dynamically */
416  Assert(params->index_cleanup == VACOPTVALUE_AUTO);
417  }
418 
419  /* Initialize page counters explicitly (be tidy) */
420  vacrel->scanned_pages = 0;
421  vacrel->removed_pages = 0;
422  vacrel->frozen_pages = 0;
423  vacrel->lpdead_item_pages = 0;
424  vacrel->missed_dead_pages = 0;
425  vacrel->nonempty_pages = 0;
426  /* dead_items_alloc allocates vacrel->dead_items later on */
427 
428  /* Allocate/initialize output statistics state */
429  vacrel->new_rel_tuples = 0;
430  vacrel->new_live_tuples = 0;
431  vacrel->indstats = (IndexBulkDeleteResult **)
432  palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
433 
434  /* Initialize remaining counters (be tidy) */
435  vacrel->num_index_scans = 0;
436  vacrel->tuples_deleted = 0;
437  vacrel->tuples_frozen = 0;
438  vacrel->lpdead_items = 0;
439  vacrel->live_tuples = 0;
440  vacrel->recently_dead_tuples = 0;
441  vacrel->missed_dead_tuples = 0;
442 
443  /*
444  * Get cutoffs that determine which deleted tuples are considered DEAD,
445  * not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze. Then determine
446  * the extent of the blocks that we'll scan in lazy_scan_heap. It has to
447  * happen in this order to ensure that the OldestXmin cutoff field works
448  * as an upper bound on the XIDs stored in the pages we'll actually scan
449  * (NewRelfrozenXid tracking must never be allowed to miss unfrozen XIDs).
450  *
451  * Next acquire vistest, a related cutoff that's used in heap_page_prune.
452  * We expect vistest will always make heap_page_prune remove any deleted
453  * tuple whose xmax is < OldestXmin. lazy_scan_prune must never become
454  * confused about whether a tuple should be frozen or removed. (In the
455  * future we might want to teach lazy_scan_prune to recompute vistest from
456  * time to time, to increase the number of dead tuples it can prune away.)
457  */
458  vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
459  vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
460  vacrel->vistest = GlobalVisTestFor(rel);
461  /* Initialize state used to track oldest extant XID/MXID */
462  vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin;
463  vacrel->NewRelminMxid = vacrel->cutoffs.OldestMxact;
464  vacrel->skippedallvis = false;
465  skipwithvm = true;
466  if (params->options & VACOPT_DISABLE_PAGE_SKIPPING)
467  {
468  /*
469  * Force aggressive mode, and disable skipping blocks using the
470  * visibility map (even those set all-frozen)
471  */
472  vacrel->aggressive = true;
473  skipwithvm = false;
474  }
475 
476  vacrel->skipwithvm = skipwithvm;
477 
478  if (verbose)
479  {
480  if (vacrel->aggressive)
481  ereport(INFO,
482  (errmsg("aggressively vacuuming \"%s.%s.%s\"",
483  vacrel->dbname, vacrel->relnamespace,
484  vacrel->relname)));
485  else
486  ereport(INFO,
487  (errmsg("vacuuming \"%s.%s.%s\"",
488  vacrel->dbname, vacrel->relnamespace,
489  vacrel->relname)));
490  }
491 
492  /*
493  * Allocate dead_items array memory using dead_items_alloc. This handles
494  * parallel VACUUM initialization as part of allocating shared memory
495  * space used for dead_items. (But do a failsafe precheck first, to
496  * ensure that parallel VACUUM won't be attempted at all when relfrozenxid
497  * is already dangerously old.)
498  */
499  lazy_check_wraparound_failsafe(vacrel);
500  dead_items_alloc(vacrel, params->nworkers);
501 
502  /*
503  * Call lazy_scan_heap to perform all required heap pruning, index
504  * vacuuming, and heap vacuuming (plus related processing)
505  */
506  lazy_scan_heap(vacrel);
507 
508  /*
509  * Free resources managed by dead_items_alloc. This ends parallel mode in
510  * passing when necessary.
511  */
512  dead_items_cleanup(vacrel);
513  Assert(!IsInParallelMode());
514 
515  /*
516  * Update pg_class entries for each of rel's indexes where appropriate.
517  *
518  * Unlike the later update to rel's pg_class entry, this is not critical.
519  * Maintains relpages/reltuples statistics used by the planner only.
520  */
521  if (vacrel->do_index_cleanup)
522  update_relstats_all_indexes(vacrel);
523 
524  /* Done with rel's indexes */
525  vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
526 
527  /* Optionally truncate rel */
528  if (should_attempt_truncation(vacrel))
529  lazy_truncate_heap(vacrel);
530 
531  /* Pop the error context stack */
532  error_context_stack = errcallback.previous;
533 
534  /* Report that we are now doing final cleanup */
535  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
536  PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
537 
538  /*
539  * Prepare to update rel's pg_class entry.
540  *
541  * Aggressive VACUUMs must always be able to advance relfrozenxid to a
542  * value >= FreezeLimit, and relminmxid to a value >= MultiXactCutoff.
543  * Non-aggressive VACUUMs may advance them by any amount, or not at all.
544  */
545  Assert(vacrel->NewRelfrozenXid == vacrel->cutoffs.OldestXmin ||
546  TransactionIdPrecedesOrEquals(vacrel->aggressive ? vacrel->cutoffs.FreezeLimit :
547  vacrel->cutoffs.relfrozenxid,
548  vacrel->NewRelfrozenXid));
549  Assert(vacrel->NewRelminMxid == vacrel->cutoffs.OldestMxact ||
550  MultiXactIdPrecedesOrEquals(vacrel->aggressive ? vacrel->cutoffs.MultiXactCutoff :
551  vacrel->cutoffs.relminmxid,
552  vacrel->NewRelminMxid));
553  if (vacrel->skippedallvis)
554  {
555  /*
556  * Must keep original relfrozenxid in a non-aggressive VACUUM that
557  * chose to skip an all-visible page range. The state that tracks new
558  * values will have missed unfrozen XIDs from the pages we skipped.
559  */
560  Assert(!vacrel->aggressive);
561  vacrel->NewRelfrozenXid = InvalidTransactionId;
562  vacrel->NewRelminMxid = InvalidMultiXactId;
563  }
564 
565  /*
566  * For safety, clamp relallvisible to be not more than what we're setting
567  * pg_class.relpages to
568  */
569  new_rel_pages = vacrel->rel_pages; /* After possible rel truncation */
570  visibilitymap_count(rel, &new_rel_allvisible, NULL);
571  if (new_rel_allvisible > new_rel_pages)
572  new_rel_allvisible = new_rel_pages;
573 
574  /*
575  * Now actually update rel's pg_class entry.
576  *
577  * In principle new_live_tuples could be -1 indicating that we (still)
578  * don't know the tuple count. In practice that can't happen, since we
579  * scan every page that isn't skipped using the visibility map.
580  */
581  vac_update_relstats(rel, new_rel_pages, vacrel->new_live_tuples,
582  new_rel_allvisible, vacrel->nindexes > 0,
583  vacrel->NewRelfrozenXid, vacrel->NewRelminMxid,
584  &frozenxid_updated, &minmulti_updated, false);
585 
586  /*
587  * Report results to the cumulative stats system, too.
588  *
589  * Deliberately avoid telling the stats system about LP_DEAD items that
590  * remain in the table due to VACUUM bypassing index and heap vacuuming.
591  * ANALYZE will consider the remaining LP_DEAD items to be dead "tuples".
592  * It seems like a good idea to err on the side of not vacuuming again too
593  * soon in cases where the failsafe prevented significant amounts of heap
594  * vacuuming.
595  */
596  pgstat_report_vacuum(RelationGetRelid(rel),
597  rel->rd_rel->relisshared,
598  Max(vacrel->new_live_tuples, 0),
599  vacrel->recently_dead_tuples +
600  vacrel->missed_dead_tuples);
601  pgstat_progress_end_command();
602 
603  if (instrument)
604  {
605  TimestampTz endtime = GetCurrentTimestamp();
606 
607  if (verbose || params->log_min_duration == 0 ||
608  TimestampDifferenceExceeds(starttime, endtime,
609  params->log_min_duration))
610  {
611  long secs_dur;
612  int usecs_dur;
613  WalUsage walusage;
614  StringInfoData buf;
615  char *msgfmt;
616  int32 diff;
617  int64 PageHitOp = VacuumPageHit - StartPageHit,
618  PageMissOp = VacuumPageMiss - StartPageMiss,
619  PageDirtyOp = VacuumPageDirty - StartPageDirty;
620  double read_rate = 0,
621  write_rate = 0;
622 
623  TimestampDifference(starttime, endtime, &secs_dur, &usecs_dur);
624  memset(&walusage, 0, sizeof(WalUsage));
625  WalUsageAccumDiff(&walusage, &pgWalUsage, &startwalusage);
626 
627  initStringInfo(&buf);
628  if (verbose)
629  {
630  /*
631  * Aggressiveness already reported earlier, in dedicated
632  * VACUUM VERBOSE ereport
633  */
634  Assert(!params->is_wraparound);
635  msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
636  }
637  else if (params->is_wraparound)
638  {
639  /*
640  * While it's possible for a VACUUM to be both is_wraparound
641  * and !aggressive, that's just a corner-case -- is_wraparound
642  * implies aggressive. Produce distinct output for the corner
643  * case all the same, just in case.
644  */
645  if (vacrel->aggressive)
646  msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
647  else
648  msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
649  }
650  else
651  {
652  if (vacrel->aggressive)
653  msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
654  else
655  msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
656  }
657  appendStringInfo(&buf, msgfmt,
658  vacrel->dbname,
659  vacrel->relnamespace,
660  vacrel->relname,
661  vacrel->num_index_scans);
662  appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total)\n"),
663  vacrel->removed_pages,
664  new_rel_pages,
665  vacrel->scanned_pages,
666  orig_rel_pages == 0 ? 100.0 :
667  100.0 * vacrel->scanned_pages / orig_rel_pages);
669  _("tuples: %lld removed, %lld remain, %lld are dead but not yet removable\n"),
670  (long long) vacrel->tuples_deleted,
671  (long long) vacrel->new_rel_tuples,
672  (long long) vacrel->recently_dead_tuples);
673  if (vacrel->missed_dead_tuples > 0)
675  _("tuples missed: %lld dead from %u pages not removed due to cleanup lock contention\n"),
676  (long long) vacrel->missed_dead_tuples,
677  vacrel->missed_dead_pages);
678  diff = (int32) (ReadNextTransactionId() -
679  vacrel->cutoffs.OldestXmin);
681  _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
682  vacrel->cutoffs.OldestXmin, diff);
683  if (frozenxid_updated)
684  {
685  diff = (int32) (vacrel->NewRelfrozenXid -
686  vacrel->cutoffs.relfrozenxid);
688  _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
689  vacrel->NewRelfrozenXid, diff);
690  }
691  if (minmulti_updated)
692  {
693  diff = (int32) (vacrel->NewRelminMxid -
694  vacrel->cutoffs.relminmxid);
696  _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
697  vacrel->NewRelminMxid, diff);
698  }
699  appendStringInfo(&buf, _("frozen: %u pages from table (%.2f%% of total) had %lld tuples frozen\n"),
700  vacrel->frozen_pages,
701  orig_rel_pages == 0 ? 100.0 :
702  100.0 * vacrel->frozen_pages / orig_rel_pages,
703  (long long) vacrel->tuples_frozen);
704  if (vacrel->do_index_vacuuming)
705  {
706  if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
707  appendStringInfoString(&buf, _("index scan not needed: "));
708  else
709  appendStringInfoString(&buf, _("index scan needed: "));
710 
711  msgfmt = _("%u pages from table (%.2f%% of total) had %lld dead item identifiers removed\n");
712  }
713  else
714  {
716  appendStringInfoString(&buf, _("index scan bypassed: "));
717  else
718  appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
719 
720  msgfmt = _("%u pages from table (%.2f%% of total) have %lld dead item identifiers\n");
721  }
722  appendStringInfo(&buf, msgfmt,
723  vacrel->lpdead_item_pages,
724  orig_rel_pages == 0 ? 100.0 :
725  100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
726  (long long) vacrel->lpdead_items);
727  for (int i = 0; i < vacrel->nindexes; i++)
728  {
729  IndexBulkDeleteResult *istat = vacrel->indstats[i];
730 
731  if (!istat)
732  continue;
733 
735  _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
736  indnames[i],
737  istat->num_pages,
738  istat->pages_newly_deleted,
739  istat->pages_deleted,
740  istat->pages_free);
741  }
742  if (track_io_timing)
743  {
744  double read_ms = (double) (pgStatBlockReadTime - startreadtime) / 1000;
745  double write_ms = (double) (pgStatBlockWriteTime - startwritetime) / 1000;
746 
747  appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
748  read_ms, write_ms);
749  }
750  if (secs_dur > 0 || usecs_dur > 0)
751  {
752  read_rate = (double) BLCKSZ * PageMissOp / (1024 * 1024) /
753  (secs_dur + usecs_dur / 1000000.0);
754  write_rate = (double) BLCKSZ * PageDirtyOp / (1024 * 1024) /
755  (secs_dur + usecs_dur / 1000000.0);
756  }
757  appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
758  read_rate, write_rate);
760  _("buffer usage: %lld hits, %lld misses, %lld dirtied\n"),
761  (long long) PageHitOp,
762  (long long) PageMissOp,
763  (long long) PageDirtyOp);
765  _("WAL usage: %lld records, %lld full page images, %llu bytes\n"),
766  (long long) walusage.wal_records,
767  (long long) walusage.wal_fpi,
768  (unsigned long long) walusage.wal_bytes);
769  appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
770 
771  ereport(verbose ? INFO : LOG,
772  (errmsg_internal("%s", buf.data)));
773  pfree(buf.data);
774  }
775  }
776 
777  /* Cleanup index statistics and index names */
778  for (int i = 0; i < vacrel->nindexes; i++)
779  {
780  if (vacrel->indstats[i])
781  pfree(vacrel->indstats[i]);
782 
783  if (instrument)
784  pfree(indnames[i]);
785  }
786 }

References _, LVRelState::aggressive, appendStringInfo(), appendStringInfoString(), ErrorContextCallback::arg, Assert(), LVRelState::bstrategy, buf, ErrorContextCallback::callback, LVRelState::consider_bypass_optimization, LVRelState::cutoffs, LVRelState::dbname, dead_items_alloc(), dead_items_cleanup(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errmsg(), errmsg_internal(), error_context_stack, VacuumCutoffs::FreezeLimit, LVRelState::frozen_pages, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), GlobalVisTestFor(), i, VacuumParams::index_cleanup, LVRelState::indname, LVRelState::indrels, LVRelState::indstats, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, VacuumParams::is_wraparound, IsAutoVacuumWorkerProcess(), IsInParallelMode(), lazy_check_wraparound_failsafe(), lazy_scan_heap(), lazy_truncate_heap(), LVRelState::live_tuples, LOG, VacuumParams::log_min_duration, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, Max, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, VacuumCutoffs::MultiXactCutoff, MultiXactIdPrecedesOrEquals(), MyDatabaseId, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, NoLock, LVRelState::nonempty_pages, LVRelState::num_index_scans, IndexBulkDeleteResult::num_pages, VacuumParams::nworkers, VacuumCutoffs::OldestMxact, VacuumCutoffs::OldestXmin, VacuumParams::options, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, IndexBulkDeleteResult::pages_newly_deleted, palloc(), palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), pgStatBlockReadTime, pgStatBlockWriteTime, pgWalUsage, LVRelState::phase, ErrorContextCallback::previous, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, pstrdup(), RelationData::rd_rel, ReadNextTransactionId(), LVRelState::recently_dead_tuples, LVRelState::rel, LVRelState::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, VacuumCutoffs::relfrozenxid, VacuumCutoffs::relminmxid, LVRelState::relname, LVRelState::relnamespace, LVRelState::removed_pages, RowExclusiveLock, LVRelState::scanned_pages, should_attempt_truncation(), LVRelState::skippedallvis, LVRelState::skipwithvm, TimestampDifference(), TimestampDifferenceExceeds(), track_io_timing, TransactionIdPrecedesOrEquals(), VacuumParams::truncate, LVRelState::tuples_deleted, LVRelState::tuples_frozen, update_relstats_all_indexes(), vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_VERBOSE, VACOPTVALUE_AUTO, VACOPTVALUE_DISABLED, VACOPTVALUE_ENABLED, VACOPTVALUE_UNSPECIFIED, VACUUM_ERRCB_PHASE_UNKNOWN, vacuum_error_callback(), vacuum_get_cutoffs(), VacuumFailsafeActive, VacuumPageDirty, VacuumPageHit, VacuumPageMiss, LVRelState::verbose, verbose, visibilitymap_count(), LVRelState::vistest, WalUsage::wal_bytes, WalUsage::wal_fpi, WalUsage::wal_records, and WalUsageAccumDiff().

◆ lazy_check_wraparound_failsafe()

static bool lazy_check_wraparound_failsafe ( LVRelState *  vacrel )
static

Definition at line 2636 of file vacuumlazy.c.

2637 {
2638  /* Don't warn more than once per VACUUM */
2639  if (VacuumFailsafeActive)
2640  return true;
2641 
2642  if (unlikely(vacuum_xid_failsafe_check(&vacrel->cutoffs)))
2643  {
2644  const int progress_index[] = {
2645  PROGRESS_VACUUM_INDEXES_TOTAL,
2646  PROGRESS_VACUUM_INDEXES_PROCESSED
2647  };
2648  int64 progress_val[2] = {0, 0};
2649 
2650  VacuumFailsafeActive = true;
2651 
2652  /*
2653  * Abandon use of a buffer access strategy to allow use of all of
2654  * shared buffers. We assume the caller who allocated the memory for
2655  * the BufferAccessStrategy will free it.
2656  */
2657  vacrel->bstrategy = NULL;
2658 
2659  /* Disable index vacuuming, index cleanup, and heap rel truncation */
2660  vacrel->do_index_vacuuming = false;
2661  vacrel->do_index_cleanup = false;
2662  vacrel->do_rel_truncate = false;
2663 
2664  /* Reset the progress counters */
2665  pgstat_progress_update_multi_param(2, progress_index, progress_val);
2666 
2667  ereport(WARNING,
2668  (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
2669  vacrel->dbname, vacrel->relnamespace, vacrel->relname,
2670  vacrel->num_index_scans),
2671  errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
2672  errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
2673  "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
2674 
2675  /* Stop applying cost limits from this point on */
2676  VacuumCostActive = false;
2677  VacuumCostBalance = 0;
2678 
2679  return true;
2680  }
2681 
2682  return false;
2683 }

References LVRelState::bstrategy, LVRelState::cutoffs, LVRelState::dbname, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errdetail(), errhint(), errmsg(), LVRelState::num_index_scans, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, LVRelState::relname, LVRelState::relnamespace, unlikely, vacuum_xid_failsafe_check(), VacuumCostActive, VacuumCostBalance, VacuumFailsafeActive, and WARNING.

Referenced by heap_vacuum_rel(), lazy_scan_heap(), and lazy_vacuum_all_indexes().

◆ lazy_cleanup_all_indexes()

static void lazy_cleanup_all_indexes ( LVRelState *  vacrel )
static

Definition at line 2689 of file vacuumlazy.c.

2690 {
2691  double reltuples = vacrel->new_rel_tuples;
2692  bool estimated_count = vacrel->scanned_pages < vacrel->rel_pages;
2693  const int progress_start_index[] = {
2694  PROGRESS_VACUUM_PHASE,
2695  PROGRESS_VACUUM_INDEXES_TOTAL
2696  };
2697  const int progress_end_index[] = {
2698  PROGRESS_VACUUM_PHASE,
2699  PROGRESS_VACUUM_INDEXES_PROCESSED
2700  };
2701  int64 progress_start_val[2];
2702  int64 progress_end_val[2] = {0, 0};
2703 
2704  Assert(vacrel->do_index_cleanup);
2705  Assert(vacrel->nindexes > 0);
2706 
2707  /*
2708  * Report that we are now cleaning up indexes and the number of indexes to
2709  * cleanup.
2710  */
2711  progress_start_val[0] = PROGRESS_VACUUM_PHASE_INDEX_CLEANUP;
2712  progress_start_val[1] = vacrel->nindexes;
2713  pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
2714 
2715  if (!ParallelVacuumIsActive(vacrel))
2716  {
2717  for (int idx = 0; idx < vacrel->nindexes; idx++)
2718  {
2719  Relation indrel = vacrel->indrels[idx];
2720  IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2721 
2722  vacrel->indstats[idx] =
2723  lazy_cleanup_one_index(indrel, istat, reltuples,
2724  estimated_count, vacrel);
2725 
2726  /* Report the number of indexes cleaned up */
2727  pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
2728  idx + 1);
2729  }
2730  }
2731  else
2732  {
2733  /* Outsource everything to parallel variant */
2734  parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
2735  vacrel->num_index_scans,
2736  estimated_count);
2737  }
2738 
2739  /* Reset the progress counters */
2740  pgstat_progress_update_multi_param(2, progress_end_index, progress_end_val);
2741 }

References Assert(), LVRelState::do_index_cleanup, idx(), LVRelState::indrels, LVRelState::indstats, lazy_cleanup_one_index(), LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::num_index_scans, parallel_vacuum_cleanup_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, LVRelState::pvs, LVRelState::rel_pages, and LVRelState::scanned_pages.

Referenced by lazy_scan_heap().
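
As a reading aid, here is a small illustrative sketch of the paired index/value convention that pgstat_progress_update_multi_param() relies on in the listing above; the constants are the progress.h macros named under References, and the fragment is not a copy of the function:

    /* Each slot named in the index array receives the value at the same position. */
    const int   progress_index[] = {
        PROGRESS_VACUUM_PHASE,
        PROGRESS_VACUUM_INDEXES_TOTAL
    };
    int64       progress_val[2];

    progress_val[0] = PROGRESS_VACUUM_PHASE_INDEX_CLEANUP;     /* new phase */
    progress_val[1] = vacrel->nindexes;                        /* indexes to clean up */

    /* Publish both progress columns in one call, so that readers of
     * pg_stat_progress_vacuum see a consistent pair of values. */
    pgstat_progress_update_multi_param(2, progress_index, progress_val);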

◆ lazy_cleanup_one_index()

static IndexBulkDeleteResult * lazy_cleanup_one_index ( Relation  indrel,
IndexBulkDeleteResult *  istat,
double  reltuples,
bool  estimated_count,
LVRelState *  vacrel 
)
static

Definition at line 2805 of file vacuumlazy.c.

2808 {
2809  IndexVacuumInfo ivinfo;
2810  LVSavedErrInfo saved_err_info;
2811 
2812  ivinfo.index = indrel;
2813  ivinfo.heaprel = vacrel->rel;
2814  ivinfo.analyze_only = false;
2815  ivinfo.report_progress = false;
2816  ivinfo.estimated_count = estimated_count;
2817  ivinfo.message_level = DEBUG2;
2818 
2819  ivinfo.num_heap_tuples = reltuples;
2820  ivinfo.strategy = vacrel->bstrategy;
2821 
2822  /*
2823  * Update error traceback information.
2824  *
2825  * The index name is saved during this phase and restored immediately
2826  * after this phase. See vacuum_error_callback.
2827  */
2828  Assert(vacrel->indname == NULL);
2829  vacrel->indname = pstrdup(RelationGetRelationName(indrel));
2830  update_vacuum_error_info(vacrel, &saved_err_info,
2831  VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
2832  InvalidBlockNumber, InvalidOffsetNumber);
2833 
2834  istat = vac_cleanup_one_index(&ivinfo, istat);
2835 
2836  /* Revert to the previous phase information for error traceback */
2837  restore_vacuum_error_info(vacrel, &saved_err_info);
2838  pfree(vacrel->indname);
2839  vacrel->indname = NULL;
2840 
2841  return istat;
2842 }
Relation index
Definition: genam.h:46
double num_heap_tuples
Definition: genam.h:52
bool analyze_only
Definition: genam.h:48
BufferAccessStrategy strategy
Definition: genam.h:53
Relation heaprel
Definition: genam.h:47
bool report_progress
Definition: genam.h:49
int message_level
Definition: genam.h:51
bool estimated_count
Definition: genam.h:50
IndexBulkDeleteResult * vac_cleanup_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat)
Definition: vacuum.c:2497
static void restore_vacuum_error_info(LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
Definition: vacuumlazy.c:3514
static void update_vacuum_error_info(LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
Definition: vacuumlazy.c:3495

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_cleanup_one_index(), and VACUUM_ERRCB_PHASE_INDEX_CLEANUP.

Referenced by lazy_cleanup_all_indexes().
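
The error-traceback bookkeeping above follows a reusable pattern. A minimal sketch of that pattern, using only the helpers whose prototypes are quoted above and with the per-index work elided, looks like this:

    LVSavedErrInfo saved_err_info;

    /* Record which index and which phase any ERROR should be attributed to */
    Assert(vacrel->indname == NULL);
    vacrel->indname = pstrdup(RelationGetRelationName(indrel));
    update_vacuum_error_info(vacrel, &saved_err_info,
                             VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
                             InvalidBlockNumber, InvalidOffsetNumber);

    /* ... per-index work (here, vac_cleanup_one_index) runs in between ... */

    /* Revert to the caller's phase and forget the index name */
    restore_vacuum_error_info(vacrel, &saved_err_info);
    pfree(vacrel->indname);
    vacrel->indname = NULL;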

◆ lazy_scan_heap()

static void lazy_scan_heap ( LVRelState * vacrel)
static

Definition at line 825 of file vacuumlazy.c.

826 {
827  BlockNumber rel_pages = vacrel->rel_pages,
828  blkno,
829  next_unskippable_block,
830  next_fsm_block_to_vacuum = 0;
831  VacDeadItems *dead_items = vacrel->dead_items;
832  Buffer vmbuffer = InvalidBuffer;
833  bool next_unskippable_allvis,
834  skipping_current_range;
835  const int initprog_index[] = {
836  PROGRESS_VACUUM_PHASE,
837  PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
838  PROGRESS_VACUUM_MAX_DEAD_TUPLES
839  };
840  int64 initprog_val[3];
841 
842  /* Report that we're scanning the heap, advertising total # of blocks */
843  initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
844  initprog_val[1] = rel_pages;
845  initprog_val[2] = dead_items->max_items;
846  pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
847 
848  /* Set up an initial range of skippable blocks using the visibility map */
849  next_unskippable_block = lazy_scan_skip(vacrel, &vmbuffer, 0,
850  &next_unskippable_allvis,
851  &skipping_current_range);
852  for (blkno = 0; blkno < rel_pages; blkno++)
853  {
854  Buffer buf;
855  Page page;
856  bool all_visible_according_to_vm;
857  LVPagePruneState prunestate;
858 
859  if (blkno == next_unskippable_block)
860  {
861  /*
862  * Can't skip this page safely. Must scan the page. But
863  * determine the next skippable range after the page first.
864  */
865  all_visible_according_to_vm = next_unskippable_allvis;
866  next_unskippable_block = lazy_scan_skip(vacrel, &vmbuffer,
867  blkno + 1,
868  &next_unskippable_allvis,
869  &skipping_current_range);
870 
871  Assert(next_unskippable_block >= blkno + 1);
872  }
873  else
874  {
875  /* Last page always scanned (may need to set nonempty_pages) */
876  Assert(blkno < rel_pages - 1);
877 
878  if (skipping_current_range)
879  continue;
880 
881  /* Current range is too small to skip -- just scan the page */
882  all_visible_according_to_vm = true;
883  }
884 
885  vacrel->scanned_pages++;
886 
887  /* Report as block scanned, update error traceback information */
888  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
889  update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_SCAN_HEAP,
890  blkno, InvalidOffsetNumber);
891 
892  vacuum_delay_point();
893 
894  /*
895  * Regularly check if wraparound failsafe should trigger.
896  *
897  * There is a similar check inside lazy_vacuum_all_indexes(), but
898  * relfrozenxid might start to look dangerously old before we reach
899  * that point. This check also provides failsafe coverage for the
900  * one-pass strategy, and the two-pass strategy with the index_cleanup
901  * param set to 'off'.
902  */
903  if (vacrel->scanned_pages % FAILSAFE_EVERY_PAGES == 0)
904  lazy_check_wraparound_failsafe(vacrel);
905 
906  /*
907  * Consider if we definitely have enough space to process TIDs on page
908  * already. If we are close to overrunning the available space for
909  * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
910  * this page.
911  */
912  Assert(dead_items->max_items >= MaxHeapTuplesPerPage);
913  if (dead_items->max_items - dead_items->num_items < MaxHeapTuplesPerPage)
914  {
915  /*
916  * Before beginning index vacuuming, we release any pin we may
917  * hold on the visibility map page. This isn't necessary for
918  * correctness, but we do it anyway to avoid holding the pin
919  * across a lengthy, unrelated operation.
920  */
921  if (BufferIsValid(vmbuffer))
922  {
923  ReleaseBuffer(vmbuffer);
924  vmbuffer = InvalidBuffer;
925  }
926 
927  /* Perform a round of index and heap vacuuming */
928  vacrel->consider_bypass_optimization = false;
929  lazy_vacuum(vacrel);
930 
931  /*
932  * Vacuum the Free Space Map to make newly-freed space visible on
933  * upper-level FSM pages. Note we have not yet processed blkno.
934  */
935  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
936  blkno);
937  next_fsm_block_to_vacuum = blkno;
938 
939  /* Report that we are once again scanning the heap */
940  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
941  PROGRESS_VACUUM_PHASE_SCAN_HEAP);
942  }
943 
944  /*
945  * Pin the visibility map page in case we need to mark the page
946  * all-visible. In most cases this will be very cheap, because we'll
947  * already have the correct page pinned anyway.
948  */
949  visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
950 
951  /*
952  * We need a buffer cleanup lock to prune HOT chains and defragment
953  * the page in lazy_scan_prune. But when it's not possible to acquire
954  * a cleanup lock right away, we may be able to settle for reduced
955  * processing using lazy_scan_noprune.
956  */
957  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
958  vacrel->bstrategy);
959  page = BufferGetPage(buf);
960  if (!ConditionalLockBufferForCleanup(buf))
961  {
962  bool hastup,
963  recordfreespace;
964 
965  LockBuffer(buf, BUFFER_LOCK_SHARE);
966 
967  /* Check for new or empty pages before lazy_scan_noprune call */
968  if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, true,
969  vmbuffer))
970  {
971  /* Processed as new/empty page (lock and pin released) */
972  continue;
973  }
974 
975  /* Collect LP_DEAD items in dead_items array, count tuples */
976  if (lazy_scan_noprune(vacrel, buf, blkno, page, &hastup,
977  &recordfreespace))
978  {
979  Size freespace = 0;
980 
981  /*
982  * Processed page successfully (without cleanup lock) -- just
983  * need to perform rel truncation and FSM steps, much like the
984  * lazy_scan_prune case. Don't bother trying to match its
985  * visibility map setting steps, though.
986  */
987  if (hastup)
988  vacrel->nonempty_pages = blkno + 1;
989  if (recordfreespace)
990  freespace = PageGetHeapFreeSpace(page);
991  UnlockReleaseBuffer(buf);
992  if (recordfreespace)
993  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
994  continue;
995  }
996 
997  /*
998  * lazy_scan_noprune could not do all required processing. Wait
999  * for a cleanup lock, and call lazy_scan_prune in the usual way.
1000  */
1001  Assert(vacrel->aggressive);
1002  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1003  LockBufferForCleanup(buf);
1004  }
1005 
1006  /* Check for new or empty pages before lazy_scan_prune call */
1007  if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, false, vmbuffer))
1008  {
1009  /* Processed as new/empty page (lock and pin released) */
1010  continue;
1011  }
1012 
1013  /*
1014  * Prune, freeze, and count tuples.
1015  *
1016  * Accumulates details of remaining LP_DEAD line pointers on page in
1017  * dead_items array. This includes LP_DEAD line pointers that we
1018  * pruned ourselves, as well as existing LP_DEAD line pointers that
1019  * were pruned some time earlier. Also considers freezing XIDs in the
1020  * tuple headers of remaining items with storage.
1021  */
1022  lazy_scan_prune(vacrel, buf, blkno, page, &prunestate);
1023 
1024  Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);
1025 
1026  /* Remember the location of the last page with nonremovable tuples */
1027  if (prunestate.hastup)
1028  vacrel->nonempty_pages = blkno + 1;
1029 
1030  if (vacrel->nindexes == 0)
1031  {
1032  /*
1033  * Consider the need to do page-at-a-time heap vacuuming when
1034  * using the one-pass strategy now.
1035  *
1036  * The one-pass strategy will never call lazy_vacuum(). The steps
1037  * performed here can be thought of as the one-pass equivalent of
1038  * a call to lazy_vacuum().
1039  */
1040  if (prunestate.has_lpdead_items)
1041  {
1042  Size freespace;
1043 
1044  lazy_vacuum_heap_page(vacrel, blkno, buf, 0, vmbuffer);
1045 
1046  /* Forget the LP_DEAD items that we just vacuumed */
1047  dead_items->num_items = 0;
1048 
1049  /*
1050  * Periodically perform FSM vacuuming to make newly-freed
1051  * space visible on upper FSM pages. Note we have not yet
1052  * performed FSM processing for blkno.
1053  */
1054  if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1055  {
1056  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1057  blkno);
1058  next_fsm_block_to_vacuum = blkno;
1059  }
1060 
1061  /*
1062  * Now perform FSM processing for blkno, and move on to next
1063  * page.
1064  *
1065  * Our call to lazy_vacuum_heap_page() will have considered if
1066  * it's possible to set all_visible/all_frozen independently
1067  * of lazy_scan_prune(). Note that prunestate was invalidated
1068  * by lazy_vacuum_heap_page() call.
1069  */
1070  freespace = PageGetHeapFreeSpace(page);
1071 
1072  UnlockReleaseBuffer(buf);
1073  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1074  continue;
1075  }
1076 
1077  /*
1078  * There was no call to lazy_vacuum_heap_page() because pruning
1079  * didn't encounter/create any LP_DEAD items that needed to be
1080  * vacuumed. Prune state has not been invalidated, so proceed
1081  * with prunestate-driven visibility map and FSM steps (just like
1082  * the two-pass strategy).
1083  */
1084  Assert(dead_items->num_items == 0);
1085  }
1086 
1087  /*
1088  * Handle setting visibility map bit based on information from the VM
1089  * (as of last lazy_scan_skip() call), and from prunestate
1090  */
1091  if (!all_visible_according_to_vm && prunestate.all_visible)
1092  {
1093  uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
1094 
1095  if (prunestate.all_frozen)
1096  {
1098  flags |= VISIBILITYMAP_ALL_FROZEN;
1099  }
1100 
1101  /*
1102  * It should never be the case that the visibility map page is set
1103  * while the page-level bit is clear, but the reverse is allowed
1104  * (if checksums are not enabled). Regardless, set both bits so
1105  * that we get back in sync.
1106  *
1107  * NB: If the heap page is all-visible but the VM bit is not set,
1108  * we don't need to dirty the heap page. However, if checksums
1109  * are enabled, we do need to make sure that the heap page is
1110  * dirtied before passing it to visibilitymap_set(), because it
1111  * may be logged. Given that this situation should only happen in
1112  * rare cases after a crash, it is not worth optimizing.
1113  */
1114  PageSetAllVisible(page);
1115  MarkBufferDirty(buf);
1116  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1117  vmbuffer, prunestate.visibility_cutoff_xid,
1118  flags);
1119  }
1120 
1121  /*
1122  * As of PostgreSQL 9.2, the visibility map bit should never be set if
1123  * the page-level bit is clear. However, it's possible that the bit
1124  * got cleared after lazy_scan_skip() was called, so we must recheck
1125  * with buffer lock before concluding that the VM is corrupt.
1126  */
1127  else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
1128  visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
1129  {
1130  elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
1131  vacrel->relname, blkno);
1132  visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
1134  }
1135 
1136  /*
1137  * It's possible for the value returned by
1138  * GetOldestNonRemovableTransactionId() to move backwards, so it's not
1139  * wrong for us to see tuples that appear to not be visible to
1140  * everyone yet, while PD_ALL_VISIBLE is already set. The real safe
1141  * xmin value never moves backwards, but
1142  * GetOldestNonRemovableTransactionId() is conservative and sometimes
1143  * returns a value that's unnecessarily small, so if we see that
1144  * contradiction it just means that the tuples that we think are not
1145  * visible to everyone yet actually are, and the PD_ALL_VISIBLE flag
1146  * is correct.
1147  *
1148  * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE
1149  * set, however.
1150  */
1151  else if (prunestate.has_lpdead_items && PageIsAllVisible(page))
1152  {
1153  elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
1154  vacrel->relname, blkno);
1155  PageClearAllVisible(page);
1156  MarkBufferDirty(buf);
1157  visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
1159  }
1160 
1161  /*
1162  * If the all-visible page is all-frozen but not marked as such yet,
1163  * mark it as all-frozen. Note that all_frozen is only valid if
1164  * all_visible is true, so we must check both prunestate fields.
1165  */
1166  else if (all_visible_according_to_vm && prunestate.all_visible &&
1167  prunestate.all_frozen &&
1168  !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
1169  {
1170  /*
1171  * Avoid relying on all_visible_according_to_vm as a proxy for the
1172  * page-level PD_ALL_VISIBLE bit being set, since it might have
1173  * become stale -- even when all_visible is set in prunestate
1174  */
1175  if (!PageIsAllVisible(page))
1176  {
1177  PageSetAllVisible(page);
1178  MarkBufferDirty(buf);
1179  }
1180 
1181  /*
1182  * Set the page all-frozen (and all-visible) in the VM.
1183  *
1184  * We can pass InvalidTransactionId as our visibility_cutoff_xid,
1185  * since a snapshotConflictHorizon sufficient to make everything
1186  * safe for REDO was logged when the page's tuples were frozen.
1187  */
1189  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1190  vmbuffer, InvalidTransactionId,
1191  VISIBILITYMAP_ALL_VISIBLE |
1192  VISIBILITYMAP_ALL_FROZEN);
1193  }
1194 
1195  /*
1196  * Final steps for block: drop cleanup lock, record free space in the
1197  * FSM
1198  */
1199  if (prunestate.has_lpdead_items && vacrel->do_index_vacuuming)
1200  {
1201  /*
1202  * Wait until lazy_vacuum_heap_rel() to save free space. This
1203  * doesn't just save us some cycles; it also allows us to record
1204  * any additional free space that lazy_vacuum_heap_page() will
1205  * make available in cases where it's possible to truncate the
1206  * page's line pointer array.
1207  *
1208  * Note: It's not in fact 100% certain that we really will call
1209  * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip
1210  * index vacuuming (and so must skip heap vacuuming). This is
1211  * deemed okay because it only happens in emergencies, or when
1212  * there is very little free space anyway. (Besides, we start
1213  * recording free space in the FSM once index vacuuming has been
1214  * abandoned.)
1215  *
1216  * Note: The one-pass (no indexes) case is only supposed to make
1217  * it this far when there were no LP_DEAD items during pruning.
1218  */
1219  Assert(vacrel->nindexes > 0);
1220  UnlockReleaseBuffer(buf);
1221  }
1222  else
1223  {
1224  Size freespace = PageGetHeapFreeSpace(page);
1225 
1226  UnlockReleaseBuffer(buf);
1227  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1228  }
1229  }
1230 
1231  vacrel->blkno = InvalidBlockNumber;
1232  if (BufferIsValid(vmbuffer))
1233  ReleaseBuffer(vmbuffer);
1234 
1235  /* report that everything is now scanned */
1236  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1237 
1238  /* now we can compute the new value for pg_class.reltuples */
1239  vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, rel_pages,
1240  vacrel->scanned_pages,
1241  vacrel->live_tuples);
1242 
1243  /*
1244  * Also compute the total number of surviving heap entries. In the
1245  * (unlikely) scenario that new_live_tuples is -1, take it as zero.
1246  */
1247  vacrel->new_rel_tuples =
1248  Max(vacrel->new_live_tuples, 0) + vacrel->recently_dead_tuples +
1249  vacrel->missed_dead_tuples;
1250 
1251  /*
1252  * Do index vacuuming (call each index's ambulkdelete routine), then do
1253  * related heap vacuuming
1254  */
1255  if (dead_items->num_items > 0)
1256  lazy_vacuum(vacrel);
1257 
1258  /*
1259  * Vacuum the remainder of the Free Space Map. We must do this whether or
1260  * not there were indexes, and whether or not we bypassed index vacuuming.
1261  */
1262  if (blkno > next_fsm_block_to_vacuum)
1263  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno);
1264 
1265  /* report all blocks vacuumed */
1266  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1267 
1268  /* Do final index cleanup (call each index's amvacuumcleanup routine) */
1269  if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
1270  lazy_cleanup_all_indexes(vacrel);
1271 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4480
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2111
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:4795
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:4956
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:157
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:301
Size PageGetHeapFreeSpace(Page page)
Definition: bufpage.c:991
static void PageClearAllVisible(Page page)
Definition: bufpage.h:436
static void PageSetAllVisible(Page page)
Definition: bufpage.h:431
static bool PageIsAllVisible(Page page)
Definition: bufpage.h:426
unsigned char uint8
Definition: c.h:493
size_t Size
Definition: c.h:594
void FreeSpaceMapVacuumRange(Relation rel, BlockNumber start, BlockNumber end)
Definition: freespace.c:354
void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
Definition: freespace.c:182
#define PROGRESS_VACUUM_PHASE_SCAN_HEAP
Definition: progress.h:32
#define PROGRESS_VACUUM_TOTAL_HEAP_BLKS
Definition: progress.h:22
#define PROGRESS_VACUUM_HEAP_BLKS_SCANNED
Definition: progress.h:23
#define PROGRESS_VACUUM_MAX_DEAD_TUPLES
Definition: progress.h:26
#define PROGRESS_VACUUM_HEAP_BLKS_VACUUMED
Definition: progress.h:24
TransactionId visibility_cutoff_xid
Definition: vacuumlazy.c:230
BlockNumber blkno
Definition: vacuumlazy.c:177
#define TransactionIdIsValid(xid)
Definition: transam.h:41
void vacuum_delay_point(void)
Definition: vacuum.c:2322
double vac_estimate_reltuples(Relation relation, BlockNumber total_pages, BlockNumber scanned_pages, double scanned_tuples)
Definition: vacuum.c:1305
static void lazy_vacuum(LVRelState *vacrel)
Definition: vacuumlazy.c:2188
static void lazy_cleanup_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2689
static int lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, int index, Buffer vmbuffer)
Definition: vacuumlazy.c:2519
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
Definition: vacuumlazy.c:1412
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *hastup, bool *recordfreespace)
Definition: vacuumlazy.c:1954
static void lazy_scan_prune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, LVPagePruneState *prunestate)
Definition: vacuumlazy.c:1535
static BlockNumber lazy_scan_skip(LVRelState *vacrel, Buffer *vmbuffer, BlockNumber next_block, bool *next_unskippable_allvis, bool *skipping_current_range)
Definition: vacuumlazy.c:1296
#define FAILSAFE_EVERY_PAGES
Definition: vacuumlazy.c:100
#define VACUUM_FSM_EVERY_PAGES
Definition: vacuumlazy.c:109
void visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
#define VM_ALL_FROZEN(r, b, v)
Definition: visibilitymap.h:26
#define VISIBILITYMAP_VALID_BITS
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28

References LVRelState::aggressive, LVPagePruneState::all_frozen, LVPagePruneState::all_visible, Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage(), BufferIsValid(), ConditionalLockBufferForCleanup(), LVRelState::consider_bypass_optimization, LVRelState::dead_items, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, elog(), FAILSAFE_EVERY_PAGES, FreeSpaceMapVacuumRange(), LVPagePruneState::has_lpdead_items, LVPagePruneState::hastup, InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, InvalidTransactionId, InvalidXLogRecPtr, lazy_check_wraparound_failsafe(), lazy_cleanup_all_indexes(), lazy_scan_new_or_empty(), lazy_scan_noprune(), lazy_scan_prune(), lazy_scan_skip(), lazy_vacuum(), lazy_vacuum_heap_page(), LVRelState::live_tuples, LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, MarkBufferDirty(), Max, VacDeadItems::max_items, MaxHeapTuplesPerPage, LVRelState::missed_dead_tuples, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::nonempty_pages, VacDeadItems::num_items, PageClearAllVisible(), PageGetHeapFreeSpace(), PageIsAllVisible(), PageSetAllVisible(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLES, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, RBM_NORMAL, ReadBufferExtended(), LVRelState::recently_dead_tuples, RecordPageWithFreeSpace(), LVRelState::rel, LVRelState::rel_pages, ReleaseBuffer(), LVRelState::relname, LVRelState::scanned_pages, TransactionIdIsValid, UnlockReleaseBuffer(), update_vacuum_error_info(), vac_estimate_reltuples(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_FSM_EVERY_PAGES, LVPagePruneState::visibility_cutoff_xid, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, VM_ALL_FROZEN, and WARNING.

Referenced by heap_vacuum_rel().
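
Because the function is long, a hypothetical skeleton of its block-skipping protocol may help when reading the listing above; it reuses the same local variable names and omits all per-page processing:

    /* lazy_scan_skip() returns the next block that must be scanned and reports
     * whether the blocks in front of it form a skippable all-visible range. */
    next_unskippable_block = lazy_scan_skip(vacrel, &vmbuffer, 0,
                                            &next_unskippable_allvis,
                                            &skipping_current_range);
    for (blkno = 0; blkno < rel_pages; blkno++)
    {
        if (blkno == next_unskippable_block)
        {
            /* This block must be scanned; set up the next skippable range */
            all_visible_according_to_vm = next_unskippable_allvis;
            next_unskippable_block = lazy_scan_skip(vacrel, &vmbuffer, blkno + 1,
                                                    &next_unskippable_allvis,
                                                    &skipping_current_range);
        }
        else if (skipping_current_range)
            continue;           /* inside a range of at least SKIP_PAGES_THRESHOLD blocks */
        else
            all_visible_according_to_vm = true; /* range too small to be worth skipping */

        /* ... prune and freeze the page, maintain the VM and FSM, etc. ... */
    }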

◆ lazy_scan_new_or_empty()

static bool lazy_scan_new_or_empty ( LVRelState *  vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool  sharelock,
Buffer  vmbuffer 
)
static

Definition at line 1412 of file vacuumlazy.c.

1414 {
1415  Size freespace;
1416 
1417  if (PageIsNew(page))
1418  {
1419  /*
1420  * All-zeroes pages can be left over if either a backend extends the
1421  * relation by a single page, but crashes before the newly initialized
1422  * page has been written out, or when bulk-extending the relation
1423  * (which creates a number of empty pages at the tail end of the
1424  * relation), and then enters them into the FSM.
1425  *
1426  * Note we do not enter the page into the visibilitymap. That has the
1427  * downside that we repeatedly visit this page in subsequent vacuums,
1428  * but otherwise we'll never discover the space on a promoted standby.
1429  * The harm of repeated checking ought to normally not be too bad. The
1430  * space usually should be used at some point, otherwise there
1431  * wouldn't be any regular vacuums.
1432  *
1433  * Make sure these pages are in the FSM, to ensure they can be reused.
1434  * Do that by testing if there's any space recorded for the page. If
1435  * not, enter it. We do so after releasing the lock on the heap page,
1436  * the FSM is approximate, after all.
1437  */
1438  UnlockReleaseBuffer(buf);
1439 
1440  if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
1441  {
1442  freespace = BLCKSZ - SizeOfPageHeaderData;
1443 
1444  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1445  }
1446 
1447  return true;
1448  }
1449 
1450  if (PageIsEmpty(page))
1451  {
1452  /*
1453  * It seems likely that caller will always be able to get a cleanup
1454  * lock on an empty page. But don't take any chances -- escalate to
1455  * an exclusive lock (still don't need a cleanup lock, though).
1456  */
1457  if (sharelock)
1458  {
1459  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1460  LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
1461 
1462  if (!PageIsEmpty(page))
1463  {
1464  /* page isn't new or empty -- keep lock and pin for now */
1465  return false;
1466  }
1467  }
1468  else
1469  {
1470  /* Already have a full cleanup lock (which is more than enough) */
1471  }
1472 
1473  /*
1474  * Unlike new pages, empty pages are always set all-visible and
1475  * all-frozen.
1476  */
1477  if (!PageIsAllVisible(page))
1478  {
1479  START_CRIT_SECTION();
1480 
1481  /* mark buffer dirty before writing a WAL record */
1482  MarkBufferDirty(buf);
1483 
1484  /*
1485  * It's possible that another backend has extended the heap,
1486  * initialized the page, and then failed to WAL-log the page due
1487  * to an ERROR. Since heap extension is not WAL-logged, recovery
1488  * might try to replay our record setting the page all-visible and
1489  * find that the page isn't initialized, which will cause a PANIC.
1490  * To prevent that, check whether the page has been previously
1491  * WAL-logged, and if not, do that now.
1492  */
1493  if (RelationNeedsWAL(vacrel->rel) &&
1494  PageGetLSN(page) == InvalidXLogRecPtr)
1495  log_newpage_buffer(buf, true);
1496 
1497  PageSetAllVisible(page);
1498  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1499  vmbuffer, InvalidTransactionId,
1500  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
1501  END_CRIT_SECTION();
1502  }
1503 
1504  freespace = PageGetHeapFreeSpace(page);
1505  UnlockReleaseBuffer(buf);
1506  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1507  return true;
1508  }
1509 
1510  /* page isn't new or empty -- keep lock and pin */
1511  return false;
1512 }
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:159
#define SizeOfPageHeaderData
Definition: bufpage.h:213
static XLogRecPtr PageGetLSN(Page page)
Definition: bufpage.h:383
Size GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk)
Definition: freespace.c:232
#define START_CRIT_SECTION()
Definition: miscadmin.h:148
#define END_CRIT_SECTION()
Definition: miscadmin.h:150
#define RelationNeedsWAL(relation)
Definition: rel.h:629
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
Definition: xloginsert.c:1225

References buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, END_CRIT_SECTION, GetRecordedFreeSpace(), InvalidTransactionId, InvalidXLogRecPtr, LockBuffer(), log_newpage_buffer(), MarkBufferDirty(), PageGetHeapFreeSpace(), PageGetLSN(), PageIsAllVisible(), PageIsEmpty(), PageIsNew(), PageSetAllVisible(), RecordPageWithFreeSpace(), LVRelState::rel, RelationNeedsWAL, SizeOfPageHeaderData, START_CRIT_SECTION, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_set().

Referenced by lazy_scan_heap().
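
A short note on the calling convention, sketched below in condensed form from the call sites shown earlier: a true return value means the block was fully dispatched as a new or empty page and the buffer lock and pin were already released, so the caller simply moves on to the next block.

    /* Check for new or empty pages before any pruning work */
    if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, sharelock, vmbuffer))
        continue;       /* processed as new/empty page; lock and pin released */

    /* otherwise the caller still holds its lock and pin, and prunes the page */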

◆ lazy_scan_noprune()

static bool lazy_scan_noprune ( LVRelState *  vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool *  hastup,
bool *  recordfreespace 
)
static

Definition at line 1954 of file vacuumlazy.c.

1960 {
1961  OffsetNumber offnum,
1962  maxoff;
1963  int lpdead_items,
1964  live_tuples,
1965  recently_dead_tuples,
1966  missed_dead_tuples;
1967  HeapTupleHeader tupleheader;
1968  TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
1969  MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid;
1970  OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
1971 
1972  Assert(BufferGetBlockNumber(buf) == blkno);
1973 
1974  *hastup = false; /* for now */
1975  *recordfreespace = false; /* for now */
1976 
1977  lpdead_items = 0;
1978  live_tuples = 0;
1979  recently_dead_tuples = 0;
1980  missed_dead_tuples = 0;
1981 
1982  maxoff = PageGetMaxOffsetNumber(page);
1983  for (offnum = FirstOffsetNumber;
1984  offnum <= maxoff;
1985  offnum = OffsetNumberNext(offnum))
1986  {
1987  ItemId itemid;
1988  HeapTupleData tuple;
1989 
1990  vacrel->offnum = offnum;
1991  itemid = PageGetItemId(page, offnum);
1992 
1993  if (!ItemIdIsUsed(itemid))
1994  continue;
1995 
1996  if (ItemIdIsRedirected(itemid))
1997  {
1998  *hastup = true;
1999  continue;
2000  }
2001 
2002  if (ItemIdIsDead(itemid))
2003  {
2004  /*
2005  * Deliberately don't set hastup=true here. See same point in
2006  * lazy_scan_prune for an explanation.
2007  */
2008  deadoffsets[lpdead_items++] = offnum;
2009  continue;
2010  }
2011 
2012  *hastup = true; /* page prevents rel truncation */
2013  tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
2014  if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
2015  &NoFreezePageRelfrozenXid,
2016  &NoFreezePageRelminMxid))
2017  {
2018  /* Tuple with XID < FreezeLimit (or MXID < MultiXactCutoff) */
2019  if (vacrel->aggressive)
2020  {
2021  /*
2022  * Aggressive VACUUMs must always be able to advance rel's
2023  * relfrozenxid to a value >= FreezeLimit (and be able to
2024  * advance rel's relminmxid to a value >= MultiXactCutoff).
2025  * The ongoing aggressive VACUUM won't be able to do that
2026  * unless it can freeze an XID (or MXID) from this tuple now.
2027  *
2028  * The only safe option is to have caller perform processing
2029  * of this page using lazy_scan_prune. Caller might have to
2030  * wait a while for a cleanup lock, but it can't be helped.
2031  */
2032  vacrel->offnum = InvalidOffsetNumber;
2033  return false;
2034  }
2035 
2036  /*
2037  * Non-aggressive VACUUMs are under no obligation to advance
2038  * relfrozenxid (even by one XID). We can be much laxer here.
2039  *
2040  * Currently we always just accept an older final relfrozenxid
2041  * and/or relminmxid value. We never make caller wait or work a
2042  * little harder, even when it likely makes sense to do so.
2043  */
2044  }
2045 
2046  ItemPointerSet(&(tuple.t_self), blkno, offnum);
2047  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2048  tuple.t_len = ItemIdGetLength(itemid);
2049  tuple.t_tableOid = RelationGetRelid(vacrel->rel);
2050 
2051  switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
2052  buf))
2053  {
2054  case HEAPTUPLE_DELETE_IN_PROGRESS:
2055  case HEAPTUPLE_LIVE:
2056 
2057  /*
2058  * Count both cases as live, just like lazy_scan_prune
2059  */
2060  live_tuples++;
2061 
2062  break;
2063  case HEAPTUPLE_DEAD:
2064 
2065  /*
2066  * There is some useful work for pruning to do, that won't be
2067  * done due to failure to get a cleanup lock.
2068  */
2069  missed_dead_tuples++;
2070  break;
2071  case HEAPTUPLE_RECENTLY_DEAD:
2072 
2073  /*
2074  * Count in recently_dead_tuples, just like lazy_scan_prune
2075  */
2076  recently_dead_tuples++;
2077  break;
2078  case HEAPTUPLE_INSERT_IN_PROGRESS:
2079 
2080  /*
2081  * Do not count these rows as live, just like lazy_scan_prune
2082  */
2083  break;
2084  default:
2085  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2086  break;
2087  }
2088  }
2089 
2090  vacrel->offnum = InvalidOffsetNumber;
2091 
2092  /*
2093  * By here we know for sure that caller can put off freezing and pruning
2094  * this particular page until the next VACUUM. Remember its details now.
2095  * (lazy_scan_prune expects a clean slate, so we have to do this last.)
2096  */
2097  vacrel->NewRelfrozenXid = NoFreezePageRelfrozenXid;
2098  vacrel->NewRelminMxid = NoFreezePageRelminMxid;
2099 
2100  /* Save any LP_DEAD items found on the page in dead_items array */
2101  if (vacrel->nindexes == 0)
2102  {
2103  /* Using one-pass strategy (since table has no indexes) */
2104  if (lpdead_items > 0)
2105  {
2106  /*
2107  * Perfunctory handling for the corner case where a single pass
2108  * strategy VACUUM cannot get a cleanup lock, and it turns out
2109  * that there is one or more LP_DEAD items: just count the LP_DEAD
2110  * items as missed_dead_tuples instead. (This is a bit dishonest,
2111  * but it beats having to maintain specialized heap vacuuming code
2112  * forever, for vanishingly little benefit.)
2113  */
2114  *hastup = true;
2115  missed_dead_tuples += lpdead_items;
2116  }
2117 
2118  *recordfreespace = true;
2119  }
2120  else if (lpdead_items == 0)
2121  {
2122  /*
2123  * Won't be vacuuming this page later, so record page's freespace in
2124  * the FSM now
2125  */
2126  *recordfreespace = true;
2127  }
2128  else
2129  {
2130  VacDeadItems *dead_items = vacrel->dead_items;
2131  ItemPointerData tmp;
2132 
2133  /*
2134  * Page has LP_DEAD items, and so any references/TIDs that remain in
2135  * indexes will be deleted during index vacuuming (and then marked
2136  * LP_UNUSED in the heap)
2137  */
2138  vacrel->lpdead_item_pages++;
2139 
2140  ItemPointerSetBlockNumber(&tmp, blkno);
2141 
2142  for (int i = 0; i < lpdead_items; i++)
2143  {
2144  ItemPointerSetOffsetNumber(&tmp, deadoffsets[i]);
2145  dead_items->items[dead_items->num_items++] = tmp;
2146  }
2147 
2148  Assert(dead_items->num_items <= dead_items->max_items);
2149  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
2150  dead_items->num_items);
2151 
2152  vacrel->lpdead_items += lpdead_items;
2153 
2154  /*
2155  * Assume that we'll go on to vacuum this heap page during final pass
2156  * over the heap. Don't record free space until then.
2157  */
2158  *recordfreespace = false;
2159  }
2160 
2161  /*
2162  * Finally, add relevant page-local counts to whole-VACUUM counts
2163  */
2164  vacrel->live_tuples += live_tuples;
2165  vacrel->recently_dead_tuples += recently_dead_tuples;
2166  vacrel->missed_dead_tuples += missed_dead_tuples;
2167  if (missed_dead_tuples > 0)
2168  vacrel->missed_dead_pages++;
2169 
2170  /* Caller won't need to call lazy_scan_prune with same page */
2171  return true;
2172 }
TransactionId MultiXactId
Definition: c.h:651
bool heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid)
Definition: heapam.c:7376
static void ItemPointerSetOffsetNumber(ItemPointerData *pointer, OffsetNumber offsetNumber)
Definition: itemptr.h:158
static void ItemPointerSetBlockNumber(ItemPointerData *pointer, BlockNumber blockNumber)
Definition: itemptr.h:147
#define PROGRESS_VACUUM_NUM_DEAD_TUPLES
Definition: progress.h:27
ItemPointerData items[FLEXIBLE_ARRAY_MEMBER]
Definition: vacuum.h:288

References LVRelState::aggressive, Assert(), buf, BufferGetBlockNumber(), LVRelState::cutoffs, LVRelState::dead_items, elog(), ERROR, FirstOffsetNumber, heap_tuple_should_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuum(), i, InvalidOffsetNumber, ItemIdGetLength, ItemIdIsDead, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), VacDeadItems::items, LVRelState::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, VacDeadItems::max_items, MaxHeapTuplesPerPage, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, VacDeadItems::num_items, LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), pgstat_progress_update_param(), PROGRESS_VACUUM_NUM_DEAD_TUPLES, LVRelState::recently_dead_tuples, LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by lazy_scan_heap().
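
The hastup and recordfreespace outputs form a small contract with the caller. The following is a condensed sketch of how lazy_scan_heap consumes them when this function returns true (the real caller releases the buffer before touching the FSM):

    if (hastup)
        vacrel->nonempty_pages = blkno + 1;     /* page blocks relation truncation */

    if (recordfreespace)
    {
        /* page won't be revisited, so publish its free space right away */
        Size freespace = PageGetHeapFreeSpace(page);

        RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
    }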

◆ lazy_scan_prune()

static void lazy_scan_prune ( LVRelState *  vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
LVPagePruneState *  prunestate 
)
static

Definition at line 1535 of file vacuumlazy.c.

1540 {
1541  Relation rel = vacrel->rel;
1542  OffsetNumber offnum,
1543  maxoff;
1544  ItemId itemid;
1545  HeapTupleData tuple;
1546  HTSV_Result res;
1547  PruneResult presult;
1548  int tuples_frozen,
1549  lpdead_items,
1550  live_tuples,
1551  recently_dead_tuples;
1552  HeapPageFreeze pagefrz;
1553  int64 fpi_before = pgWalUsage.wal_fpi;
1554  OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
1555  HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
1556 
1557  Assert(BufferGetBlockNumber(buf) == blkno);
1558 
1559  /*
1560  * maxoff might be reduced following line pointer array truncation in
1561  * heap_page_prune. That's safe for us to ignore, since the reclaimed
1562  * space will continue to look like LP_UNUSED items below.
1563  */
1564  maxoff = PageGetMaxOffsetNumber(page);
1565 
1566 retry:
1567 
1568  /* Initialize (or reset) page-level state */
1569  pagefrz.freeze_required = false;
1570  pagefrz.FreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
1571  pagefrz.FreezePageRelminMxid = vacrel->NewRelminMxid;
1572  pagefrz.NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
1573  pagefrz.NoFreezePageRelminMxid = vacrel->NewRelminMxid;
1574  tuples_frozen = 0;
1575  lpdead_items = 0;
1576  live_tuples = 0;
1577  recently_dead_tuples = 0;
1578 
1579  /*
1580  * Prune all HOT-update chains in this page.
1581  *
1582  * We count the number of tuples removed from the page by the pruning step
1583  * in presult.ndeleted. It should not be confused with lpdead_items;
1584  * lpdead_items's final value can be thought of as the number of tuples
1585  * that were deleted from indexes.
1586  */
1587  heap_page_prune(rel, buf, vacrel->vistest, &presult, &vacrel->offnum);
1588 
1589  /*
1590  * Now scan the page to collect LP_DEAD items and check for tuples
1591  * requiring freezing among remaining tuples with storage
1592  */
1593  prunestate->hastup = false;
1594  prunestate->has_lpdead_items = false;
1595  prunestate->all_visible = true;
1596  prunestate->all_frozen = true;
1597  prunestate->visibility_cutoff_xid = InvalidTransactionId;
1598 
1599  for (offnum = FirstOffsetNumber;
1600  offnum <= maxoff;
1601  offnum = OffsetNumberNext(offnum))
1602  {
1603  bool totally_frozen;
1604 
1605  /*
1606  * Set the offset number so that we can display it along with any
1607  * error that occurred while processing this tuple.
1608  */
1609  vacrel->offnum = offnum;
1610  itemid = PageGetItemId(page, offnum);
1611 
1612  if (!ItemIdIsUsed(itemid))
1613  continue;
1614 
1615  /* Redirect items mustn't be touched */
1616  if (ItemIdIsRedirected(itemid))
1617  {
1618  /* page makes rel truncation unsafe */
1619  prunestate->hastup = true;
1620  continue;
1621  }
1622 
1623  if (ItemIdIsDead(itemid))
1624  {
1625  /*
1626  * Deliberately don't set hastup for LP_DEAD items. We make the
1627  * soft assumption that any LP_DEAD items encountered here will
1628  * become LP_UNUSED later on, before count_nondeletable_pages is
1629  * reached. If we don't make this assumption then rel truncation
1630  * will only happen every other VACUUM, at most. Besides, VACUUM
1631  * must treat hastup/nonempty_pages as provisional no matter how
1632  * LP_DEAD items are handled (handled here, or handled later on).
1633  *
1634  * Also deliberately delay unsetting all_visible until just before
1635  * we return to lazy_scan_heap caller, as explained in full below.
1636  * (This is another case where it's useful to anticipate that any
1637  * LP_DEAD items will become LP_UNUSED during the ongoing VACUUM.)
1638  */
1639  deadoffsets[lpdead_items++] = offnum;
1640  continue;
1641  }
1642 
1643  Assert(ItemIdIsNormal(itemid));
1644 
1645  ItemPointerSet(&(tuple.t_self), blkno, offnum);
1646  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
1647  tuple.t_len = ItemIdGetLength(itemid);
1648  tuple.t_tableOid = RelationGetRelid(rel);
1649 
1650  /*
1651  * DEAD tuples are almost always pruned into LP_DEAD line pointers by
1652  * heap_page_prune(), but it's possible that the tuple state changed
1653  * since heap_page_prune() looked. Handle that here by restarting.
1654  * (See comments at the top of function for a full explanation.)
1655  */
1656  res = HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
1657  buf);
1658 
1659  if (unlikely(res == HEAPTUPLE_DEAD))
1660  goto retry;
1661 
1662  /*
1663  * The criteria for counting a tuple as live in this block need to
1664  * match what analyze.c's acquire_sample_rows() does, otherwise VACUUM
1665  * and ANALYZE may produce wildly different reltuples values, e.g.
1666  * when there are many recently-dead tuples.
1667  *
1668  * The logic here is a bit simpler than acquire_sample_rows(), as
1669  * VACUUM can't run inside a transaction block, which makes some cases
1670  * impossible (e.g. in-progress insert from the same transaction).
1671  *
1672  * We treat LP_DEAD items (which are the closest thing to DEAD tuples
1673  * that might be seen here) differently, too: we assume that they'll
1674  * become LP_UNUSED before VACUUM finishes. This difference is only
1675  * superficial. VACUUM effectively agrees with ANALYZE about DEAD
1676  * items, in the end. VACUUM won't remember LP_DEAD items, but only
1677  * because they're not supposed to be left behind when it is done.
1678  * (Cases where we bypass index vacuuming will violate this optimistic
1679  * assumption, but the overall impact of that should be negligible.)
1680  */
1681  switch (res)
1682  {
1683  case HEAPTUPLE_LIVE:
1684 
1685  /*
1686  * Count it as live. Not only is this natural, but it's also
1687  * what acquire_sample_rows() does.
1688  */
1689  live_tuples++;
1690 
1691  /*
1692  * Is the tuple definitely visible to all transactions?
1693  *
1694  * NB: Like with per-tuple hint bits, we can't set the
1695  * PD_ALL_VISIBLE flag if the inserter committed
1696  * asynchronously. See SetHintBits for more info. Check that
1697  * the tuple is hinted xmin-committed because of that.
1698  */
1699  if (prunestate->all_visible)
1700  {
1701  TransactionId xmin;
1702 
1703  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
1704  {
1705  prunestate->all_visible = false;
1706  break;
1707  }
1708 
1709  /*
1710  * The inserter definitely committed. But is it old enough
1711  * that everyone sees it as committed?
1712  */
1713  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
1714  if (!TransactionIdPrecedes(xmin,
1715  vacrel->cutoffs.OldestXmin))
1716  {
1717  prunestate->all_visible = false;
1718  break;
1719  }
1720 
1721  /* Track newest xmin on page. */
1722  if (TransactionIdFollows(xmin, prunestate->visibility_cutoff_xid) &&
1723  TransactionIdIsNormal(xmin))
1724  prunestate->visibility_cutoff_xid = xmin;
1725  }
1726  break;
1727  case HEAPTUPLE_RECENTLY_DEAD:
1728 
1729  /*
1730  * If tuple is recently dead then we must not remove it from
1731  * the relation. (We only remove items that are LP_DEAD from
1732  * pruning.)
1733  */
1734  recently_dead_tuples++;
1735  prunestate->all_visible = false;
1736  break;
1737  case HEAPTUPLE_INSERT_IN_PROGRESS:
1738 
1739  /*
1740  * We do not count these rows as live, because we expect the
1741  * inserting transaction to update the counters at commit, and
1742  * we assume that will happen only after we report our
1743  * results. This assumption is a bit shaky, but it is what
1744  * acquire_sample_rows() does, so be consistent.
1745  */
1746  prunestate->all_visible = false;
1747  break;
1748  case HEAPTUPLE_DELETE_IN_PROGRESS:
1749  /* This is an expected case during concurrent vacuum */
1750  prunestate->all_visible = false;
1751 
1752  /*
1753  * Count such rows as live. As above, we assume the deleting
1754  * transaction will commit and update the counters after we
1755  * report.
1756  */
1757  live_tuples++;
1758  break;
1759  default:
1760  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1761  break;
1762  }
1763 
1764  prunestate->hastup = true; /* page makes rel truncation unsafe */
1765 
1766  /* Tuple with storage -- consider need to freeze */
1767  if (heap_prepare_freeze_tuple(tuple.t_data, &vacrel->cutoffs, &pagefrz,
1768  &frozen[tuples_frozen], &totally_frozen))
1769  {
1770  /* Save prepared freeze plan for later */
1771  frozen[tuples_frozen++].offset = offnum;
1772  }
1773 
1774  /*
1775  * If any tuple isn't either totally frozen already or eligible to
1776  * become totally frozen (according to its freeze plan), then the page
1777  * definitely cannot be set all-frozen in the visibility map later on
1778  */
1779  if (!totally_frozen)
1780  prunestate->all_frozen = false;
1781  }
1782 
1783  /*
1784  * We have now divided every item on the page into either an LP_DEAD item
1785  * that will need to be vacuumed in indexes later, or a LP_NORMAL tuple
1786  * that remains and needs to be considered for freezing now (LP_UNUSED and
1787  * LP_REDIRECT items also remain, but are of no further interest to us).
1788  */
1789  vacrel->offnum = InvalidOffsetNumber;
1790 
1791  /*
1792  * Freeze the page when heap_prepare_freeze_tuple indicates that at least
1793  * one XID/MXID from before FreezeLimit/MultiXactCutoff is present. Also
1794  * freeze when pruning generated an FPI, if doing so means that we set the
1795  * page all-frozen afterwards (might not happen until final heap pass).
1796  */
1797  if (pagefrz.freeze_required || tuples_frozen == 0 ||
1798  (prunestate->all_visible && prunestate->all_frozen &&
1799  fpi_before != pgWalUsage.wal_fpi))
1800  {
1801  /*
1802  * We're freezing the page. Our final NewRelfrozenXid doesn't need to
1803  * be affected by the XIDs that are just about to be frozen anyway.
1804  */
1805  vacrel->NewRelfrozenXid = pagefrz.FreezePageRelfrozenXid;
1806  vacrel->NewRelminMxid = pagefrz.FreezePageRelminMxid;
1807 
1808  if (tuples_frozen == 0)
1809  {
1810  /*
1811  * We have no freeze plans to execute, so there's no added cost
1812  * from following the freeze path. That's why it was chosen. This
1813  * is important in the case where the page only contains totally
1814  * frozen tuples at this point (perhaps only following pruning).
1815  * Such pages can be marked all-frozen in the VM by our caller,
1816  * even though none of its tuples were newly frozen here (note
1817  * that the "no freeze" path never sets pages all-frozen).
1818  *
1819  * We never increment the frozen_pages instrumentation counter
1820  * here, since it only counts pages with newly frozen tuples
1821  * (don't confuse that with pages newly set all-frozen in VM).
1822  */
1823  }
1824  else
1825  {
1826  TransactionId snapshotConflictHorizon;
1827 
1828  vacrel->frozen_pages++;
1829 
1830  /*
1831  * We can use visibility_cutoff_xid as our cutoff for conflicts
1832  * when the whole page is eligible to become all-frozen in the VM
1833  * once we're done with it. Otherwise we generate a conservative
1834  * cutoff by stepping back from OldestXmin.
1835  */
1836  if (prunestate->all_visible && prunestate->all_frozen)
1837  {
1838  /* Using same cutoff when setting VM is now unnecessary */
1839  snapshotConflictHorizon = prunestate->visibility_cutoff_xid;
1840  prunestate->visibility_cutoff_xid = InvalidTransactionId;
1841  }
1842  else
1843  {
1844  /* Avoids false conflicts when hot_standby_feedback in use */
1845  snapshotConflictHorizon = vacrel->cutoffs.OldestXmin;
1846  TransactionIdRetreat(snapshotConflictHorizon);
1847  }
1848 
1849  /* Execute all freeze plans for page as a single atomic action */
1850  heap_freeze_execute_prepared(vacrel->rel, buf,
1851  snapshotConflictHorizon,
1852  frozen, tuples_frozen);
1853  }
1854  }
1855  else
1856  {
1857  /*
1858  * Page requires "no freeze" processing. It might be set all-visible
1859  * in the visibility map, but it can never be set all-frozen.
1860  */
1861  vacrel->NewRelfrozenXid = pagefrz.NoFreezePageRelfrozenXid;
1862  vacrel->NewRelminMxid = pagefrz.NoFreezePageRelminMxid;
1863  prunestate->all_frozen = false;
1864  tuples_frozen = 0; /* avoid miscounts in instrumentation */
1865  }
1866 
1867  /*
1868  * VACUUM will call heap_page_is_all_visible() during the second pass over
1869  * the heap to determine all_visible and all_frozen for the page -- this
1870  * is a specialized version of the logic from this function. Now that
1871  * we've finished pruning and freezing, make sure that we're in total
1872  * agreement with heap_page_is_all_visible() using an assertion.
1873  */
1874 #ifdef USE_ASSERT_CHECKING
1875  /* Note that all_frozen value does not matter when !all_visible */
1876  if (prunestate->all_visible && lpdead_items == 0)
1877  {
1878  TransactionId cutoff;
1879  bool all_frozen;
1880 
1881  if (!heap_page_is_all_visible(vacrel, buf, &cutoff, &all_frozen))
1882  Assert(false);
1883 
1884  Assert(!TransactionIdIsValid(cutoff) ||
1885  cutoff == prunestate->visibility_cutoff_xid);
1886  }
1887 #endif
1888 
1889  /*
1890  * Now save details of the LP_DEAD items from the page in vacrel
1891  */
1892  if (lpdead_items > 0)
1893  {
1894  VacDeadItems *dead_items = vacrel->dead_items;
1895  ItemPointerData tmp;
1896 
1897  vacrel->lpdead_item_pages++;
1898  prunestate->has_lpdead_items = true;
1899 
1900  ItemPointerSetBlockNumber(&tmp, blkno);
1901 
1902  for (int i = 0; i < lpdead_items; i++)
1903  {
1904  ItemPointerSetOffsetNumber(&tmp, deadoffsets[i]);
1905  dead_items->items[dead_items->num_items++] = tmp;
1906  }
1907 
1908  Assert(dead_items->num_items <= dead_items->max_items);
1909  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
1910  dead_items->num_items);
1911 
1912  /*
1913  * It was convenient to ignore LP_DEAD items in all_visible earlier on
1914  * to make the choice of whether or not to freeze the page unaffected
1915  * by the short-term presence of LP_DEAD items. These LP_DEAD items
1916  * were effectively assumed to be LP_UNUSED items in the making. It
1917  * doesn't matter which heap pass (initial pass or final pass) ends up
1918  * setting the page all-frozen, as long as the ongoing VACUUM does it.
1919  *
1920  * Now that freezing has been finalized, unset all_visible. It needs
1921  * to reflect the present state of things, as expected by our caller.
1922  */
1923  prunestate->all_visible = false;
1924  }
1925 
1926  /* Finally, add page-local counts to whole-VACUUM counts */
1927  vacrel->tuples_deleted += presult.ndeleted;
1928  vacrel->tuples_frozen += tuples_frozen;
1929  vacrel->lpdead_items += lpdead_items;
1930  vacrel->live_tuples += live_tuples;
1931  vacrel->recently_dead_tuples += recently_dead_tuples;
1932 }
void heap_freeze_execute_prepared(Relation rel, Buffer buffer, TransactionId snapshotConflictHorizon, HeapTupleFreeze *tuples, int ntuples)
Definition: heapam.c:6664
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, HeapPageFreeze *pagefrz, HeapTupleFreeze *frz, bool *totally_frozen)
Definition: heapam.c:6361
HTSV_Result
Definition: heapam.h:95
void heap_page_prune(Relation relation, Buffer buffer, GlobalVisState *vistest, PruneResult *presult, OffsetNumber *off_loc)
Definition: pruneheap.c:213
MultiXactId NoFreezePageRelminMxid
Definition: heapam.h:190
TransactionId FreezePageRelfrozenXid
Definition: heapam.h:178
bool freeze_required
Definition: heapam.h:152
MultiXactId FreezePageRelminMxid
Definition: heapam.h:179
TransactionId NoFreezePageRelfrozenXid
Definition: heapam.h:189
OffsetNumber offset
Definition: heapam.h:122
int ndeleted
Definition: heapam.h:199
#define TransactionIdRetreat(dest)
Definition: transam.h:141
static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
Definition: vacuumlazy.c:3280

References LVPagePruneState::all_frozen, LVPagePruneState::all_visible, Assert(), buf, BufferGetBlockNumber(), LVRelState::cutoffs, LVRelState::dead_items, elog(), ERROR, FirstOffsetNumber, HeapPageFreeze::freeze_required, HeapPageFreeze::FreezePageRelfrozenXid, HeapPageFreeze::FreezePageRelminMxid, LVRelState::frozen_pages, LVPagePruneState::has_lpdead_items, LVPagePruneState::hastup, heap_freeze_execute_prepared(), heap_page_is_all_visible(), heap_page_prune(), heap_prepare_freeze_tuple(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleSatisfiesVacuum(), i, InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), VacDeadItems::items, LVRelState::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, VacDeadItems::max_items, MaxHeapTuplesPerPage, PruneResult::ndeleted, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, HeapPageFreeze::NoFreezePageRelfrozenXid, HeapPageFreeze::NoFreezePageRelminMxid, VacDeadItems::num_items, LVRelState::offnum, HeapTupleFreeze::offset, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), pgstat_progress_update_param(), pgWalUsage, PROGRESS_VACUUM_NUM_DEAD_TUPLES, LVRelState::recently_dead_tuples, LVRelState::rel, RelationGetRelid, res, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdIsNormal, TransactionIdIsValid, TransactionIdPrecedes(), TransactionIdRetreat, LVRelState::tuples_deleted, LVRelState::tuples_frozen, unlikely, LVPagePruneState::visibility_cutoff_xid, LVRelState::vistest, and WalUsage::wal_fpi.

Referenced by lazy_scan_heap().
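
The page-level freeze decision in the middle of the function can be summarized as follows; this is a distillation of the condition already shown above, not additional logic:

    /* Freeze when required for relfrozenxid/relminmxid advancement, when it
     * costs nothing (no freeze plans), or when pruning already emitted an FPI
     * and freezing lets the page be marked all-frozen in the visibility map. */
    bool freeze_page = pagefrz.freeze_required ||
        tuples_frozen == 0 ||
        (prunestate->all_visible && prunestate->all_frozen &&
         fpi_before != pgWalUsage.wal_fpi);

    if (freeze_page)
    {
        /* "freeze" path: trackers may ignore XIDs that are about to be frozen */
        vacrel->NewRelfrozenXid = pagefrz.FreezePageRelfrozenXid;
        vacrel->NewRelminMxid = pagefrz.FreezePageRelminMxid;
    }
    else
    {
        /* "no freeze" path: the page can never be set all-frozen this VACUUM */
        vacrel->NewRelfrozenXid = pagefrz.NoFreezePageRelfrozenXid;
        vacrel->NewRelminMxid = pagefrz.NoFreezePageRelminMxid;
        prunestate->all_frozen = false;
    }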

◆ lazy_scan_skip()

static BlockNumber lazy_scan_skip ( LVRelState *  vacrel,
Buffer *  vmbuffer,
BlockNumber  next_block,
bool *  next_unskippable_allvis,
bool *  skipping_current_range 
)
static

Definition at line 1296 of file vacuumlazy.c.

1298 {
1299  BlockNumber rel_pages = vacrel->rel_pages,
1300  next_unskippable_block = next_block,
1301  nskippable_blocks = 0;
1302  bool skipsallvis = false;
1303 
1304  *next_unskippable_allvis = true;
1305  while (next_unskippable_block < rel_pages)
1306  {
1307  uint8 mapbits = visibilitymap_get_status(vacrel->rel,
1308  next_unskippable_block,
1309  vmbuffer);
1310 
1311  if ((mapbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
1312  {
1313  Assert((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0);
1314  *next_unskippable_allvis = false;
1315  break;
1316  }
1317 
1318  /*
1319  * Caller must scan the last page to determine whether it has tuples
1320  * (caller must have the opportunity to set vacrel->nonempty_pages).
1321  * This rule avoids having lazy_truncate_heap() take access-exclusive
1322  * lock on rel to attempt a truncation that fails anyway, just because
1323  * there are tuples on the last page (it is likely that there will be
1324  * tuples on other nearby pages as well, but those can be skipped).
1325  *
1326  * Implement this by always treating the last block as unsafe to skip.
1327  */
1328  if (next_unskippable_block == rel_pages - 1)
1329  break;
1330 
1331  /* DISABLE_PAGE_SKIPPING makes all skipping unsafe */
1332  if (!vacrel->skipwithvm)
1333  {
1334  /* Caller shouldn't rely on all_visible_according_to_vm */
1335  *next_unskippable_allvis = false;
1336  break;
1337  }
1338 
1339  /*
1340  * Aggressive VACUUM caller can't skip pages just because they are
1341  * all-visible. They may still skip all-frozen pages, which can't
1342  * contain XIDs < OldestXmin (XIDs that aren't already frozen by now).
1343  */
1344  if ((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0)
1345  {
1346  if (vacrel->aggressive)
1347  break;
1348 
1349  /*
1350  * All-visible block is safe to skip in non-aggressive case. But
1351  * remember that the final range contains such a block for later.
1352  */
1353  skipsallvis = true;
1354  }
1355 
1356  vacuum_delay_point();
1357  next_unskippable_block++;
1358  nskippable_blocks++;
1359  }
1360 
1361  /*
1362  * We only skip a range with at least SKIP_PAGES_THRESHOLD consecutive
1363  * pages. Since we're reading sequentially, the OS should be doing
1364  * readahead for us, so there's no gain in skipping a page now and then.
1365  * Skipping such a range might even discourage sequential detection.
1366  *
1367  * This test also enables more frequent relfrozenxid advancement during
1368  * non-aggressive VACUUMs. If the range has any all-visible pages then
1369  * skipping makes updating relfrozenxid unsafe, which is a real downside.
1370  */
1371  if (nskippable_blocks < SKIP_PAGES_THRESHOLD)
1372  *skipping_current_range = false;
1373  else
1374  {
1375  *skipping_current_range = true;
1376  if (skipsallvis)
1377  vacrel->skippedallvis = true;
1378  }
1379 
1380  return next_unskippable_block;
1381 }
#define SKIP_PAGES_THRESHOLD
Definition: vacuumlazy.c:116

References LVRelState::aggressive, Assert(), LVRelState::rel, LVRelState::rel_pages, SKIP_PAGES_THRESHOLD, LVRelState::skippedallvis, LVRelState::skipwithvm, vacuum_delay_point(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_get_status().

Referenced by lazy_scan_heap().
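
The policy above is easier to see stripped of buffer and visibility-map plumbing. The following is a toy model, not server code: a plain bool array stands in for the visibility map and the aggressive flag is passed explicitly, but the three rules from the loop (never skip a block that is not all-visible, never skip the last block, aggressive VACUUM may only skip all-frozen blocks) and the SKIP_PAGES_THRESHOLD test are the same.

#include <stdbool.h>
#include <stdio.h>

#define SKIP_PAGES_THRESHOLD 32

static unsigned
next_unskippable(const bool *all_visible, const bool *all_frozen,
                 unsigned rel_pages, unsigned next_block,
                 bool aggressive, bool *skip_range)
{
    unsigned    block = next_block;
    unsigned    nskippable = 0;

    while (block < rel_pages)
    {
        if (!all_visible[block])
            break;
        if (block == rel_pages - 1)
            break;              /* last block must always be scanned */
        if (aggressive && !all_frozen[block])
            break;              /* aggressive runs may only skip all-frozen */
        block++;
        nskippable++;
    }
    /* short runs are cheaper to read than to skip */
    *skip_range = (nskippable >= SKIP_PAGES_THRESHOLD);
    return block;
}

int
main(void)
{
    bool        vis[100], froz[100], skip;

    for (int i = 0; i < 100; i++)
    {
        vis[i] = (i < 40);      /* first 40 pages all-visible */
        froz[i] = (i < 10);     /* first 10 of those also all-frozen */
    }
    unsigned    next = next_unskippable(vis, froz, 100, 0, false, &skip);

    printf("next unskippable block %u, skip range: %s\n",
           next, skip ? "yes" : "no");
    return 0;
}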

◆ lazy_truncate_heap()

static void lazy_truncate_heap ( LVRelState * vacrel)
static

Definition at line 2885 of file vacuumlazy.c.

2886 {
2887  BlockNumber orig_rel_pages = vacrel->rel_pages;
2888  BlockNumber new_rel_pages;
2889  bool lock_waiter_detected;
2890  int lock_retry;
2891 
2892  /* Report that we are now truncating */
2893  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2894  PROGRESS_VACUUM_PHASE_TRUNCATE);
2895 
2896  /* Update error traceback information one last time */
2897  update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
2898  vacrel->nonempty_pages, InvalidOffsetNumber);
2899 
2900  /*
2901  * Loop until no more truncating can be done.
2902  */
2903  do
2904  {
2905  /*
2906  * We need full exclusive lock on the relation in order to do
2907  * truncation. If we can't get it, give up rather than waiting --- we
2908  * don't want to block other backends, and we don't want to deadlock
2909  * (which is quite possible considering we already hold a lower-grade
2910  * lock).
2911  */
2912  lock_waiter_detected = false;
2913  lock_retry = 0;
2914  while (true)
2915  {
2916  if (ConditionalLockRelation(vacrel->rel, AccessExclusiveLock))
2917  break;
2918 
2919  /*
2920  * Check for interrupts while trying to (re-)acquire the exclusive
2921  * lock.
2922  */
2923  CHECK_FOR_INTERRUPTS();
2924 
2925  if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
2926  VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
2927  {
2928  /*
2929  * We failed to establish the lock in the specified number of
2930  * retries. This means we give up truncating.
2931  */
2932  ereport(vacrel->verbose ? INFO : DEBUG2,
2933  (errmsg("\"%s\": stopping truncate due to conflicting lock request",
2934  vacrel->relname)));
2935  return;
2936  }
2937 
2938  (void) WaitLatch(MyLatch,
2939  WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
2940  VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL,
2941  WAIT_EVENT_VACUUM_TRUNCATE);
2942  ResetLatch(MyLatch);
2943  }
2944 
2945  /*
2946  * Now that we have exclusive lock, look to see if the rel has grown
2947  * whilst we were vacuuming with non-exclusive lock. If so, give up;
2948  * the newly added pages presumably contain non-deletable tuples.
2949  */
2950  new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
2951  if (new_rel_pages != orig_rel_pages)
2952  {
2953  /*
2954  * Note: we intentionally don't update vacrel->rel_pages with the
2955  * new rel size here. If we did, it would amount to assuming that
2956  * the new pages are empty, which is unlikely. Leaving the numbers
2957  * alone amounts to assuming that the new pages have the same
2958  * tuple density as existing ones, which is less unlikely.
2959  */
2960  UnlockRelation(vacrel->rel, AccessExclusiveLock);
2961  return;
2962  }
2963 
2964  /*
2965  * Scan backwards from the end to verify that the end pages actually
2966  * contain no tuples. This is *necessary*, not optional, because
2967  * other backends could have added tuples to these pages whilst we
2968  * were vacuuming.
2969  */
2970  new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
2971  vacrel->blkno = new_rel_pages;
2972 
2973  if (new_rel_pages >= orig_rel_pages)
2974  {
2975  /* can't do anything after all */
2976  UnlockRelation(vacrel->rel, AccessExclusiveLock);
2977  return;
2978  }
2979 
2980  /*
2981  * Okay to truncate.
2982  */
2983  RelationTruncate(vacrel->rel, new_rel_pages);
2984 
2985  /*
2986  * We can release the exclusive lock as soon as we have truncated.
2987  * Other backends can't safely access the relation until they have
2988  * processed the smgr invalidation that smgrtruncate sent out ... but
2989  * that should happen as part of standard invalidation processing once
2990  * they acquire lock on the relation.
2991  */
2992  UnlockRelation(vacrel->rel, AccessExclusiveLock);
2993 
2994  /*
2995  * Update statistics. Here, it *is* correct to adjust rel_pages
2996  * without also touching reltuples, since the tuple count wasn't
2997  * changed by the truncation.
2998  */
2999  vacrel->removed_pages += orig_rel_pages - new_rel_pages;
3000  vacrel->rel_pages = new_rel_pages;
3001 
3002  ereport(vacrel->verbose ? INFO : DEBUG2,
3003  (errmsg("table \"%s\": truncated %u to %u pages",
3004  vacrel->relname,
3005  orig_rel_pages, new_rel_pages)));
3006  orig_rel_pages = new_rel_pages;
3007  } while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
3008 }
struct Latch * MyLatch
Definition: globals.c:58
void ResetLatch(Latch *latch)
Definition: latch.c:697
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:490
#define WL_TIMEOUT
Definition: latch.h:128
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:130
#define WL_LATCH_SET
Definition: latch.h:125
void UnlockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:311
bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:276
#define PROGRESS_VACUUM_PHASE_TRUNCATE
Definition: progress.h:36
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:287
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL
Definition: vacuumlazy.c:87
#define VACUUM_TRUNCATE_LOCK_TIMEOUT
Definition: vacuumlazy.c:88
static BlockNumber count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
Definition: vacuumlazy.c:3016

References AccessExclusiveLock, LVRelState::blkno, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), DEBUG2, ereport, errmsg(), INFO, InvalidOffsetNumber, MyLatch, LVRelState::nonempty_pages, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelState::rel, LVRelState::rel_pages, RelationGetNumberOfBlocks, RelationTruncate(), LVRelState::relname, LVRelState::removed_pages, ResetLatch(), UnlockRelation(), update_vacuum_error_info(), VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_TRUNCATE_LOCK_TIMEOUT, VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL, LVRelState::verbose, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

Referenced by heap_vacuum_rel().
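
The give-up condition in the lock retry loop is pure arithmetic over the two macros defined near the top of this file. A minimal sketch of just that budget (the real loop additionally waits on MyLatch and retries ConditionalLockRelation()):

#include <stdio.h>

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50   /* ms */
#define VACUUM_TRUNCATE_LOCK_TIMEOUT       5000 /* ms */

int
main(void)
{
    int         max_retries = VACUUM_TRUNCATE_LOCK_TIMEOUT /
        VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL;

    /* 100 attempts roughly 50 ms apart: about five seconds before giving up */
    printf("give up after %d failed lock attempts (~%d ms)\n",
           max_retries, max_retries * VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL);
    return 0;
}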

◆ lazy_vacuum()

static void lazy_vacuum ( LVRelState * vacrel)
static

Definition at line 2188 of file vacuumlazy.c.

2189 {
2190  bool bypass;
2191 
2192  /* Should not end up here with no indexes */
2193  Assert(vacrel->nindexes > 0);
2194  Assert(vacrel->lpdead_item_pages > 0);
2195 
2196  if (!vacrel->do_index_vacuuming)
2197  {
2198  Assert(!vacrel->do_index_cleanup);
2199  vacrel->dead_items->num_items = 0;
2200  return;
2201  }
2202 
2203  /*
2204  * Consider bypassing index vacuuming (and heap vacuuming) entirely.
2205  *
2206  * We currently only do this in cases where the number of LP_DEAD items
2207  * for the entire VACUUM operation is close to zero. This avoids sharp
2208  * discontinuities in the duration and overhead of successive VACUUM
2209  * operations that run against the same table with a fixed workload.
2210  * Ideally, successive VACUUM operations will behave as if there are
2211  * exactly zero LP_DEAD items in cases where there are close to zero.
2212  *
2213  * This is likely to be helpful with a table that is continually affected
2214  * by UPDATEs that can mostly apply the HOT optimization, but occasionally
2215  * have small aberrations that lead to just a few heap pages retaining
2216  * only one or two LP_DEAD items. This is pretty common; even when the
2217  * DBA goes out of their way to make UPDATEs use HOT, it is practically
2218  * impossible to predict whether HOT will be applied in 100% of cases.
2219  * It's far easier to ensure that 99%+ of all UPDATEs against a table use
2220  * HOT through careful tuning.
2221  */
2222  bypass = false;
2223  if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
2224  {
2225  BlockNumber threshold;
2226 
2227  Assert(vacrel->num_index_scans == 0);
2228  Assert(vacrel->lpdead_items == vacrel->dead_items->num_items);
2229  Assert(vacrel->do_index_vacuuming);
2230  Assert(vacrel->do_index_cleanup);
2231 
2232  /*
2233  * This crossover point at which we'll start to do index vacuuming is
2234  * expressed as a percentage of the total number of heap pages in the
2235  * table that are known to have at least one LP_DEAD item. This is
2236  * much more important than the total number of LP_DEAD items, since
2237  * it's a proxy for the number of heap pages whose visibility map bits
2238  * cannot be set on account of bypassing index and heap vacuuming.
2239  *
2240  * We apply one further precautionary test: the space currently used
2241  * to store the TIDs (TIDs that now all point to LP_DEAD items) must
2242  * not exceed 32MB. This limits the risk that we will bypass index
2243  * vacuuming again and again until eventually there is a VACUUM whose
2244  * dead_items space is not CPU cache resident.
2245  *
2246  * We don't take any special steps to remember the LP_DEAD items (such
2247  * as counting them in our final update to the stats system) when the
2248  * optimization is applied. Though the accounting used in analyze.c's
2249  * acquire_sample_rows() will recognize the same LP_DEAD items as dead
2250  * rows in its own stats report, that's okay. The discrepancy should
2251  * be negligible. If this optimization is ever expanded to cover more
2252  * cases then this may need to be reconsidered.
2253  */
2254  threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
2255  bypass = (vacrel->lpdead_item_pages < threshold &&
2256  vacrel->lpdead_items < MAXDEADITEMS(32L * 1024L * 1024L));
2257  }
2258 
2259  if (bypass)
2260  {
2261  /*
2262  * There are almost zero TIDs. Behave as if there were precisely
2263  * zero: bypass index vacuuming, but do index cleanup.
2264  *
2265  * We expect that the ongoing VACUUM operation will finish very
2266  * quickly, so there is no point in considering speeding up as a
2267  * failsafe against wraparound failure. (Index cleanup is expected to
2268  * finish very quickly in cases where there were no ambulkdelete()
2269  * calls.)
2270  */
2271  vacrel->do_index_vacuuming = false;
2272  }
2273  else if (lazy_vacuum_all_indexes(vacrel))
2274  {
2275  /*
2276  * We successfully completed a round of index vacuuming. Do related
2277  * heap vacuuming now.
2278  */
2279  lazy_vacuum_heap_rel(vacrel);
2280  }
2281  else
2282  {
2283  /*
2284  * Failsafe case.
2285  *
2286  * We attempted index vacuuming, but didn't finish a full round/full
2287  * index scan. This happens when relfrozenxid or relminmxid is too
2288  * far in the past.
2289  *
2290  * From this point on the VACUUM operation will do no further index
2291  * vacuuming or heap vacuuming. This VACUUM operation won't end up
2292  * back here again.
2293  */
2294  Assert(VacuumFailsafeActive);
2295  }
2296 
2297  /*
2298  * Forget the LP_DEAD items that we just vacuumed (or just decided to not
2299  * vacuum)
2300  */
2301  vacrel->dead_items->num_items = 0;
2302 }
#define BYPASS_THRESHOLD_PAGES
Definition: vacuumlazy.c:94
static bool lazy_vacuum_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2313
static void lazy_vacuum_heap_rel(LVRelState *vacrel)
Definition: vacuumlazy.c:2431

References Assert(), BYPASS_THRESHOLD_PAGES, LVRelState::consider_bypass_optimization, LVRelState::dead_items, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, lazy_vacuum_all_indexes(), lazy_vacuum_heap_rel(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAXDEADITEMS, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItems::num_items, LVRelState::rel_pages, and VacuumFailsafeActive.

Referenced by lazy_scan_heap().
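
The bypass test itself is simple arithmetic. The sketch below is a simplification, not the server's code path: the 6-byte TID size used to approximate the 32MB MAXDEADITEMS() cap is an assumption of the model, and the server derives the real cap from the VacDeadItems layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BYPASS_THRESHOLD_PAGES 0.02     /* i.e. 2% of rel_pages */

static bool
would_bypass(uint32_t rel_pages, uint32_t lpdead_item_pages,
             int64_t lpdead_items)
{
    double      threshold = (double) rel_pages * BYPASS_THRESHOLD_PAGES;
    int64_t     tid_cap = (32LL * 1024 * 1024) / 6;     /* ~32MB of 6-byte TIDs */

    return lpdead_item_pages < threshold && lpdead_items < tid_cap;
}

int
main(void)
{
    /* 1,000,000-page table with LP_DEAD items on 0.5% of its pages */
    printf("bypass index vacuuming: %s\n",
           would_bypass(1000000, 5000, 20000) ? "yes" : "no");
    return 0;
}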

◆ lazy_vacuum_all_indexes()

static bool lazy_vacuum_all_indexes ( LVRelState * vacrel)
static

Definition at line 2313 of file vacuumlazy.c.

2314 {
2315  bool allindexes = true;
2316  double old_live_tuples = vacrel->rel->rd_rel->reltuples;
2317  const int progress_start_index[] = {
2318  PROGRESS_VACUUM_PHASE,
2319  PROGRESS_VACUUM_INDEXES_TOTAL
2320  };
2321  const int progress_end_index[] = {
2322  PROGRESS_VACUUM_INDEXES_TOTAL,
2323  PROGRESS_VACUUM_INDEXES_PROCESSED,
2324  PROGRESS_VACUUM_NUM_INDEX_VACUUMS
2325  };
2326  int64 progress_start_val[2];
2327  int64 progress_end_val[3];
2328 
2329  Assert(vacrel->nindexes > 0);
2330  Assert(vacrel->do_index_vacuuming);
2331  Assert(vacrel->do_index_cleanup);
2332 
2333  /* Precheck for XID wraparound emergencies */
2334  if (lazy_check_wraparound_failsafe(vacrel))
2335  {
2336  /* Wraparound emergency -- don't even start an index scan */
2337  return false;
2338  }
2339 
2340  /*
2341  * Report that we are now vacuuming indexes and the number of indexes to
2342  * vacuum.
2343  */
2344  progress_start_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_INDEX;
2345  progress_start_val[1] = vacrel->nindexes;
2346  pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
2347 
2348  if (!ParallelVacuumIsActive(vacrel))
2349  {
2350  for (int idx = 0; idx < vacrel->nindexes; idx++)
2351  {
2352  Relation indrel = vacrel->indrels[idx];
2353  IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2354 
2355  vacrel->indstats[idx] = lazy_vacuum_one_index(indrel, istat,
2356  old_live_tuples,
2357  vacrel);
2358 
2359  /* Report the number of indexes vacuumed */
2360  pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
2361  idx + 1);
2362 
2363  if (lazy_check_wraparound_failsafe(vacrel))
2364  {
2365  /* Wraparound emergency -- end current index scan */
2366  allindexes = false;
2367  break;
2368  }
2369  }
2370  }
2371  else
2372  {
2373  /* Outsource everything to parallel variant */
2374  parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples,
2375  vacrel->num_index_scans);
2376 
2377  /*
2378  * Do a postcheck to consider applying wraparound failsafe now. Note
2379  * that parallel VACUUM only gets the precheck and this postcheck.
2380  */
2381  if (lazy_check_wraparound_failsafe(vacrel))
2382  allindexes = false;
2383  }
2384 
2385  /*
2386  * We delete all LP_DEAD items from the first heap pass in all indexes on
2387  * each call here (except calls where we choose to do the failsafe). This
2388  * makes the next call to lazy_vacuum_heap_rel() safe (except in the event
2389  * of the failsafe triggering, which prevents the next call from taking
2390  * place).
2391  */
2392  Assert(vacrel->num_index_scans > 0 ||
2393  vacrel->dead_items->num_items == vacrel->lpdead_items);
2394  Assert(allindexes || VacuumFailsafeActive);
2395 
2396  /*
2397  * Increase and report the number of index scans. Also, we reset
2398  * PROGRESS_VACUUM_INDEXES_TOTAL and PROGRESS_VACUUM_INDEXES_PROCESSED.
2399  *
2400  * We deliberately include the case where we started a round of bulk
2401  * deletes that we weren't able to finish due to the failsafe triggering.
2402  */
2403  vacrel->num_index_scans++;
2404  progress_end_val[0] = 0;
2405  progress_end_val[1] = 0;
2406  progress_end_val[2] = vacrel->num_index_scans;
2407  pgstat_progress_update_multi_param(3, progress_end_index, progress_end_val);
2408 
2409  return allindexes;
2410 }
#define PROGRESS_VACUUM_NUM_INDEX_VACUUMS
Definition: progress.h:25
#define PROGRESS_VACUUM_PHASE_VACUUM_INDEX
Definition: progress.h:33
static IndexBulkDeleteResult * lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
Definition: vacuumlazy.c:2757
void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans)

References Assert(), LVRelState::dead_items, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, idx(), LVRelState::indrels, LVRelState::indstats, lazy_check_wraparound_failsafe(), lazy_vacuum_one_index(), LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItems::num_items, parallel_vacuum_bulkdel_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, LVRelState::pvs, RelationData::rd_rel, LVRelState::rel, and VacuumFailsafeActive.

Referenced by lazy_vacuum().
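
The control flow of the serial (non-parallel) path reduces to a precheck, a loop with a per-index postcheck, and an early exit. In this sketch bulkdel_one_index() and failsafe_triggered() are stand-ins for lazy_vacuum_one_index() and lazy_check_wraparound_failsafe(); nothing here is server code.

#include <stdbool.h>
#include <stdio.h>

static bool
failsafe_triggered(void)
{
    return false;               /* stand-in for the wraparound check */
}

static void
bulkdel_one_index(int idx)
{
    printf("bulk delete in index %d\n", idx);
}

static bool
vacuum_all_indexes(int nindexes)
{
    if (failsafe_triggered())
        return false;           /* precheck: don't even start an index scan */

    for (int idx = 0; idx < nindexes; idx++)
    {
        bulkdel_one_index(idx);
        if (failsafe_triggered())
            return false;       /* abandon the remaining indexes */
    }
    return true;                /* caller may proceed to heap vacuuming */
}

int
main(void)
{
    printf("completed all indexes: %s\n",
           vacuum_all_indexes(3) ? "yes" : "no");
    return 0;
}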

◆ lazy_vacuum_heap_page()

static int lazy_vacuum_heap_page ( LVRelState * vacrel,
BlockNumber  blkno,
Buffer  buffer,
int  index,
Buffer  vmbuffer 
)
static

Definition at line 2519 of file vacuumlazy.c.

2521 {
2522  VacDeadItems *dead_items = vacrel->dead_items;
2523  Page page = BufferGetPage(buffer);
2524  OffsetNumber unused[MaxHeapTuplesPerPage];
2525  int nunused = 0;
2526  TransactionId visibility_cutoff_xid;
2527  bool all_frozen;
2528  LVSavedErrInfo saved_err_info;
2529 
2530  Assert(vacrel->nindexes == 0 || vacrel->do_index_vacuuming);
2531 
2532  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
2533 
2534  /* Update error traceback information */
2535  update_vacuum_error_info(vacrel, &saved_err_info,
2536  VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
2537  InvalidOffsetNumber);
2538 
2539  START_CRIT_SECTION();
2540 
2541  for (; index < dead_items->num_items; index++)
2542  {
2543  BlockNumber tblk;
2544  OffsetNumber toff;
2545  ItemId itemid;
2546 
2547  tblk = ItemPointerGetBlockNumber(&dead_items->items[index]);
2548  if (tblk != blkno)
2549  break; /* past end of tuples for this block */
2550  toff = ItemPointerGetOffsetNumber(&dead_items->items[index]);
2551  itemid = PageGetItemId(page, toff);
2552 
2553  Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
2554  ItemIdSetUnused(itemid);
2555  unused[nunused++] = toff;
2556  }
2557 
2558  Assert(nunused > 0);
2559 
2560  /* Attempt to truncate line pointer array now */
2561  PageTruncateLinePointerArray(page);
2562 
2563  /*
2564  * Mark buffer dirty before we write WAL.
2565  */
2566  MarkBufferDirty(buffer);
2567 
2568  /* XLOG stuff */
2569  if (RelationNeedsWAL(vacrel->rel))
2570  {
2571  xl_heap_vacuum xlrec;
2572  XLogRecPtr recptr;
2573 
2574  xlrec.nunused = nunused;
2575 
2576  XLogBeginInsert();
2577  XLogRegisterData((char *) &xlrec, SizeOfHeapVacuum);
2578 
2579  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
2580  XLogRegisterBufData(0, (char *) unused, nunused * sizeof(OffsetNumber));
2581 
2582  recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VACUUM);
2583 
2584  PageSetLSN(page, recptr);
2585  }
2586 
2587  /*
2588  * End critical section, so we safely can do visibility tests (which
2589  * possibly need to perform IO and allocate memory!). If we crash now the
2590  * page (including the corresponding vm bit) might not be marked all
2591  * visible, but that's fine. A later vacuum will fix that.
2592  */
2593  END_CRIT_SECTION();
2594 
2595  /*
2596  * Now that we have removed the LP_DEAD items from the page, once again
2597  * check if the page has become all-visible. The page is already marked
2598  * dirty, exclusively locked, and, if needed, a full page image has been
2599  * emitted.
2600  */
2601  Assert(!PageIsAllVisible(page));
2602  if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
2603  &all_frozen))
2604  {
2605  uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
2606 
2607  if (all_frozen)
2608  {
2609  Assert(!TransactionIdIsValid(visibility_cutoff_xid));
2610  flags |= VISIBILITYMAP_ALL_FROZEN;
2611  }
2612 
2613  PageSetAllVisible(page);
2614  visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr,
2615  vmbuffer, visibility_cutoff_xid, flags);
2616  }
2617 
2618  /* Revert to the previous phase information for error traceback */
2619  restore_vacuum_error_info(vacrel, &saved_err_info);
2620  return index;
2621 }
void PageTruncateLinePointerArray(Page page)
Definition: bufpage.c:835
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:388
#define XLOG_HEAP2_VACUUM
Definition: heapam_xlog.h:55
#define SizeOfHeapVacuum
Definition: heapam_xlog.h:267
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
Definition: type.h:95
uint64 XLogRecPtr
Definition: xlogdefs.h:21
void XLogRegisterData(char *data, uint32 len)
Definition: xloginsert.c:351
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:461
void XLogRegisterBufData(uint8 block_id, char *data, uint32 len)
Definition: xloginsert.c:392
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:243
void XLogBeginInsert(void)
Definition: xloginsert.c:150
#define REGBUF_STANDARD
Definition: xloginsert.h:34

References Assert(), BufferGetPage(), LVRelState::dead_items, LVRelState::do_index_vacuuming, END_CRIT_SECTION, heap_page_is_all_visible(), InvalidOffsetNumber, InvalidXLogRecPtr, ItemIdHasStorage, ItemIdIsDead, ItemIdSetUnused, ItemPointerGetBlockNumber(), ItemPointerGetOffsetNumber(), VacDeadItems::items, MarkBufferDirty(), MaxHeapTuplesPerPage, LVRelState::nindexes, VacDeadItems::num_items, xl_heap_vacuum::nunused, PageGetItemId(), PageIsAllVisible(), PageSetAllVisible(), PageSetLSN(), PageTruncateLinePointerArray(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, REGBUF_STANDARD, LVRelState::rel, RelationNeedsWAL, restore_vacuum_error_info(), SizeOfHeapVacuum, START_CRIT_SECTION, TransactionIdIsValid, update_vacuum_error_info(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), XLOG_HEAP2_VACUUM, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by lazy_scan_heap(), and lazy_vacuum_heap_rel().
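
The per-page work relies on dead_items being sorted by (block, offset), so the TIDs for one heap block form a contiguous run that starts at the caller's index. A standalone model of just that collection step, with a small struct standing in for ItemPointerData:

#include <stdint.h>
#include <stdio.h>

typedef struct
{
    uint32_t    block;
    uint16_t    offset;
} Tid;

/* Gather the offsets for 'blkno' starting at dead_items[index]; return the next index. */
static int
collect_unused(const Tid *dead_items, int nitems, int index,
               uint32_t blkno, uint16_t *unused, int *nunused)
{
    *nunused = 0;
    for (; index < nitems; index++)
    {
        if (dead_items[index].block != blkno)
            break;              /* past end of tuples for this block */
        unused[(*nunused)++] = dead_items[index].offset;
    }
    return index;
}

int
main(void)
{
    Tid         items[] = {{7, 2}, {7, 5}, {9, 1}};
    uint16_t    unused[8];
    int         nunused;
    int         next = collect_unused(items, 3, 0, 7, unused, &nunused);

    printf("block 7: %d offsets set LP_UNUSED, resume at item %d\n",
           nunused, next);
    return 0;
}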

◆ lazy_vacuum_heap_rel()

static void lazy_vacuum_heap_rel ( LVRelState * vacrel)
static

Definition at line 2431 of file vacuumlazy.c.

2432 {
2433  int index = 0;
2434  BlockNumber vacuumed_pages = 0;
2435  Buffer vmbuffer = InvalidBuffer;
2436  LVSavedErrInfo saved_err_info;
2437 
2438  Assert(vacrel->do_index_vacuuming);
2439  Assert(vacrel->do_index_cleanup);
2440  Assert(vacrel->num_index_scans > 0);
2441 
2442  /* Report that we are now vacuuming the heap */
2443  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2444  PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
2445 
2446  /* Update error traceback information */
2447  update_vacuum_error_info(vacrel, &saved_err_info,
2448  VACUUM_ERRCB_PHASE_VACUUM_HEAP, InvalidBlockNumber,
2449  InvalidOffsetNumber);
2450 
2451  while (index < vacrel->dead_items->num_items)
2452  {
2453  BlockNumber blkno;
2454  Buffer buf;
2455  Page page;
2456  Size freespace;
2457 
2458  vacuum_delay_point();
2459 
2460  blkno = ItemPointerGetBlockNumber(&vacrel->dead_items->items[index]);
2461  vacrel->blkno = blkno;
2462 
2463  /*
2464  * Pin the visibility map page in case we need to mark the page
2465  * all-visible. In most cases this will be very cheap, because we'll
2466  * already have the correct page pinned anyway.
2467  */
2468  visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
2469 
2470  /* We need a non-cleanup exclusive lock to mark dead_items unused */
2471  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
2472  vacrel->bstrategy);
2473  LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2474  index = lazy_vacuum_heap_page(vacrel, blkno, buf, index, vmbuffer);
2475 
2476  /* Now that we've vacuumed the page, record its available space */
2477  page = BufferGetPage(buf);
2478  freespace = PageGetHeapFreeSpace(page);
2479 
2480  UnlockReleaseBuffer(buf);
2481  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
2482  vacuumed_pages++;
2483  }
2484 
2485  vacrel->blkno = InvalidBlockNumber;
2486  if (BufferIsValid(vmbuffer))
2487  ReleaseBuffer(vmbuffer);
2488 
2489  /*
2490  * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
2491  * the second heap pass. No more, no less.
2492  */
2493  Assert(index > 0);
2494  Assert(vacrel->num_index_scans > 1 ||
2495  (index == vacrel->lpdead_items &&
2496  vacuumed_pages == vacrel->lpdead_item_pages));
2497 
2498  ereport(DEBUG2,
2499  (errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
2500  vacrel->relname, (long long) index, vacuumed_pages)));
2501 
2502  /* Revert to the previous phase information for error traceback */
2503  restore_vacuum_error_info(vacrel, &saved_err_info);
2504 }
#define PROGRESS_VACUUM_PHASE_VACUUM_HEAP
Definition: progress.h:34

References Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_EXCLUSIVE, BufferGetPage(), BufferIsValid(), LVRelState::dead_items, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, ereport, errmsg(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, ItemPointerGetBlockNumber(), VacDeadItems::items, lazy_vacuum_heap_page(), LockBuffer(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAIN_FORKNUM, LVRelState::num_index_scans, PageGetHeapFreeSpace(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, RBM_NORMAL, ReadBufferExtended(), RecordPageWithFreeSpace(), LVRelState::rel, ReleaseBuffer(), LVRelState::relname, restore_vacuum_error_info(), UnlockReleaseBuffer(), update_vacuum_error_info(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, and visibilitymap_pin().

Referenced by lazy_vacuum().
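
Seen from the driver side, the second heap pass is a single forward sweep: each iteration picks the block of the current dead item, hands the cursor to the per-page routine, and resumes where that routine stopped. The sketch below models only that cursor movement; vacuum_one_page() is a stand-in for lazy_vacuum_heap_page() and the free-space recording is reduced to a comment.

#include <stdint.h>
#include <stdio.h>

typedef struct
{
    uint32_t    block;
    uint16_t    offset;
} Tid;

static int
vacuum_one_page(const Tid *items, int nitems, int index, uint32_t blkno)
{
    while (index < nitems && items[index].block == blkno)
        index++;                /* mark these offsets LP_UNUSED */
    printf("vacuumed block %u\n", blkno);
    return index;
}

int
main(void)
{
    Tid         items[] = {{3, 1}, {3, 4}, {8, 2}, {8, 3}, {8, 9}};
    int         nitems = 5,
                index = 0,
                vacuumed_pages = 0;

    while (index < nitems)
    {
        uint32_t    blkno = items[index].block;

        index = vacuum_one_page(items, nitems, index, blkno);
        vacuumed_pages++;       /* then record the page's free space */
    }
    printf("removed %d dead item identifiers in %d pages\n",
           nitems, vacuumed_pages);
    return 0;
}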

◆ lazy_vacuum_one_index()

static IndexBulkDeleteResult * lazy_vacuum_one_index ( Relation  indrel,
IndexBulkDeleteResult * istat,
double  reltuples,
LVRelState * vacrel 
)
static

Definition at line 2757 of file vacuumlazy.c.

2759 {
2760  IndexVacuumInfo ivinfo;
2761  LVSavedErrInfo saved_err_info;
2762 
2763  ivinfo.index = indrel;
2764  ivinfo.heaprel = vacrel->rel;
2765  ivinfo.analyze_only = false;
2766  ivinfo.report_progress = false;
2767  ivinfo.estimated_count = true;
2768  ivinfo.message_level = DEBUG2;
2769  ivinfo.num_heap_tuples = reltuples;
2770  ivinfo.strategy = vacrel->bstrategy;
2771 
2772  /*
2773  * Update error traceback information.
2774  *
2775  * The index name is saved during this phase and restored immediately
2776  * after this phase. See vacuum_error_callback.
2777  */
2778  Assert(vacrel->indname == NULL);
2779  vacrel->indname = pstrdup(RelationGetRelationName(indrel));
2780  update_vacuum_error_info(vacrel, &saved_err_info,
2781  VACUUM_ERRCB_PHASE_VACUUM_INDEX, InvalidBlockNumber,
2782  InvalidOffsetNumber);
2783 
2784  /* Do bulk deletion */
2785  istat = vac_bulkdel_one_index(&ivinfo, istat, (void *) vacrel->dead_items);
2786 
2787  /* Revert to the previous phase information for error traceback */
2788  restore_vacuum_error_info(vacrel, &saved_err_info);
2789  pfree(vacrel->indname);
2790  vacrel->indname = NULL;
2791 
2792  return istat;
2793 }
IndexBulkDeleteResult * vac_bulkdel_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat, VacDeadItems *dead_items)
Definition: vacuum.c:2476

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, LVRelState::dead_items, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_bulkdel_one_index(), and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by lazy_vacuum_all_indexes().

◆ restore_vacuum_error_info()

static void restore_vacuum_error_info ( LVRelState * vacrel,
const LVSavedErrInfo * saved_vacrel 
)
static

Definition at line 3514 of file vacuumlazy.c.

3516 {
3517  vacrel->blkno = saved_vacrel->blkno;
3518  vacrel->offnum = saved_vacrel->offnum;
3519  vacrel->phase = saved_vacrel->phase;
3520 }
BlockNumber blkno
Definition: vacuumlazy.c:236
VacErrPhase phase
Definition: vacuumlazy.c:238
OffsetNumber offnum
Definition: vacuumlazy.c:237

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().

◆ should_attempt_truncation()

static bool should_attempt_truncation ( LVRelState * vacrel)
static

Definition at line 2865 of file vacuumlazy.c.

2866 {
2867  BlockNumber possibly_freeable;
2868 
2869  if (!vacrel->do_rel_truncate || VacuumFailsafeActive)
2870  return false;
2871 
2872  possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
2873  if (possibly_freeable > 0 &&
2874  (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
2875  possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
2876  return true;
2877 
2878  return false;
2879 }
#define REL_TRUNCATE_MINIMUM
Definition: vacuumlazy.c:76
#define REL_TRUNCATE_FRACTION
Definition: vacuumlazy.c:77

References LVRelState::do_rel_truncate, LVRelState::nonempty_pages, LVRelState::rel_pages, REL_TRUNCATE_FRACTION, REL_TRUNCATE_MINIMUM, and VacuumFailsafeActive.

Referenced by heap_vacuum_rel().
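
The heuristic is worth seeing as plain arithmetic: truncation is attempted only when the empty tail is at least REL_TRUNCATE_MINIMUM pages or at least 1/REL_TRUNCATE_FRACTION of the table. A self-contained restatement using the macros defined near the top of this file:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REL_TRUNCATE_MINIMUM  1000
#define REL_TRUNCATE_FRACTION 16

static bool
should_truncate(uint32_t rel_pages, uint32_t nonempty_pages)
{
    uint32_t    possibly_freeable = rel_pages - nonempty_pages;

    return possibly_freeable > 0 &&
        (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
         possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);
}

int
main(void)
{
    /* 10,000-page table with an 800-page empty tail: 800 >= 10000/16, so yes */
    printf("attempt truncation: %s\n",
           should_truncate(10000, 9200) ? "yes" : "no");
    return 0;
}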

◆ update_relstats_all_indexes()

static void update_relstats_all_indexes ( LVRelState * vacrel)
static

Definition at line 3396 of file vacuumlazy.c.

3397 {
3398  Relation *indrels = vacrel->indrels;
3399  int nindexes = vacrel->nindexes;
3400  IndexBulkDeleteResult **indstats = vacrel->indstats;
3401 
3402  Assert(vacrel->do_index_cleanup);
3403 
3404  for (int idx = 0; idx < nindexes; idx++)
3405  {
3406  Relation indrel = indrels[idx];
3407  IndexBulkDeleteResult *istat = indstats[idx];
3408 
3409  if (istat == NULL || istat->estimated_count)
3410  continue;
3411 
3412  /* Update index statistics */
3413  vac_update_relstats(indrel,
3414  istat->num_pages,
3415  istat->num_index_tuples,
3416  0,
3417  false,
3418  InvalidTransactionId,
3419  InvalidMultiXactId,
3420  NULL, NULL, false);
3421  }
3422 }
bool estimated_count
Definition: genam.h:78
double num_index_tuples
Definition: genam.h:79

References Assert(), LVRelState::do_index_cleanup, IndexBulkDeleteResult::estimated_count, idx(), LVRelState::indrels, LVRelState::indstats, InvalidMultiXactId, InvalidTransactionId, LVRelState::nindexes, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, and vac_update_relstats().

Referenced by heap_vacuum_rel().

◆ update_vacuum_error_info()

static void update_vacuum_error_info ( LVRelState * vacrel,
LVSavedErrInfo * saved_vacrel,
int  phase,
BlockNumber  blkno,
OffsetNumber  offnum 
)
static

Definition at line 3495 of file vacuumlazy.c.

3497 {
3498  if (saved_vacrel)
3499  {
3500  saved_vacrel->offnum = vacrel->offnum;
3501  saved_vacrel->blkno = vacrel->blkno;
3502  saved_vacrel->phase = vacrel->phase;
3503  }
3504 
3505  vacrel->blkno = blkno;
3506  vacrel->offnum = offnum;
3507  vacrel->phase = phase;
3508 }

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_scan_heap(), lazy_truncate_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().
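
update_vacuum_error_info() and restore_vacuum_error_info() implement a simple push/pop: the new phase goes into the shared error-info fields while the previous values sit in an LVSavedErrInfo on the caller's stack. A standalone model of that pattern (the struct and names here only mirror the real ones):

#include <stdint.h>
#include <stdio.h>

typedef struct
{
    uint32_t    blkno;
    uint16_t    offnum;
    int         phase;
} ErrInfo;

static void
update_err_info(ErrInfo *cur, ErrInfo *saved,
                int phase, uint32_t blkno, uint16_t offnum)
{
    if (saved)
        *saved = *cur;          /* remember what to restore afterwards */
    cur->phase = phase;
    cur->blkno = blkno;
    cur->offnum = offnum;
}

static void
restore_err_info(ErrInfo *cur, const ErrInfo *saved)
{
    *cur = *saved;
}

int
main(void)
{
    ErrInfo     cur = {0, 0, 0};
    ErrInfo     saved;

    update_err_info(&cur, &saved, 3, 42, 7);    /* e.g. a heap-vacuuming phase */
    /* ... work that may error out is reported against block 42 ... */
    restore_err_info(&cur, &saved);
    printf("phase back to %d\n", cur.phase);
    return 0;
}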

◆ vacuum_error_callback()

static void vacuum_error_callback ( void *  arg)
static

Definition at line 3431 of file vacuumlazy.c.

3432 {
3433  LVRelState *errinfo = arg;
3434 
3435  switch (errinfo->phase)
3436  {
3437  case VACUUM_ERRCB_PHASE_SCAN_HEAP:
3438  if (BlockNumberIsValid(errinfo->blkno))
3439  {
3440  if (OffsetNumberIsValid(errinfo->offnum))
3441  errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
3442  errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3443  else
3444  errcontext("while scanning block %u of relation \"%s.%s\"",
3445  errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3446  }
3447  else
3448  errcontext("while scanning relation \"%s.%s\"",
3449  errinfo->relnamespace, errinfo->relname);
3450  break;
3451 
3452  case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
3453  if (BlockNumberIsValid(errinfo->blkno))
3454  {
3455  if (OffsetNumberIsValid(errinfo->offnum))
3456  errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
3457  errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3458  else
3459  errcontext("while vacuuming block %u of relation \"%s.%s\"",
3460  errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3461  }
3462  else
3463  errcontext("while vacuuming relation \"%s.%s\"",
3464  errinfo->relnamespace, errinfo->relname);
3465  break;
3466 
3467  case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
3468  errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
3469  errinfo->indname, errinfo->relnamespace, errinfo->relname);
3470  break;
3471 
3472  case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
3473  errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
3474  errinfo->indname, errinfo->relnamespace, errinfo->relname);
3475  break;
3476 
3477  case VACUUM_ERRCB_PHASE_TRUNCATE:
3478  if (BlockNumberIsValid(errinfo->blkno))
3479  errcontext("while truncating relation \"%s.%s\" to %u blocks",
3480  errinfo->relnamespace, errinfo->relname, errinfo->blkno);
3481  break;
3482 
3483  case VACUUM_ERRCB_PHASE_UNKNOWN:
3484  default:
3485  return; /* do nothing; the errinfo may not be
3486  * initialized */
3487  }
3488 }
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
#define errcontext
Definition: elog.h:196
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
void * arg

References arg, LVRelState::blkno, BlockNumberIsValid(), errcontext, LVRelState::indname, LVRelState::offnum, OffsetNumberIsValid, LVRelState::phase, LVRelState::relname, LVRelState::relnamespace, VACUUM_ERRCB_PHASE_INDEX_CLEANUP, VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_ERRCB_PHASE_UNKNOWN, VACUUM_ERRCB_PHASE_VACUUM_HEAP, and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by heap_vacuum_rel().
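
The callback only formats context lines; it takes effect once it is pushed onto error_context_stack, which is what heap_vacuum_rel() does around the whole operation. A sketch of that standard registration pattern, written as if it lived in this file (it is backend code, not a standalone program, and the wrapper function name is made up for illustration):

/* minimal sketch, assuming it sits in vacuumlazy.c next to vacuum_error_callback() */
static void
run_with_vacuum_error_context(LVRelState *vacrel)
{
    ErrorContextCallback errcallback;

    /* push: errors raised below get "while scanning/vacuuming ..." context */
    errcallback.callback = vacuum_error_callback;
    errcallback.arg = vacrel;
    errcallback.previous = error_context_stack;
    error_context_stack = &errcallback;

    /* ... heap scanning, index vacuuming, truncation ... */

    /* pop once the work the callback describes is finished */
    error_context_stack = errcallback.previous;
}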