PostgreSQL Source Code  git master
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/amapi.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "catalog/index.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "optimizer/paths.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "tcop/tcopprot.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"

Data Structures

struct  LVRelState
 
struct  LVPagePruneState
 
struct  LVSavedErrInfo
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */
 
#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define ParallelVacuumIsActive(vacrel)   ((vacrel)->pvs != NULL)
 

Typedefs

typedef struct LVRelState LVRelState
 
typedef struct LVPagePruneState LVPagePruneState
 
typedef struct LVSavedErrInfo LVSavedErrInfo
 

Enumerations

enum  VacErrPhase {
  VACUUM_ERRCB_PHASE_UNKNOWN , VACUUM_ERRCB_PHASE_SCAN_HEAP , VACUUM_ERRCB_PHASE_VACUUM_INDEX , VACUUM_ERRCB_PHASE_VACUUM_HEAP ,
  VACUUM_ERRCB_PHASE_INDEX_CLEANUP , VACUUM_ERRCB_PHASE_TRUNCATE
}
 

Functions

static void lazy_scan_heap (LVRelState *vacrel)
 
static BlockNumber lazy_scan_skip (LVRelState *vacrel, Buffer *vmbuffer, BlockNumber next_block, bool *next_unskippable_allvis, bool *skipping_current_range)
 
static bool lazy_scan_new_or_empty (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
 
static void lazy_scan_prune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, LVPagePruneState *prunestate)
 
static bool lazy_scan_noprune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *hastup, bool *recordfreespace)
 
static void lazy_vacuum (LVRelState *vacrel)
 
static bool lazy_vacuum_all_indexes (LVRelState *vacrel)
 
static void lazy_vacuum_heap_rel (LVRelState *vacrel)
 
static int lazy_vacuum_heap_page (LVRelState *vacrel, BlockNumber blkno, Buffer buffer, int index, Buffer vmbuffer)
 
static bool lazy_check_wraparound_failsafe (LVRelState *vacrel)
 
static void lazy_cleanup_all_indexes (LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_vacuum_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_cleanup_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
 
static bool should_attempt_truncation (LVRelState *vacrel)
 
static void lazy_truncate_heap (LVRelState *vacrel)
 
static BlockNumber count_nondeletable_pages (LVRelState *vacrel, bool *lock_waiter_detected)
 
static void dead_items_alloc (LVRelState *vacrel, int nworkers)
 
static void dead_items_cleanup (LVRelState *vacrel)
 
static bool heap_page_is_all_visible (LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
 
static void update_relstats_all_indexes (LVRelState *vacrel)
 
static void vacuum_error_callback (void *arg)
 
static void update_vacuum_error_info (LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
 
static void restore_vacuum_error_info (LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
 
void heap_vacuum_rel (Relation rel, VacuumParams *params, BufferAccessStrategy bstrategy)
 
static int dead_items_max_items (LVRelState *vacrel)
 

Macro Definition Documentation

◆ BYPASS_THRESHOLD_PAGES

#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */

Definition at line 94 of file vacuumlazy.c.
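
The threshold is applied against the table's total page count: index vacuuming can be bypassed only when very few pages carry LP_DEAD items. Below is a minimal, self-contained sketch of that 2% comparison using hypothetical page counts; the real decision in lazy_vacuum() also weighs dead-item memory usage and other conditions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BYPASS_THRESHOLD_PAGES 0.02     /* i.e. 2% of rel_pages */

int
main(void)
{
    /* Hypothetical figures: a 100,000-page table, 1,500 pages with LP_DEAD items */
    uint32_t rel_pages = 100000;
    uint32_t lpdead_item_pages = 1500;

    double   threshold = rel_pages * BYPASS_THRESHOLD_PAGES;
    bool     bypass_candidate = lpdead_item_pages < threshold;

    /* 1500 < 2000, so this table is a candidate for skipping index vacuuming */
    printf("bypass candidate: %s\n", bypass_candidate ? "yes" : "no");
    return 0;
}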

◆ FAILSAFE_EVERY_PAGES

#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 100 of file vacuumlazy.c.

◆ ParallelVacuumIsActive

#define ParallelVacuumIsActive (   vacrel)    ((vacrel)->pvs != NULL)

Definition at line 128 of file vacuumlazy.c.

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 122 of file vacuumlazy.c.
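
count_nondeletable_pages() prefetches in the forward direction while scanning backwards, and it finds the start of each prefetch window by masking the block number down to a multiple of PREFETCH_SIZE. The mask only works because PREFETCH_SIZE is a power of two (enforced by a StaticAssertStmt in that function). A small standalone illustration with a hypothetical block number:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t BlockNumber;           /* stand-in for PostgreSQL's BlockNumber */
#define PREFETCH_SIZE ((BlockNumber) 32)

int
main(void)
{
    BlockNumber blkno = 1000;           /* hypothetical block reached while scanning backwards */

    /* Round down to the nearest multiple of PREFETCH_SIZE (valid only for powers of two) */
    BlockNumber prefetchStart = blkno & ~(PREFETCH_SIZE - 1);

    /* Prints "prefetch window: blocks 992..1000" */
    printf("prefetch window: blocks %u..%u\n", prefetchStart, blkno);
    return 0;
}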

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 77 of file vacuumlazy.c.

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 76 of file vacuumlazy.c.

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 116 of file vacuumlazy.c.
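
Skipping only a handful of all-visible blocks defeats OS readahead and gives up a cheap chance to advance relfrozenxid, so a run of all-visible blocks is skipped only when it is at least SKIP_PAGES_THRESHOLD long. A hedged, standalone sketch of that length test with hypothetical block numbers; lazy_scan_skip() makes the real decision.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t BlockNumber;           /* stand-in for PostgreSQL's BlockNumber */
#define SKIP_PAGES_THRESHOLD ((BlockNumber) 32)

int
main(void)
{
    /* Hypothetical run of consecutive all-visible blocks found via the visibility map */
    BlockNumber next_block = 100;
    BlockNumber next_unskippable_block = 120;

    /* Only worth skipping if the run is at least SKIP_PAGES_THRESHOLD blocks long */
    bool skip_range = (next_unskippable_block - next_block) >= SKIP_PAGES_THRESHOLD;

    /* 20 < 32, so these blocks are read anyway despite being all-visible */
    printf("skipping range: %s\n", skip_range ? "yes" : "no");
    return 0;
}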

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 109 of file vacuumlazy.c.
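
Both FAILSAFE_EVERY_PAGES and VACUUM_FSM_EVERY_PAGES are defined as an amount of heap (4GB and 8GB respectively) divided by the block size, so the resulting block counts depend on BLCKSZ. A quick standalone check of the arithmetic for the default 8kB block size:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t blcksz = 8192;             /* default BLCKSZ */

    /* Same arithmetic as the two macros above */
    uint64_t failsafe_every_pages = ((uint64_t) 4 * 1024 * 1024 * 1024) / blcksz;
    uint64_t fsm_every_pages = ((uint64_t) 8 * 1024 * 1024 * 1024) / blcksz;

    /* Prints 524288 and 1048576 blocks respectively */
    printf("FAILSAFE_EVERY_PAGES = %llu, VACUUM_FSM_EVERY_PAGES = %llu\n",
           (unsigned long long) failsafe_every_pages,
           (unsigned long long) fsm_every_pages);
    return 0;
}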

◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 86 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 88 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 87 of file vacuumlazy.c.

Typedef Documentation

◆ LVPagePruneState

typedef struct LVPagePruneState LVPagePruneState

◆ LVRelState

typedef struct LVRelState LVRelState

◆ LVSavedErrInfo

typedef struct LVSavedErrInfo LVSavedErrInfo

Enumeration Type Documentation

◆ VacErrPhase

Enumerator
VACUUM_ERRCB_PHASE_UNKNOWN 
VACUUM_ERRCB_PHASE_SCAN_HEAP 
VACUUM_ERRCB_PHASE_VACUUM_INDEX 
VACUUM_ERRCB_PHASE_VACUUM_HEAP 
VACUUM_ERRCB_PHASE_INDEX_CLEANUP 
VACUUM_ERRCB_PHASE_TRUNCATE 

Definition at line 131 of file vacuumlazy.c.

132 {
133  VACUUM_ERRCB_PHASE_UNKNOWN,
134  VACUUM_ERRCB_PHASE_SCAN_HEAP,
135  VACUUM_ERRCB_PHASE_VACUUM_INDEX,
136  VACUUM_ERRCB_PHASE_VACUUM_HEAP,
137  VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
138  VACUUM_ERRCB_PHASE_TRUNCATE
139 } VacErrPhase;
VacErrPhase
Definition: vacuumlazy.c:132
@ VACUUM_ERRCB_PHASE_SCAN_HEAP
Definition: vacuumlazy.c:134
@ VACUUM_ERRCB_PHASE_VACUUM_INDEX
Definition: vacuumlazy.c:135
@ VACUUM_ERRCB_PHASE_TRUNCATE
Definition: vacuumlazy.c:138
@ VACUUM_ERRCB_PHASE_INDEX_CLEANUP
Definition: vacuumlazy.c:137
@ VACUUM_ERRCB_PHASE_VACUUM_HEAP
Definition: vacuumlazy.c:136
@ VACUUM_ERRCB_PHASE_UNKNOWN
Definition: vacuumlazy.c:133

Function Documentation

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages ( LVRelState *  vacrel,
bool *  lock_waiter_detected 
)
static

Definition at line 2962 of file vacuumlazy.c.

2963 {
2964  BlockNumber blkno;
2965  BlockNumber prefetchedUntil;
2966  instr_time starttime;
2967 
2968  /* Initialize the starttime if we check for conflicting lock requests */
2969  INSTR_TIME_SET_CURRENT(starttime);
2970 
2971  /*
2972  * Start checking blocks at what we believe relation end to be and move
2973  * backwards. (Strange coding of loop control is needed because blkno is
2974  * unsigned.) To make the scan faster, we prefetch a few blocks at a time
2975  * in forward direction, so that OS-level readahead can kick in.
2976  */
2977  blkno = vacrel->rel_pages;
 2978  StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
 2979  "prefetch size must be power of 2");
2980  prefetchedUntil = InvalidBlockNumber;
2981  while (blkno > vacrel->nonempty_pages)
2982  {
2983  Buffer buf;
2984  Page page;
2985  OffsetNumber offnum,
2986  maxoff;
2987  bool hastup;
2988 
2989  /*
2990  * Check if another process requests a lock on our relation. We are
2991  * holding an AccessExclusiveLock here, so they will be waiting. We
2992  * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
2993  * only check if that interval has elapsed once every 32 blocks to
2994  * keep the number of system calls and actual shared lock table
2995  * lookups to a minimum.
2996  */
2997  if ((blkno % 32) == 0)
2998  {
2999  instr_time currenttime;
3000  instr_time elapsed;
3001 
3002  INSTR_TIME_SET_CURRENT(currenttime);
3003  elapsed = currenttime;
3004  INSTR_TIME_SUBTRACT(elapsed, starttime);
3005  if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
 3006  >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
 3007  {
 3008  if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
 3009  {
3010  ereport(vacrel->verbose ? INFO : DEBUG2,
3011  (errmsg("table \"%s\": suspending truncate due to conflicting lock request",
3012  vacrel->relname)));
3013 
3014  *lock_waiter_detected = true;
3015  return blkno;
3016  }
3017  starttime = currenttime;
3018  }
3019  }
3020 
3021  /*
3022  * We don't insert a vacuum delay point here, because we have an
3023  * exclusive lock on the table which we want to hold for as short a
3024  * time as possible. We still need to check for interrupts however.
3025  */
 3026  CHECK_FOR_INTERRUPTS();
 3027 
3028  blkno--;
3029 
3030  /* If we haven't prefetched this lot yet, do so now. */
3031  if (prefetchedUntil > blkno)
3032  {
3033  BlockNumber prefetchStart;
3034  BlockNumber pblkno;
3035 
3036  prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
3037  for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
3038  {
3039  PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
 3040  CHECK_FOR_INTERRUPTS();
 3041  }
3042  prefetchedUntil = prefetchStart;
3043  }
3044 
3045  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
3046  vacrel->bstrategy);
3047 
3048  /* In this phase we only need shared access to the buffer */
 3049  LockBuffer(buf, BUFFER_LOCK_SHARE);
 3050 
3051  page = BufferGetPage(buf);
3052 
3053  if (PageIsNew(page) || PageIsEmpty(page))
3054  {
 3055  UnlockReleaseBuffer(buf);
 3056  continue;
3057  }
3058 
3059  hastup = false;
3060  maxoff = PageGetMaxOffsetNumber(page);
3061  for (offnum = FirstOffsetNumber;
3062  offnum <= maxoff;
3063  offnum = OffsetNumberNext(offnum))
3064  {
3065  ItemId itemid;
3066 
3067  itemid = PageGetItemId(page, offnum);
3068 
3069  /*
3070  * Note: any non-unused item should be taken as a reason to keep
3071  * this page. Even an LP_DEAD item makes truncation unsafe, since
3072  * we must not have cleaned out its index entries.
3073  */
3074  if (ItemIdIsUsed(itemid))
3075  {
3076  hastup = true;
3077  break; /* can stop scanning */
3078  }
3079  } /* scan along page */
3080 
 3081  UnlockReleaseBuffer(buf);
 3082 
3083  /* Done scanning if we found a tuple here */
3084  if (hastup)
3085  return blkno + 1;
3086  }
3087 
3088  /*
3089  * If we fall out of the loop, all the previously-thought-to-be-empty
3090  * pages still are; we need not bother to look at the last known-nonempty
3091  * page.
3092  */
3093  return vacrel->nonempty_pages;
3094 }
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
int Buffer
Definition: buf.h:23
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:584
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4008
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4226
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:751
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:111
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:284
@ RBM_NORMAL
Definition: bufmgr.h:44
static bool PageIsEmpty(Page page)
Definition: bufpage.h:220
Pointer Page
Definition: bufpage.h:78
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:240
static bool PageIsNew(Page page)
Definition: bufpage.h:230
static OffsetNumber PageGetMaxOffsetNumber(Page page)
Definition: bufpage.h:369
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:922
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define DEBUG2
Definition: elog.h:29
#define INFO
Definition: elog.h:34
#define ereport(elevel,...)
Definition: elog.h:149
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:122
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:181
#define INSTR_TIME_GET_MICROSEC(t)
Definition: instr_time.h:194
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:374
#define AccessExclusiveLock
Definition: lockdefs.h:43
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:121
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define FirstOffsetNumber
Definition: off.h:27
static char * buf
Definition: pg_test_fsync.c:67
@ MAIN_FORKNUM
Definition: relpath.h:50
bool verbose
Definition: vacuumlazy.c:182
BlockNumber nonempty_pages
Definition: vacuumlazy.c:198
Relation rel
Definition: vacuumlazy.c:144
BlockNumber rel_pages
Definition: vacuumlazy.c:192
BufferAccessStrategy bstrategy
Definition: vacuumlazy.c:149
char * relname
Definition: vacuumlazy.c:177
#define PREFETCH_SIZE
Definition: vacuumlazy.c:122
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL
Definition: vacuumlazy.c:86

References AccessExclusiveLock, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BufferGetPage(), CHECK_FOR_INTERRUPTS, DEBUG2, ereport, errmsg(), FirstOffsetNumber, INFO, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelState::nonempty_pages, OffsetNumberNext, PageGetItemId(), PageGetMaxOffsetNumber(), PageIsEmpty(), PageIsNew(), PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelState::rel, LVRelState::rel_pages, LVRelState::relname, StaticAssertStmt, UnlockReleaseBuffer(), VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and LVRelState::verbose.

Referenced by lazy_truncate_heap().

◆ dead_items_alloc()

static void dead_items_alloc ( LVRelState *  vacrel,
int  nworkers 
)
static

Definition at line 3144 of file vacuumlazy.c.

3145 {
3146  VacDeadItems *dead_items;
3147  int max_items;
3148 
3149  max_items = dead_items_max_items(vacrel);
3150  Assert(max_items >= MaxHeapTuplesPerPage);
3151 
3152  /*
3153  * Initialize state for a parallel vacuum. As of now, only one worker can
3154  * be used for an index, so we invoke parallelism only if there are at
3155  * least two indexes on a table.
3156  */
3157  if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)
3158  {
3159  /*
3160  * Since parallel workers cannot access data in temporary tables, we
3161  * can't perform parallel vacuum on them.
3162  */
3163  if (RelationUsesLocalBuffers(vacrel->rel))
3164  {
3165  /*
3166  * Give warning only if the user explicitly tries to perform a
3167  * parallel vacuum on the temporary table.
3168  */
3169  if (nworkers > 0)
3170  ereport(WARNING,
3171  (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
3172  vacrel->relname)));
3173  }
3174  else
3175  vacrel->pvs = parallel_vacuum_init(vacrel->rel, vacrel->indrels,
3176  vacrel->nindexes, nworkers,
3177  max_items,
3178  vacrel->verbose ? INFO : DEBUG2,
3179  vacrel->bstrategy);
3180 
3181  /* If parallel mode started, dead_items space is allocated in DSM */
3182  if (ParallelVacuumIsActive(vacrel))
3183  {
3184  vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs);
3185  return;
3186  }
3187  }
3188 
3189  /* Serial VACUUM case */
3190  dead_items = (VacDeadItems *) palloc(vac_max_items_to_alloc_size(max_items));
3191  dead_items->max_items = max_items;
3192  dead_items->num_items = 0;
3193 
3194  vacrel->dead_items = dead_items;
3195 }
#define WARNING
Definition: elog.h:36
#define MaxHeapTuplesPerPage
Definition: htup_details.h:572
Assert(fmt[strlen(fmt) - 1] !='\n')
void * palloc(Size size)
Definition: mcxt.c:1210
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:637
ParallelVacuumState * pvs
Definition: vacuumlazy.c:150
int nindexes
Definition: vacuumlazy.c:146
Relation * indrels
Definition: vacuumlazy.c:145
VacDeadItems * dead_items
Definition: vacuumlazy.c:191
bool do_index_vacuuming
Definition: vacuumlazy.c:162
int max_items
Definition: vacuum.h:285
int num_items
Definition: vacuum.h:286
Size vac_max_items_to_alloc_size(int max_items)
Definition: vacuum.c:2383
#define ParallelVacuumIsActive(vacrel)
Definition: vacuumlazy.c:128
static int dead_items_max_items(LVRelState *vacrel)
Definition: vacuumlazy.c:3105
VacDeadItems * parallel_vacuum_get_dead_items(ParallelVacuumState *pvs)
ParallelVacuumState * parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, int nrequested_workers, int max_items, int elevel, BufferAccessStrategy bstrategy)

References Assert(), LVRelState::bstrategy, LVRelState::dead_items, dead_items_max_items(), DEBUG2, LVRelState::do_index_vacuuming, ereport, errmsg(), LVRelState::indrels, INFO, VacDeadItems::max_items, MaxHeapTuplesPerPage, LVRelState::nindexes, VacDeadItems::num_items, palloc(), parallel_vacuum_get_dead_items(), parallel_vacuum_init(), ParallelVacuumIsActive, LVRelState::pvs, LVRelState::rel, RelationUsesLocalBuffers, LVRelState::relname, vac_max_items_to_alloc_size(), LVRelState::verbose, and WARNING.

Referenced by heap_vacuum_rel().

◆ dead_items_cleanup()

static void dead_items_cleanup ( LVRelState *  vacrel)
static

Definition at line 3201 of file vacuumlazy.c.

3202 {
3203  if (!ParallelVacuumIsActive(vacrel))
3204  {
3205  /* Don't bother with pfree here */
3206  return;
3207  }
3208 
3209  /* End parallel mode */
3210  parallel_vacuum_end(vacrel->pvs, vacrel->indstats);
3211  vacrel->pvs = NULL;
3212 }
IndexBulkDeleteResult ** indstats
Definition: vacuumlazy.c:204
void parallel_vacuum_end(ParallelVacuumState *pvs, IndexBulkDeleteResult **istats)

References LVRelState::indstats, parallel_vacuum_end(), ParallelVacuumIsActive, and LVRelState::pvs.

Referenced by heap_vacuum_rel().

◆ dead_items_max_items()

static int dead_items_max_items ( LVRelState *  vacrel)
static

Definition at line 3105 of file vacuumlazy.c.

3106 {
3107  int64 max_items;
3108  int vac_work_mem = IsAutoVacuumWorkerProcess() &&
3109  autovacuum_work_mem != -1 ?
 3110  autovacuum_work_mem : maintenance_work_mem;
 3111 
3112  if (vacrel->nindexes > 0)
3113  {
3114  BlockNumber rel_pages = vacrel->rel_pages;
3115 
3116  max_items = MAXDEADITEMS(vac_work_mem * 1024L);
3117  max_items = Min(max_items, INT_MAX);
3118  max_items = Min(max_items, MAXDEADITEMS(MaxAllocSize));
3119 
3120  /* curious coding here to ensure the multiplication can't overflow */
3121  if ((BlockNumber) (max_items / MaxHeapTuplesPerPage) > rel_pages)
3122  max_items = rel_pages * MaxHeapTuplesPerPage;
3123 
3124  /* stay sane if small maintenance_work_mem */
3125  max_items = Max(max_items, MaxHeapTuplesPerPage);
3126  }
3127  else
3128  {
3129  /* One-pass case only stores a single heap page's TIDs at a time */
3130  max_items = MaxHeapTuplesPerPage;
3131  }
3132 
3133  return (int) max_items;
3134 }
int autovacuum_work_mem
Definition: autovacuum.c:118
bool IsAutoVacuumWorkerProcess(void)
Definition: autovacuum.c:3324
#define Min(x, y)
Definition: c.h:988
#define Max(x, y)
Definition: c.h:982
int maintenance_work_mem
Definition: globals.c:127
#define MaxAllocSize
Definition: memutils.h:40
#define MAXDEADITEMS(avail_mem)
Definition: vacuum.h:292

References autovacuum_work_mem, IsAutoVacuumWorkerProcess(), maintenance_work_mem, Max, MaxAllocSize, MAXDEADITEMS, MaxHeapTuplesPerPage, Min, LVRelState::nindexes, and LVRelState::rel_pages.

Referenced by dead_items_alloc().
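
The function converts the memory budget (autovacuum_work_mem or maintenance_work_mem, in kB) into a count of dead-item TIDs and then clamps it so it never exceeds what the table could possibly contain. A simplified standalone sketch of that arithmetic with hypothetical settings; it ignores the small VacDeadItems header that MAXDEADITEMS() subtracts and the MaxAllocSize/INT_MAX clamps.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    /* Hypothetical settings: maintenance_work_mem = 64MB, a 1,000-page table */
    int64_t vac_work_mem_kb = 64 * 1024;
    int64_t rel_pages = 1000;
    int64_t max_heap_tuples_per_page = 291;     /* MaxHeapTuplesPerPage with 8kB blocks */
    int64_t tid_size = 6;                       /* sizeof(ItemPointerData) */

    /* Roughly what MAXDEADITEMS(vac_work_mem * 1024L) yields */
    int64_t max_items = (vac_work_mem_kb * 1024) / tid_size;

    /* Never reserve more TID slots than the heap could possibly hold */
    if (max_items / max_heap_tuples_per_page > rel_pages)
        max_items = rel_pages * max_heap_tuples_per_page;

    /* For this small table the clamp wins: 1000 * 291 = 291,000 items */
    printf("max dead items: %lld\n", (long long) max_items);
    return 0;
}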

◆ heap_page_is_all_visible()

static bool heap_page_is_all_visible ( LVRelState *  vacrel,
Buffer  buf,
TransactionId *  visibility_cutoff_xid,
bool *  all_frozen 
)
static

Definition at line 3226 of file vacuumlazy.c.

3229 {
3230  Page page = BufferGetPage(buf);
 3231  BlockNumber blockno = BufferGetBlockNumber(buf);
 3232  OffsetNumber offnum,
3233  maxoff;
3234  bool all_visible = true;
3235 
3236  *visibility_cutoff_xid = InvalidTransactionId;
3237  *all_frozen = true;
3238 
3239  maxoff = PageGetMaxOffsetNumber(page);
3240  for (offnum = FirstOffsetNumber;
3241  offnum <= maxoff && all_visible;
3242  offnum = OffsetNumberNext(offnum))
3243  {
3244  ItemId itemid;
3245  HeapTupleData tuple;
3246 
3247  /*
3248  * Set the offset number so that we can display it along with any
3249  * error that occurred while processing this tuple.
3250  */
3251  vacrel->offnum = offnum;
3252  itemid = PageGetItemId(page, offnum);
3253 
3254  /* Unused or redirect line pointers are of no interest */
3255  if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
3256  continue;
3257 
3258  ItemPointerSet(&(tuple.t_self), blockno, offnum);
3259 
3260  /*
3261  * Dead line pointers can have index pointers pointing to them. So
3262  * they can't be treated as visible
3263  */
3264  if (ItemIdIsDead(itemid))
3265  {
3266  all_visible = false;
3267  *all_frozen = false;
3268  break;
3269  }
3270 
3271  Assert(ItemIdIsNormal(itemid));
3272 
3273  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
3274  tuple.t_len = ItemIdGetLength(itemid);
3275  tuple.t_tableOid = RelationGetRelid(vacrel->rel);
3276 
3277  switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
3278  buf))
3279  {
3280  case HEAPTUPLE_LIVE:
3281  {
3282  TransactionId xmin;
3283 
3284  /* Check comments in lazy_scan_prune. */
 3285  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
 3286  {
3287  all_visible = false;
3288  *all_frozen = false;
3289  break;
3290  }
3291 
3292  /*
3293  * The inserter definitely committed. But is it old enough
3294  * that everyone sees it as committed?
3295  */
3296  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
3297  if (!TransactionIdPrecedes(xmin,
3298  vacrel->cutoffs.OldestXmin))
3299  {
3300  all_visible = false;
3301  *all_frozen = false;
3302  break;
3303  }
3304 
3305  /* Track newest xmin on page. */
3306  if (TransactionIdFollows(xmin, *visibility_cutoff_xid) &&
3307  TransactionIdIsNormal(xmin))
3308  *visibility_cutoff_xid = xmin;
3309 
3310  /* Check whether this tuple is already frozen or not */
3311  if (all_visible && *all_frozen &&
 3312  heap_tuple_needs_eventual_freeze(tuple.t_data))
 3313  *all_frozen = false;
3314  }
3315  break;
3316 
3317  case HEAPTUPLE_DEAD:
 3318  case HEAPTUPLE_RECENTLY_DEAD:
 3319  case HEAPTUPLE_INSERT_IN_PROGRESS:
 3320  case HEAPTUPLE_DELETE_IN_PROGRESS:
 3321  {
3322  all_visible = false;
3323  *all_frozen = false;
3324  break;
3325  }
3326  default:
3327  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
3328  break;
3329  }
3330  } /* scan along page */
3331 
3332  /* Clear the offset information once we have processed the given page. */
3333  vacrel->offnum = InvalidOffsetNumber;
3334 
3335  return all_visible;
3336 }
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2791
static Item PageGetItem(Page page, ItemId itemId)
Definition: bufpage.h:351
uint32 TransactionId
Definition: c.h:636
#define ERROR
Definition: elog.h:39
bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
Definition: heapam.c:7276
@ HEAPTUPLE_RECENTLY_DEAD
Definition: heapam.h:98
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition: heapam.h:99
@ HEAPTUPLE_LIVE
Definition: heapam.h:97
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition: heapam.h:100
@ HEAPTUPLE_DEAD
Definition: heapam.h:96
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:309
#define HeapTupleHeaderXminCommitted(tup)
Definition: htup_details.h:320
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition: itemptr.h:135
#define InvalidOffsetNumber
Definition: off.h:26
#define RelationGetRelid(relation)
Definition: rel.h:503
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
OffsetNumber offnum
Definition: vacuumlazy.c:180
struct VacuumCutoffs cutoffs
Definition: vacuumlazy.c:167
TransactionId OldestXmin
Definition: vacuum.h:266
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:280
bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition: transam.c:314
#define InvalidTransactionId
Definition: transam.h:31
#define TransactionIdIsNormal(xid)
Definition: transam.h:42

References Assert(), buf, BufferGetBlockNumber(), BufferGetPage(), LVRelState::cutoffs, elog(), ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdIsNormal, and TransactionIdPrecedes().

Referenced by lazy_scan_prune(), and lazy_vacuum_heap_page().

◆ heap_vacuum_rel()

void heap_vacuum_rel ( Relation  rel,
VacuumParams *  params,
BufferAccessStrategy  bstrategy 
)

Definition at line 305 of file vacuumlazy.c.

307 {
308  LVRelState *vacrel;
309  bool verbose,
310  instrument,
311  skipwithvm,
312  frozenxid_updated,
313  minmulti_updated;
314  BlockNumber orig_rel_pages,
315  new_rel_pages,
316  new_rel_allvisible;
317  PGRUsage ru0;
318  TimestampTz starttime = 0;
319  PgStat_Counter startreadtime = 0,
320  startwritetime = 0;
321  WalUsage startwalusage = pgWalUsage;
322  int64 StartPageHit = VacuumPageHit,
323  StartPageMiss = VacuumPageMiss,
324  StartPageDirty = VacuumPageDirty;
325  ErrorContextCallback errcallback;
326  char **indnames = NULL;
327 
328  verbose = (params->options & VACOPT_VERBOSE) != 0;
329  instrument = (verbose || (IsAutoVacuumWorkerProcess() &&
330  params->log_min_duration >= 0));
331  if (instrument)
332  {
333  pg_rusage_init(&ru0);
334  starttime = GetCurrentTimestamp();
335  if (track_io_timing)
336  {
337  startreadtime = pgStatBlockReadTime;
338  startwritetime = pgStatBlockWriteTime;
339  }
340  }
341 
 342  pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
 343  RelationGetRelid(rel));
344 
345  /*
346  * Setup error traceback support for ereport() first. The idea is to set
347  * up an error context callback to display additional information on any
348  * error during a vacuum. During different phases of vacuum, we update
349  * the state so that the error context callback always display current
350  * information.
351  *
352  * Copy the names of heap rel into local memory for error reporting
353  * purposes, too. It isn't always safe to assume that we can get the name
354  * of each rel. It's convenient for code in lazy_scan_heap to always use
355  * these temp copies.
356  */
357  vacrel = (LVRelState *) palloc0(sizeof(LVRelState));
 358  vacrel->dbname = get_database_name(MyDatabaseId);
 359  vacrel->relnamespace = get_namespace_name(RelationGetNamespace(rel));
 360  vacrel->relname = pstrdup(RelationGetRelationName(rel));
 361  vacrel->indname = NULL;
 362  vacrel->phase = VACUUM_ERRCB_PHASE_UNKNOWN;
 363  vacrel->verbose = verbose;
364  errcallback.callback = vacuum_error_callback;
365  errcallback.arg = vacrel;
366  errcallback.previous = error_context_stack;
367  error_context_stack = &errcallback;
368 
369  /* Set up high level stuff about rel and its indexes */
370  vacrel->rel = rel;
371  vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes,
372  &vacrel->indrels);
373  vacrel->bstrategy = bstrategy;
374  if (instrument && vacrel->nindexes > 0)
375  {
376  /* Copy index names used by instrumentation (not error reporting) */
377  indnames = palloc(sizeof(char *) * vacrel->nindexes);
378  for (int i = 0; i < vacrel->nindexes; i++)
379  indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
380  }
381 
382  /*
383  * The index_cleanup param either disables index vacuuming and cleanup or
384  * forces it to go ahead when we would otherwise apply the index bypass
385  * optimization. The default is 'auto', which leaves the final decision
386  * up to lazy_vacuum().
387  *
388  * The truncate param allows user to avoid attempting relation truncation,
389  * though it can't force truncation to happen.
390  */
 391  Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED);
 392  Assert(params->truncate != VACOPTVALUE_UNSPECIFIED &&
 393  params->truncate != VACOPTVALUE_AUTO);
394  vacrel->failsafe_active = false;
395  vacrel->consider_bypass_optimization = true;
396  vacrel->do_index_vacuuming = true;
397  vacrel->do_index_cleanup = true;
398  vacrel->do_rel_truncate = (params->truncate != VACOPTVALUE_DISABLED);
399  if (params->index_cleanup == VACOPTVALUE_DISABLED)
400  {
401  /* Force disable index vacuuming up-front */
402  vacrel->do_index_vacuuming = false;
403  vacrel->do_index_cleanup = false;
404  }
405  else if (params->index_cleanup == VACOPTVALUE_ENABLED)
406  {
407  /* Force index vacuuming. Note that failsafe can still bypass. */
408  vacrel->consider_bypass_optimization = false;
409  }
410  else
411  {
412  /* Default/auto, make all decisions dynamically */
 413  Assert(params->index_cleanup == VACOPTVALUE_AUTO);
 414  }
415 
416  /* Initialize page counters explicitly (be tidy) */
417  vacrel->scanned_pages = 0;
418  vacrel->removed_pages = 0;
419  vacrel->frozen_pages = 0;
420  vacrel->lpdead_item_pages = 0;
421  vacrel->missed_dead_pages = 0;
422  vacrel->nonempty_pages = 0;
423  /* dead_items_alloc allocates vacrel->dead_items later on */
424 
425  /* Allocate/initialize output statistics state */
426  vacrel->new_rel_tuples = 0;
427  vacrel->new_live_tuples = 0;
428  vacrel->indstats = (IndexBulkDeleteResult **)
429  palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
430 
431  /* Initialize remaining counters (be tidy) */
432  vacrel->num_index_scans = 0;
433  vacrel->tuples_deleted = 0;
434  vacrel->tuples_frozen = 0;
435  vacrel->lpdead_items = 0;
436  vacrel->live_tuples = 0;
437  vacrel->recently_dead_tuples = 0;
438  vacrel->missed_dead_tuples = 0;
439 
440  /*
441  * Get cutoffs that determine which deleted tuples are considered DEAD,
442  * not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze. Then determine
443  * the extent of the blocks that we'll scan in lazy_scan_heap. It has to
444  * happen in this order to ensure that the OldestXmin cutoff field works
445  * as an upper bound on the XIDs stored in the pages we'll actually scan
446  * (NewRelfrozenXid tracking must never be allowed to miss unfrozen XIDs).
447  *
448  * Next acquire vistest, a related cutoff that's used in heap_page_prune.
449  * We expect vistest will always make heap_page_prune remove any deleted
450  * tuple whose xmax is < OldestXmin. lazy_scan_prune must never become
451  * confused about whether a tuple should be frozen or removed. (In the
452  * future we might want to teach lazy_scan_prune to recompute vistest from
453  * time to time, to increase the number of dead tuples it can prune away.)
454  */
455  vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
456  vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
457  vacrel->vistest = GlobalVisTestFor(rel);
458  /* Initialize state used to track oldest extant XID/MXID */
459  vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin;
460  vacrel->NewRelminMxid = vacrel->cutoffs.OldestMxact;
461  vacrel->skippedallvis = false;
462  skipwithvm = true;
 463  if (params->options & VACOPT_DISABLE_PAGE_SKIPPING)
 464  {
465  /*
466  * Force aggressive mode, and disable skipping blocks using the
467  * visibility map (even those set all-frozen)
468  */
469  vacrel->aggressive = true;
470  skipwithvm = false;
471  }
472 
473  vacrel->skipwithvm = skipwithvm;
474 
475  if (verbose)
476  {
477  if (vacrel->aggressive)
478  ereport(INFO,
479  (errmsg("aggressively vacuuming \"%s.%s.%s\"",
480  vacrel->dbname, vacrel->relnamespace,
481  vacrel->relname)));
482  else
483  ereport(INFO,
484  (errmsg("vacuuming \"%s.%s.%s\"",
485  vacrel->dbname, vacrel->relnamespace,
486  vacrel->relname)));
487  }
488 
489  /*
490  * Allocate dead_items array memory using dead_items_alloc. This handles
491  * parallel VACUUM initialization as part of allocating shared memory
492  * space used for dead_items. (But do a failsafe precheck first, to
493  * ensure that parallel VACUUM won't be attempted at all when relfrozenxid
494  * is already dangerously old.)
495  */
 496  lazy_check_wraparound_failsafe(vacrel);
 497  dead_items_alloc(vacrel, params->nworkers);
498 
499  /*
500  * Call lazy_scan_heap to perform all required heap pruning, index
501  * vacuuming, and heap vacuuming (plus related processing)
502  */
503  lazy_scan_heap(vacrel);
504 
505  /*
506  * Free resources managed by dead_items_alloc. This ends parallel mode in
507  * passing when necessary.
508  */
509  dead_items_cleanup(vacrel);
 510  Assert(!IsInParallelMode());
 511 
512  /*
513  * Update pg_class entries for each of rel's indexes where appropriate.
514  *
515  * Unlike the later update to rel's pg_class entry, this is not critical.
516  * Maintains relpages/reltuples statistics used by the planner only.
517  */
518  if (vacrel->do_index_cleanup)
 519  update_relstats_all_indexes(vacrel);
 520 
521  /* Done with rel's indexes */
522  vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
523 
524  /* Optionally truncate rel */
525  if (should_attempt_truncation(vacrel))
526  lazy_truncate_heap(vacrel);
527 
528  /* Pop the error context stack */
529  error_context_stack = errcallback.previous;
530 
531  /* Report that we are now doing final cleanup */
 532  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
 533  PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
 534 
535  /*
536  * Prepare to update rel's pg_class entry.
537  *
538  * Aggressive VACUUMs must always be able to advance relfrozenxid to a
539  * value >= FreezeLimit, and relminmxid to a value >= MultiXactCutoff.
540  * Non-aggressive VACUUMs may advance them by any amount, or not at all.
541  */
542  Assert(vacrel->NewRelfrozenXid == vacrel->cutoffs.OldestXmin ||
 543  TransactionIdPrecedesOrEquals(vacrel->aggressive ? vacrel->cutoffs.FreezeLimit :
 544  vacrel->cutoffs.relfrozenxid,
545  vacrel->NewRelfrozenXid));
546  Assert(vacrel->NewRelminMxid == vacrel->cutoffs.OldestMxact ||
 547  MultiXactIdPrecedesOrEquals(vacrel->aggressive ? vacrel->cutoffs.MultiXactCutoff :
 548  vacrel->cutoffs.relminmxid,
549  vacrel->NewRelminMxid));
550  if (vacrel->skippedallvis)
551  {
552  /*
553  * Must keep original relfrozenxid in a non-aggressive VACUUM that
554  * chose to skip an all-visible page range. The state that tracks new
555  * values will have missed unfrozen XIDs from the pages we skipped.
556  */
557  Assert(!vacrel->aggressive);
 558  vacrel->NewRelfrozenXid = InvalidTransactionId;
 559  vacrel->NewRelminMxid = InvalidMultiXactId;
 560  }
561 
562  /*
563  * For safety, clamp relallvisible to be not more than what we're setting
564  * pg_class.relpages to
565  */
566  new_rel_pages = vacrel->rel_pages; /* After possible rel truncation */
567  visibilitymap_count(rel, &new_rel_allvisible, NULL);
568  if (new_rel_allvisible > new_rel_pages)
569  new_rel_allvisible = new_rel_pages;
570 
571  /*
572  * Now actually update rel's pg_class entry.
573  *
574  * In principle new_live_tuples could be -1 indicating that we (still)
575  * don't know the tuple count. In practice that can't happen, since we
576  * scan every page that isn't skipped using the visibility map.
577  */
578  vac_update_relstats(rel, new_rel_pages, vacrel->new_live_tuples,
579  new_rel_allvisible, vacrel->nindexes > 0,
580  vacrel->NewRelfrozenXid, vacrel->NewRelminMxid,
581  &frozenxid_updated, &minmulti_updated, false);
582 
583  /*
584  * Report results to the cumulative stats system, too.
585  *
586  * Deliberately avoid telling the stats system about LP_DEAD items that
587  * remain in the table due to VACUUM bypassing index and heap vacuuming.
588  * ANALYZE will consider the remaining LP_DEAD items to be dead "tuples".
589  * It seems like a good idea to err on the side of not vacuuming again too
590  * soon in cases where the failsafe prevented significant amounts of heap
591  * vacuuming.
592  */
 593  pgstat_report_vacuum(RelationGetRelid(rel),
 594  rel->rd_rel->relisshared,
595  Max(vacrel->new_live_tuples, 0),
596  vacrel->recently_dead_tuples +
597  vacrel->missed_dead_tuples);
 598  pgstat_progress_end_command();
 599 
600  if (instrument)
601  {
602  TimestampTz endtime = GetCurrentTimestamp();
603 
604  if (verbose || params->log_min_duration == 0 ||
605  TimestampDifferenceExceeds(starttime, endtime,
606  params->log_min_duration))
607  {
608  long secs_dur;
609  int usecs_dur;
610  WalUsage walusage;
 611  StringInfoData buf;
 612  char *msgfmt;
613  int32 diff;
614  int64 PageHitOp = VacuumPageHit - StartPageHit,
615  PageMissOp = VacuumPageMiss - StartPageMiss,
616  PageDirtyOp = VacuumPageDirty - StartPageDirty;
617  double read_rate = 0,
618  write_rate = 0;
619 
620  TimestampDifference(starttime, endtime, &secs_dur, &usecs_dur);
621  memset(&walusage, 0, sizeof(WalUsage));
622  WalUsageAccumDiff(&walusage, &pgWalUsage, &startwalusage);
623 
 624  initStringInfo(&buf);
 625  if (verbose)
626  {
627  /*
628  * Aggressiveness already reported earlier, in dedicated
629  * VACUUM VERBOSE ereport
630  */
631  Assert(!params->is_wraparound);
632  msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
633  }
634  else if (params->is_wraparound)
635  {
636  /*
637  * While it's possible for a VACUUM to be both is_wraparound
638  * and !aggressive, that's just a corner-case -- is_wraparound
639  * implies aggressive. Produce distinct output for the corner
640  * case all the same, just in case.
641  */
642  if (vacrel->aggressive)
643  msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
644  else
645  msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
646  }
647  else
648  {
649  if (vacrel->aggressive)
650  msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
651  else
652  msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
653  }
654  appendStringInfo(&buf, msgfmt,
655  vacrel->dbname,
656  vacrel->relnamespace,
657  vacrel->relname,
658  vacrel->num_index_scans);
659  appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total)\n"),
660  vacrel->removed_pages,
661  new_rel_pages,
662  vacrel->scanned_pages,
663  orig_rel_pages == 0 ? 100.0 :
664  100.0 * vacrel->scanned_pages / orig_rel_pages);
 665  appendStringInfo(&buf,
 666  _("tuples: %lld removed, %lld remain, %lld are dead but not yet removable\n"),
667  (long long) vacrel->tuples_deleted,
668  (long long) vacrel->new_rel_tuples,
669  (long long) vacrel->recently_dead_tuples);
670  if (vacrel->missed_dead_tuples > 0)
 671  appendStringInfo(&buf,
 672  _("tuples missed: %lld dead from %u pages not removed due to cleanup lock contention\n"),
673  (long long) vacrel->missed_dead_tuples,
674  vacrel->missed_dead_pages);
675  diff = (int32) (ReadNextTransactionId() -
676  vacrel->cutoffs.OldestXmin);
 677  appendStringInfo(&buf,
 678  _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
679  vacrel->cutoffs.OldestXmin, diff);
680  if (frozenxid_updated)
681  {
682  diff = (int32) (vacrel->NewRelfrozenXid -
683  vacrel->cutoffs.relfrozenxid);
 684  appendStringInfo(&buf,
 685  _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
686  vacrel->NewRelfrozenXid, diff);
687  }
688  if (minmulti_updated)
689  {
690  diff = (int32) (vacrel->NewRelminMxid -
691  vacrel->cutoffs.relminmxid);
 692  appendStringInfo(&buf,
 693  _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
694  vacrel->NewRelminMxid, diff);
695  }
696  appendStringInfo(&buf, _("frozen: %u pages from table (%.2f%% of total) had %lld tuples frozen\n"),
697  vacrel->frozen_pages,
698  orig_rel_pages == 0 ? 100.0 :
699  100.0 * vacrel->frozen_pages / orig_rel_pages,
700  (long long) vacrel->tuples_frozen);
701  if (vacrel->do_index_vacuuming)
702  {
703  if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
704  appendStringInfoString(&buf, _("index scan not needed: "));
705  else
706  appendStringInfoString(&buf, _("index scan needed: "));
707 
708  msgfmt = _("%u pages from table (%.2f%% of total) had %lld dead item identifiers removed\n");
709  }
710  else
711  {
712  if (!vacrel->failsafe_active)
713  appendStringInfoString(&buf, _("index scan bypassed: "));
714  else
715  appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
716 
717  msgfmt = _("%u pages from table (%.2f%% of total) have %lld dead item identifiers\n");
718  }
719  appendStringInfo(&buf, msgfmt,
720  vacrel->lpdead_item_pages,
721  orig_rel_pages == 0 ? 100.0 :
722  100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
723  (long long) vacrel->lpdead_items);
724  for (int i = 0; i < vacrel->nindexes; i++)
725  {
726  IndexBulkDeleteResult *istat = vacrel->indstats[i];
727 
728  if (!istat)
729  continue;
730 
 731  appendStringInfo(&buf,
 732  _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
733  indnames[i],
734  istat->num_pages,
735  istat->pages_newly_deleted,
736  istat->pages_deleted,
737  istat->pages_free);
738  }
739  if (track_io_timing)
740  {
741  double read_ms = (double) (pgStatBlockReadTime - startreadtime) / 1000;
742  double write_ms = (double) (pgStatBlockWriteTime - startwritetime) / 1000;
743 
744  appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
745  read_ms, write_ms);
746  }
747  if (secs_dur > 0 || usecs_dur > 0)
748  {
749  read_rate = (double) BLCKSZ * PageMissOp / (1024 * 1024) /
750  (secs_dur + usecs_dur / 1000000.0);
751  write_rate = (double) BLCKSZ * PageDirtyOp / (1024 * 1024) /
752  (secs_dur + usecs_dur / 1000000.0);
753  }
754  appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
755  read_rate, write_rate);
 756  appendStringInfo(&buf,
 757  _("buffer usage: %lld hits, %lld misses, %lld dirtied\n"),
758  (long long) PageHitOp,
759  (long long) PageMissOp,
760  (long long) PageDirtyOp);
 761  appendStringInfo(&buf,
 762  _("WAL usage: %lld records, %lld full page images, %llu bytes\n"),
763  (long long) walusage.wal_records,
764  (long long) walusage.wal_fpi,
765  (unsigned long long) walusage.wal_bytes);
766  appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
767 
768  ereport(verbose ? INFO : LOG,
769  (errmsg_internal("%s", buf.data)));
770  pfree(buf.data);
771  }
772  }
773 
774  /* Cleanup index statistics and index names */
775  for (int i = 0; i < vacrel->nindexes; i++)
776  {
777  if (vacrel->indstats[i])
778  pfree(vacrel->indstats[i]);
779 
780  if (instrument)
781  pfree(indnames[i]);
782  }
783 }
void TimestampDifference(TimestampTz start_time, TimestampTz stop_time, long *secs, int *microsecs)
Definition: timestamp.c:1667
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1727
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1582
void pgstat_progress_start_command(ProgressCommandType cmdtype, Oid relid)
void pgstat_progress_update_param(int index, int64 val)
void pgstat_progress_end_command(void)
@ PROGRESS_COMMAND_VACUUM
bool track_io_timing
Definition: bufmgr.c:137
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:161
signed int int32
Definition: c.h:478
int64 TimestampTz
Definition: timestamp.h:39
char * get_database_name(Oid dbid)
Definition: dbcommands.c:3023
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1156
ErrorContextCallback * error_context_stack
Definition: elog.c:95
#define _(x)
Definition: elog.c:91
#define LOG
Definition: elog.h:31
int64 VacuumPageHit
Definition: globals.c:148
int64 VacuumPageMiss
Definition: globals.c:149
int64 VacuumPageDirty
Definition: globals.c:150
Oid MyDatabaseId
Definition: globals.c:89
int verbose
WalUsage pgWalUsage
Definition: instrument.c:22
void WalUsageAccumDiff(WalUsage *dst, const WalUsage *add, const WalUsage *sub)
Definition: instrument.c:280
int i
Definition: isn.c:73
#define NoLock
Definition: lockdefs.h:34
#define RowExclusiveLock
Definition: lockdefs.h:38
char * get_namespace_name(Oid nspid)
Definition: lsyscache.c:3324
char * pstrdup(const char *in)
Definition: mcxt.c:1624
void pfree(void *pointer)
Definition: mcxt.c:1436
void * palloc0(Size size)
Definition: mcxt.c:1241
bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3170
#define InvalidMultiXactId
Definition: multixact.h:24
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
int64 PgStat_Counter
Definition: pgstat.h:89
PgStat_Counter pgStatBlockReadTime
PgStat_Counter pgStatBlockWriteTime
void pgstat_report_vacuum(Oid tableoid, bool shared, PgStat_Counter livetuples, PgStat_Counter deadtuples)
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4091
#define PROGRESS_VACUUM_PHASE_FINAL_CLEANUP
Definition: progress.h:35
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
#define RelationGetRelationName(relation)
Definition: rel.h:537
#define RelationGetNamespace(relation)
Definition: rel.h:544
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:91
void appendStringInfoString(StringInfo str, const char *s)
Definition: stringinfo.c:176
void initStringInfo(StringInfo str)
Definition: stringinfo.c:59
struct ErrorContextCallback * previous
Definition: elog.h:295
void(* callback)(void *arg)
Definition: elog.h:296
BlockNumber pages_deleted
Definition: genam.h:81
BlockNumber pages_newly_deleted
Definition: genam.h:80
BlockNumber pages_free
Definition: genam.h:82
BlockNumber num_pages
Definition: genam.h:76
int64 tuples_deleted
Definition: vacuumlazy.c:209
bool do_rel_truncate
Definition: vacuumlazy.c:164
BlockNumber scanned_pages
Definition: vacuumlazy.c:193
bool aggressive
Definition: vacuumlazy.c:153
bool failsafe_active
Definition: vacuumlazy.c:157
GlobalVisState * vistest
Definition: vacuumlazy.c:168
BlockNumber removed_pages
Definition: vacuumlazy.c:194
int num_index_scans
Definition: vacuumlazy.c:207
double new_live_tuples
Definition: vacuumlazy.c:202
double new_rel_tuples
Definition: vacuumlazy.c:201
TransactionId NewRelfrozenXid
Definition: vacuumlazy.c:170
bool consider_bypass_optimization
Definition: vacuumlazy.c:159
int64 recently_dead_tuples
Definition: vacuumlazy.c:213
int64 tuples_frozen
Definition: vacuumlazy.c:210
BlockNumber frozen_pages
Definition: vacuumlazy.c:195
char * dbname
Definition: vacuumlazy.c:175
BlockNumber missed_dead_pages
Definition: vacuumlazy.c:197
char * relnamespace
Definition: vacuumlazy.c:176
int64 live_tuples
Definition: vacuumlazy.c:212
int64 lpdead_items
Definition: vacuumlazy.c:211
bool skippedallvis
Definition: vacuumlazy.c:172
BlockNumber lpdead_item_pages
Definition: vacuumlazy.c:196
bool skipwithvm
Definition: vacuumlazy.c:155
bool do_index_cleanup
Definition: vacuumlazy.c:163
MultiXactId NewRelminMxid
Definition: vacuumlazy.c:171
int64 missed_dead_tuples
Definition: vacuumlazy.c:214
VacErrPhase phase
Definition: vacuumlazy.c:181
char * indname
Definition: vacuumlazy.c:178
Form_pg_class rd_rel
Definition: rel.h:110
TransactionId FreezeLimit
Definition: vacuum.h:276
TransactionId relfrozenxid
Definition: vacuum.h:250
MultiXactId relminmxid
Definition: vacuum.h:251
MultiXactId MultiXactCutoff
Definition: vacuum.h:277
MultiXactId OldestMxact
Definition: vacuum.h:267
int nworkers
Definition: vacuum.h:238
VacOptValue truncate
Definition: vacuum.h:231
bits32 options
Definition: vacuum.h:219
bool is_wraparound
Definition: vacuum.h:226
int log_min_duration
Definition: vacuum.h:227
VacOptValue index_cleanup
Definition: vacuum.h:230
uint64 wal_bytes
Definition: instrument.h:53
int64 wal_fpi
Definition: instrument.h:52
int64 wal_records
Definition: instrument.h:51
bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
Definition: transam.c:299
static TransactionId ReadNextTransactionId(void)
Definition: transam.h:315
void vac_open_indexes(Relation relation, LOCKMODE lockmode, int *nindexes, Relation **Irel)
Definition: vacuum.c:2147
void vac_update_relstats(Relation relation, BlockNumber num_pages, double num_tuples, BlockNumber num_all_visible_pages, bool hasindex, TransactionId frozenxid, MultiXactId minmulti, bool *frozenxid_updated, bool *minmulti_updated, bool in_outer_xact)
Definition: vacuum.c:1309
void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
Definition: vacuum.c:2190
bool vacuum_get_cutoffs(Relation rel, const VacuumParams *params, struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:964
#define VACOPT_VERBOSE
Definition: vacuum.h:185
@ VACOPTVALUE_AUTO
Definition: vacuum.h:206
@ VACOPTVALUE_ENABLED
Definition: vacuum.h:208
@ VACOPTVALUE_UNSPECIFIED
Definition: vacuum.h:205
@ VACOPTVALUE_DISABLED
Definition: vacuum.h:207
#define VACOPT_DISABLE_PAGE_SKIPPING
Definition: vacuum.h:191
static void dead_items_cleanup(LVRelState *vacrel)
Definition: vacuumlazy.c:3201
static void update_relstats_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:3342
static void vacuum_error_callback(void *arg)
Definition: vacuumlazy.c:3377
static void lazy_truncate_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:2831
static bool should_attempt_truncation(LVRelState *vacrel)
Definition: vacuumlazy.c:2810
static void lazy_scan_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:822
static bool lazy_check_wraparound_failsafe(LVRelState *vacrel)
Definition: vacuumlazy.c:2616
static void dead_items_alloc(LVRelState *vacrel, int nworkers)
Definition: vacuumlazy.c:3144
void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
bool IsInParallelMode(void)
Definition: xact.c:1069

References _, LVRelState::aggressive, appendStringInfo(), appendStringInfoString(), ErrorContextCallback::arg, Assert(), LVRelState::bstrategy, buf, ErrorContextCallback::callback, LVRelState::consider_bypass_optimization, LVRelState::cutoffs, LVRelState::dbname, dead_items_alloc(), dead_items_cleanup(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errmsg(), errmsg_internal(), error_context_stack, LVRelState::failsafe_active, VacuumCutoffs::FreezeLimit, LVRelState::frozen_pages, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), GlobalVisTestFor(), i, VacuumParams::index_cleanup, LVRelState::indname, LVRelState::indrels, LVRelState::indstats, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, VacuumParams::is_wraparound, IsAutoVacuumWorkerProcess(), IsInParallelMode(), lazy_check_wraparound_failsafe(), lazy_scan_heap(), lazy_truncate_heap(), LVRelState::live_tuples, LOG, VacuumParams::log_min_duration, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, Max, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, VacuumCutoffs::MultiXactCutoff, MultiXactIdPrecedesOrEquals(), MyDatabaseId, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, NoLock, LVRelState::nonempty_pages, LVRelState::num_index_scans, IndexBulkDeleteResult::num_pages, VacuumParams::nworkers, VacuumCutoffs::OldestMxact, VacuumCutoffs::OldestXmin, VacuumParams::options, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, IndexBulkDeleteResult::pages_newly_deleted, palloc(), palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), pgStatBlockReadTime, pgStatBlockWriteTime, pgWalUsage, LVRelState::phase, ErrorContextCallback::previous, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, pstrdup(), RelationData::rd_rel, ReadNextTransactionId(), LVRelState::recently_dead_tuples, LVRelState::rel, LVRelState::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, VacuumCutoffs::relfrozenxid, VacuumCutoffs::relminmxid, LVRelState::relname, LVRelState::relnamespace, LVRelState::removed_pages, RowExclusiveLock, LVRelState::scanned_pages, should_attempt_truncation(), LVRelState::skippedallvis, LVRelState::skipwithvm, TimestampDifference(), TimestampDifferenceExceeds(), track_io_timing, TransactionIdPrecedesOrEquals(), VacuumParams::truncate, LVRelState::tuples_deleted, LVRelState::tuples_frozen, update_relstats_all_indexes(), vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_VERBOSE, VACOPTVALUE_AUTO, VACOPTVALUE_DISABLED, VACOPTVALUE_ENABLED, VACOPTVALUE_UNSPECIFIED, VACUUM_ERRCB_PHASE_UNKNOWN, vacuum_error_callback(), vacuum_get_cutoffs(), VacuumPageDirty, VacuumPageHit, VacuumPageMiss, LVRelState::verbose, verbose, visibilitymap_count(), LVRelState::vistest, WalUsage::wal_bytes, WalUsage::wal_fpi, WalUsage::wal_records, and WalUsageAccumDiff().

◆ lazy_check_wraparound_failsafe()

static bool lazy_check_wraparound_failsafe ( LVRelState *  vacrel)
static

Definition at line 2616 of file vacuumlazy.c.

2617 {
2618  /* Don't warn more than once per VACUUM */
2619  if (vacrel->failsafe_active)
2620  return true;
2621 
 2622  if (unlikely(vacuum_xid_failsafe_check(&vacrel->cutoffs)))
 2623  {
2624  vacrel->failsafe_active = true;
2625 
2626  /* Disable index vacuuming, index cleanup, and heap rel truncation */
2627  vacrel->do_index_vacuuming = false;
2628  vacrel->do_index_cleanup = false;
2629  vacrel->do_rel_truncate = false;
2630 
2631  ereport(WARNING,
2632  (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
2633  vacrel->dbname, vacrel->relnamespace, vacrel->relname,
2634  vacrel->num_index_scans),
2635  errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
2636  errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
2637  "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
2638 
2639  /* Stop applying cost limits from this point on */
2640  VacuumCostActive = false;
2641  VacuumCostBalance = 0;
2642 
2643  return true;
2644  }
2645 
2646  return false;
2647 }
#define unlikely(x)
Definition: c.h:295
int errdetail(const char *fmt,...)
Definition: elog.c:1202
int errhint(const char *fmt,...)
Definition: elog.c:1316
bool VacuumCostActive
Definition: globals.c:153
int VacuumCostBalance
Definition: globals.c:152
bool vacuum_xid_failsafe_check(const struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:1151

References LVRelState::cutoffs, LVRelState::dbname, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errdetail(), errhint(), errmsg(), LVRelState::failsafe_active, LVRelState::num_index_scans, LVRelState::relname, LVRelState::relnamespace, unlikely, vacuum_xid_failsafe_check(), VacuumCostActive, VacuumCostBalance, and WARNING.

Referenced by heap_vacuum_rel(), lazy_scan_heap(), and lazy_vacuum_all_indexes().
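
The failsafe trips when vacuum_xid_failsafe_check() decides the table's relfrozenxid (or relminmxid) has aged past the failsafe cutoff, which is derived from the vacuum_failsafe_age GUC and, by assumption here, not allowed to fall below roughly 105% of autovacuum_freeze_max_age. A standalone sketch of that comparison with hypothetical values; the real check uses TransactionId/MultiXactId arithmetic rather than plain integers, so treat this only as an illustration of the thresholds involved.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    /* Hypothetical GUC settings and table state */
    int64_t vacuum_failsafe_age = 1600000000;
    int64_t autovacuum_freeze_max_age = 200000000;
    int64_t relfrozenxid_age = 1700000000;      /* how far relfrozenxid lags behind */

    /* Effective cutoff: vacuum_failsafe_age, floored at ~105% of autovacuum_freeze_max_age */
    int64_t cutoff = vacuum_failsafe_age;
    int64_t min_cutoff = autovacuum_freeze_max_age * 105 / 100;

    if (cutoff < min_cutoff)
        cutoff = min_cutoff;

    bool trigger = relfrozenxid_age > cutoff;

    /* 1.7 billion > 1.6 billion, so the failsafe would trigger here */
    printf("failsafe triggers: %s\n", trigger ? "yes" : "no");
    return 0;
}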

◆ lazy_cleanup_all_indexes()

static void lazy_cleanup_all_indexes ( LVRelState *  vacrel)
static

Definition at line 2653 of file vacuumlazy.c.

2654 {
2655  double reltuples = vacrel->new_rel_tuples;
2656  bool estimated_count = vacrel->scanned_pages < vacrel->rel_pages;
2657 
2658  Assert(vacrel->do_index_cleanup);
2659  Assert(vacrel->nindexes > 0);
2660 
2661  /* Report that we are now cleaning up indexes */
 2662  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
 2663  PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);
 2664 
2665  if (!ParallelVacuumIsActive(vacrel))
2666  {
2667  for (int idx = 0; idx < vacrel->nindexes; idx++)
2668  {
2669  Relation indrel = vacrel->indrels[idx];
2670  IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2671 
2672  vacrel->indstats[idx] =
2673  lazy_cleanup_one_index(indrel, istat, reltuples,
2674  estimated_count, vacrel);
2675  }
2676  }
2677  else
2678  {
2679  /* Outsource everything to parallel variant */
2680  parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
2681  vacrel->num_index_scans,
2682  estimated_count);
2683  }
2684 }
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
#define PROGRESS_VACUUM_PHASE_INDEX_CLEANUP
Definition: progress.h:33
static IndexBulkDeleteResult * lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
Definition: vacuumlazy.c:2747
void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, bool estimated_count)

References Assert(), LVRelState::do_index_cleanup, idx(), LVRelState::indrels, LVRelState::indstats, lazy_cleanup_one_index(), LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::num_index_scans, parallel_vacuum_cleanup_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, LVRelState::pvs, LVRelState::rel_pages, and LVRelState::scanned_pages.

Referenced by lazy_scan_heap().

◆ lazy_cleanup_one_index()

static IndexBulkDeleteResult * lazy_cleanup_one_index ( Relation  indrel,
IndexBulkDeleteResult *  istat,
double  reltuples,
bool  estimated_count,
LVRelState *  vacrel 
)
static

Definition at line 2747 of file vacuumlazy.c.

2750 {
2751  IndexVacuumInfo ivinfo;
2752  LVSavedErrInfo saved_err_info;
2753 
2754  ivinfo.index = indrel;
2755  ivinfo.analyze_only = false;
2756  ivinfo.report_progress = false;
2757  ivinfo.estimated_count = estimated_count;
2758  ivinfo.message_level = DEBUG2;
2759 
2760  ivinfo.num_heap_tuples = reltuples;
2761  ivinfo.strategy = vacrel->bstrategy;
2762 
2763  /*
2764  * Update error traceback information.
2765  *
2766  * The index name is saved during this phase and restored immediately
2767  * after this phase. See vacuum_error_callback.
2768  */
2769  Assert(vacrel->indname == NULL);
2770  vacrel->indname = pstrdup(RelationGetRelationName(indrel));
2771  update_vacuum_error_info(vacrel, &saved_err_info,
2772  VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
2773  InvalidBlockNumber, InvalidOffsetNumber);
2774 
2775  istat = vac_cleanup_one_index(&ivinfo, istat);
2776 
2777  /* Revert to the previous phase information for error traceback */
2778  restore_vacuum_error_info(vacrel, &saved_err_info);
2779  pfree(vacrel->indname);
2780  vacrel->indname = NULL;
2781 
2782  return istat;
2783 }
Relation index
Definition: genam.h:46
double num_heap_tuples
Definition: genam.h:51
bool analyze_only
Definition: genam.h:47
BufferAccessStrategy strategy
Definition: genam.h:52
bool report_progress
Definition: genam.h:48
int message_level
Definition: genam.h:50
bool estimated_count
Definition: genam.h:49
IndexBulkDeleteResult * vac_cleanup_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat)
Definition: vacuum.c:2358
static void restore_vacuum_error_info(LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
Definition: vacuumlazy.c:3460
static void update_vacuum_error_info(LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
Definition: vacuumlazy.c:3441

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_cleanup_one_index(), and VACUUM_ERRCB_PHASE_INDEX_CLEANUP.

Referenced by lazy_cleanup_all_indexes().
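
Apart from filling in IndexVacuumInfo, the notable pattern here is the save/update/restore of the error-traceback state around the vac_cleanup_one_index() call, so that an ERROR raised inside the index AM reports the index name and the INDEX_CLEANUP phase. A standalone sketch of that pattern with simplified types; FakeErrInfo and the helper names are illustrative, not the real LVSavedErrInfo machinery:

#include <stdio.h>
#include <string.h>

typedef enum { PHASE_UNKNOWN, PHASE_SCAN_HEAP, PHASE_INDEX_CLEANUP } FakePhase;

typedef struct FakeErrInfo
{
    FakePhase   phase;
    unsigned    blkno;
    char        indname[64];
} FakeErrInfo;

static FakeErrInfo current;     /* what an error callback would read */

static void
update_err_info(FakeErrInfo *saved, FakePhase phase, const char *indname)
{
    *saved = current;                           /* save previous state */
    current.phase = phase;
    snprintf(current.indname, sizeof(current.indname), "%s", indname);
}

static void
restore_err_info(const FakeErrInfo *saved)
{
    current = *saved;                           /* revert after the phase */
}

int
main(void)
{
    FakeErrInfo saved;

    update_err_info(&saved, PHASE_INDEX_CLEANUP, "pgbench_accounts_pkey");
    /* ... index AM cleanup runs here; errors would report current.indname ... */
    printf("phase=%d index=%s\n", current.phase, current.indname);
    restore_err_info(&saved);
    return 0;
}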

◆ lazy_scan_heap()

static void lazy_scan_heap ( LVRelState * vacrel)
static

Definition at line 822 of file vacuumlazy.c.

823 {
824  BlockNumber rel_pages = vacrel->rel_pages,
825  blkno,
826  next_unskippable_block,
827  next_fsm_block_to_vacuum = 0;
828  VacDeadItems *dead_items = vacrel->dead_items;
829  Buffer vmbuffer = InvalidBuffer;
830  bool next_unskippable_allvis,
831  skipping_current_range;
832  const int initprog_index[] = {
833  PROGRESS_VACUUM_PHASE,
834  PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
835  PROGRESS_VACUUM_MAX_DEAD_TUPLES
836  };
837  int64 initprog_val[3];
838 
839  /* Report that we're scanning the heap, advertising total # of blocks */
840  initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
841  initprog_val[1] = rel_pages;
842  initprog_val[2] = dead_items->max_items;
843  pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
844 
845  /* Set up an initial range of skippable blocks using the visibility map */
846  next_unskippable_block = lazy_scan_skip(vacrel, &vmbuffer, 0,
847  &next_unskippable_allvis,
848  &skipping_current_range);
849  for (blkno = 0; blkno < rel_pages; blkno++)
850  {
851  Buffer buf;
852  Page page;
853  bool all_visible_according_to_vm;
854  LVPagePruneState prunestate;
855 
856  if (blkno == next_unskippable_block)
857  {
858  /*
859  * Can't skip this page safely. Must scan the page. But
860  * determine the next skippable range after the page first.
861  */
862  all_visible_according_to_vm = next_unskippable_allvis;
863  next_unskippable_block = lazy_scan_skip(vacrel, &vmbuffer,
864  blkno + 1,
865  &next_unskippable_allvis,
866  &skipping_current_range);
867 
868  Assert(next_unskippable_block >= blkno + 1);
869  }
870  else
871  {
872  /* Last page always scanned (may need to set nonempty_pages) */
873  Assert(blkno < rel_pages - 1);
874 
875  if (skipping_current_range)
876  continue;
877 
878  /* Current range is too small to skip -- just scan the page */
879  all_visible_according_to_vm = true;
880  }
881 
882  vacrel->scanned_pages++;
883 
884  /* Report as block scanned, update error traceback information */
885  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
886  update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_SCAN_HEAP,
887  blkno, InvalidOffsetNumber);
888 
889  vacuum_delay_point();
890 
891  /*
892  * Regularly check if wraparound failsafe should trigger.
893  *
894  * There is a similar check inside lazy_vacuum_all_indexes(), but
895  * relfrozenxid might start to look dangerously old before we reach
896  * that point. This check also provides failsafe coverage for the
897  * one-pass strategy, and the two-pass strategy with the index_cleanup
898  * param set to 'off'.
899  */
900  if (vacrel->scanned_pages % FAILSAFE_EVERY_PAGES == 0)
901  lazy_check_wraparound_failsafe(vacrel);
902 
903  /*
904  * Consider if we definitely have enough space to process TIDs on page
905  * already. If we are close to overrunning the available space for
906  * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
907  * this page.
908  */
909  Assert(dead_items->max_items >= MaxHeapTuplesPerPage);
910  if (dead_items->max_items - dead_items->num_items < MaxHeapTuplesPerPage)
911  {
912  /*
913  * Before beginning index vacuuming, we release any pin we may
914  * hold on the visibility map page. This isn't necessary for
915  * correctness, but we do it anyway to avoid holding the pin
916  * across a lengthy, unrelated operation.
917  */
918  if (BufferIsValid(vmbuffer))
919  {
920  ReleaseBuffer(vmbuffer);
921  vmbuffer = InvalidBuffer;
922  }
923 
924  /* Perform a round of index and heap vacuuming */
925  vacrel->consider_bypass_optimization = false;
926  lazy_vacuum(vacrel);
927 
928  /*
929  * Vacuum the Free Space Map to make newly-freed space visible on
930  * upper-level FSM pages. Note we have not yet processed blkno.
931  */
932  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
933  blkno);
934  next_fsm_block_to_vacuum = blkno;
935 
936  /* Report that we are once again scanning the heap */
937  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
938  PROGRESS_VACUUM_PHASE_SCAN_HEAP);
939  }
940 
941  /*
942  * Pin the visibility map page in case we need to mark the page
943  * all-visible. In most cases this will be very cheap, because we'll
944  * already have the correct page pinned anyway.
945  */
946  visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
947 
948  /*
949  * We need a buffer cleanup lock to prune HOT chains and defragment
950  * the page in lazy_scan_prune. But when it's not possible to acquire
951  * a cleanup lock right away, we may be able to settle for reduced
952  * processing using lazy_scan_noprune.
953  */
954  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
955  vacrel->bstrategy);
956  page = BufferGetPage(buf);
957  if (!ConditionalLockBufferForCleanup(buf))
958  {
959  bool hastup,
960  recordfreespace;
961 
962  LockBuffer(buf, BUFFER_LOCK_SHARE);
963 
964  /* Check for new or empty pages before lazy_scan_noprune call */
965  if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, true,
966  vmbuffer))
967  {
968  /* Processed as new/empty page (lock and pin released) */
969  continue;
970  }
971 
972  /* Collect LP_DEAD items in dead_items array, count tuples */
973  if (lazy_scan_noprune(vacrel, buf, blkno, page, &hastup,
974  &recordfreespace))
975  {
976  Size freespace = 0;
977 
978  /*
979  * Processed page successfully (without cleanup lock) -- just
980  * need to perform rel truncation and FSM steps, much like the
981  * lazy_scan_prune case. Don't bother trying to match its
982  * visibility map setting steps, though.
983  */
984  if (hastup)
985  vacrel->nonempty_pages = blkno + 1;
986  if (recordfreespace)
987  freespace = PageGetHeapFreeSpace(page);
989  if (recordfreespace)
990  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
991  continue;
992  }
993 
994  /*
995  * lazy_scan_noprune could not do all required processing. Wait
996  * for a cleanup lock, and call lazy_scan_prune in the usual way.
997  */
998  Assert(vacrel->aggressive);
999  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1000  LockBufferForCleanup(buf);
1001  }
1002 
1003  /* Check for new or empty pages before lazy_scan_prune call */
1004  if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, false, vmbuffer))
1005  {
1006  /* Processed as new/empty page (lock and pin released) */
1007  continue;
1008  }
1009 
1010  /*
1011  * Prune, freeze, and count tuples.
1012  *
1013  * Accumulates details of remaining LP_DEAD line pointers on page in
1014  * dead_items array. This includes LP_DEAD line pointers that we
1015  * pruned ourselves, as well as existing LP_DEAD line pointers that
1016  * were pruned some time earlier. Also considers freezing XIDs in the
1017  * tuple headers of remaining items with storage.
1018  */
1019  lazy_scan_prune(vacrel, buf, blkno, page, &prunestate);
1020 
1021  Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);
1022 
1023  /* Remember the location of the last page with nonremovable tuples */
1024  if (prunestate.hastup)
1025  vacrel->nonempty_pages = blkno + 1;
1026 
1027  if (vacrel->nindexes == 0)
1028  {
1029  /*
1030  * Consider the need to do page-at-a-time heap vacuuming when
1031  * using the one-pass strategy now.
1032  *
1033  * The one-pass strategy will never call lazy_vacuum(). The steps
1034  * performed here can be thought of as the one-pass equivalent of
1035  * a call to lazy_vacuum().
1036  */
1037  if (prunestate.has_lpdead_items)
1038  {
1039  Size freespace;
1040 
1041  lazy_vacuum_heap_page(vacrel, blkno, buf, 0, vmbuffer);
1042 
1043  /* Forget the LP_DEAD items that we just vacuumed */
1044  dead_items->num_items = 0;
1045 
1046  /*
1047  * Periodically perform FSM vacuuming to make newly-freed
1048  * space visible on upper FSM pages. Note we have not yet
1049  * performed FSM processing for blkno.
1050  */
1051  if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1052  {
1053  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1054  blkno);
1055  next_fsm_block_to_vacuum = blkno;
1056  }
1057 
1058  /*
1059  * Now perform FSM processing for blkno, and move on to next
1060  * page.
1061  *
1062  * Our call to lazy_vacuum_heap_page() will have considered if
1063  * it's possible to set all_visible/all_frozen independently
1064  * of lazy_scan_prune(). Note that prunestate was invalidated
1065  * by lazy_vacuum_heap_page() call.
1066  */
1067  freespace = PageGetHeapFreeSpace(page);
1068 
1069  UnlockReleaseBuffer(buf);
1070  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1071  continue;
1072  }
1073 
1074  /*
1075  * There was no call to lazy_vacuum_heap_page() because pruning
1076  * didn't encounter/create any LP_DEAD items that needed to be
1077  * vacuumed. Prune state has not been invalidated, so proceed
1078  * with prunestate-driven visibility map and FSM steps (just like
1079  * the two-pass strategy).
1080  */
1081  Assert(dead_items->num_items == 0);
1082  }
1083 
1084  /*
1085  * Handle setting visibility map bit based on information from the VM
1086  * (as of last lazy_scan_skip() call), and from prunestate
1087  */
1088  if (!all_visible_according_to_vm && prunestate.all_visible)
1089  {
1090  uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
1091 
1092  if (prunestate.all_frozen)
1093  {
1094  Assert(!TransactionIdIsValid(prunestate.visibility_cutoff_xid));
1095  flags |= VISIBILITYMAP_ALL_FROZEN;
1096  }
1097 
1098  /*
1099  * It should never be the case that the visibility map page is set
1100  * while the page-level bit is clear, but the reverse is allowed
1101  * (if checksums are not enabled). Regardless, set both bits so
1102  * that we get back in sync.
1103  *
1104  * NB: If the heap page is all-visible but the VM bit is not set,
1105  * we don't need to dirty the heap page. However, if checksums
1106  * are enabled, we do need to make sure that the heap page is
1107  * dirtied before passing it to visibilitymap_set(), because it
1108  * may be logged. Given that this situation should only happen in
1109  * rare cases after a crash, it is not worth optimizing.
1110  */
1111  PageSetAllVisible(page);
1112  MarkBufferDirty(buf);
1113  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1114  vmbuffer, prunestate.visibility_cutoff_xid,
1115  flags);
1116  }
1117 
1118  /*
1119  * As of PostgreSQL 9.2, the visibility map bit should never be set if
1120  * the page-level bit is clear. However, it's possible that the bit
1121  * got cleared after lazy_scan_skip() was called, so we must recheck
1122  * with buffer lock before concluding that the VM is corrupt.
1123  */
1124  else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
1125  visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
1126  {
1127  elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
1128  vacrel->relname, blkno);
1129  visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
1130  VISIBILITYMAP_VALID_BITS);
1131  }
1132 
1133  /*
1134  * It's possible for the value returned by
1135  * GetOldestNonRemovableTransactionId() to move backwards, so it's not
1136  * wrong for us to see tuples that appear to not be visible to
1137  * everyone yet, while PD_ALL_VISIBLE is already set. The real safe
1138  * xmin value never moves backwards, but
1139  * GetOldestNonRemovableTransactionId() is conservative and sometimes
1140  * returns a value that's unnecessarily small, so if we see that
1141  * contradiction it just means that the tuples that we think are not
1142  * visible to everyone yet actually are, and the PD_ALL_VISIBLE flag
1143  * is correct.
1144  *
1145  * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE
1146  * set, however.
1147  */
1148  else if (prunestate.has_lpdead_items && PageIsAllVisible(page))
1149  {
1150  elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
1151  vacrel->relname, blkno);
1152  PageClearAllVisible(page);
1153  MarkBufferDirty(buf);
1154  visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
1155  VISIBILITYMAP_VALID_BITS);
1156  }
1157 
1158  /*
1159  * If the all-visible page is all-frozen but not marked as such yet,
1160  * mark it as all-frozen. Note that all_frozen is only valid if
1161  * all_visible is true, so we must check both prunestate fields.
1162  */
1163  else if (all_visible_according_to_vm && prunestate.all_visible &&
1164  prunestate.all_frozen &&
1165  !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
1166  {
1167  /*
1168  * Avoid relying on all_visible_according_to_vm as a proxy for the
1169  * page-level PD_ALL_VISIBLE bit being set, since it might have
1170  * become stale -- even when all_visible is set in prunestate
1171  */
1172  if (!PageIsAllVisible(page))
1173  {
1174  PageSetAllVisible(page);
1175  MarkBufferDirty(buf);
1176  }
1177 
1178  /*
1179  * Set the page all-frozen (and all-visible) in the VM.
1180  *
1181  * We can pass InvalidTransactionId as our visibility_cutoff_xid,
1182  * since a snapshotConflictHorizon sufficient to make everything
1183  * safe for REDO was logged when the page's tuples were frozen.
1184  */
1185  Assert(!TransactionIdIsValid(prunestate.visibility_cutoff_xid));
1186  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1187  vmbuffer, InvalidTransactionId,
1188  VISIBILITYMAP_ALL_VISIBLE |
1189  VISIBILITYMAP_ALL_FROZEN);
1190  }
1191 
1192  /*
1193  * Final steps for block: drop cleanup lock, record free space in the
1194  * FSM
1195  */
1196  if (prunestate.has_lpdead_items && vacrel->do_index_vacuuming)
1197  {
1198  /*
1199  * Wait until lazy_vacuum_heap_rel() to save free space. This
1200  * doesn't just save us some cycles; it also allows us to record
1201  * any additional free space that lazy_vacuum_heap_page() will
1202  * make available in cases where it's possible to truncate the
1203  * page's line pointer array.
1204  *
1205  * Note: It's not in fact 100% certain that we really will call
1206  * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip
1207  * index vacuuming (and so must skip heap vacuuming). This is
1208  * deemed okay because it only happens in emergencies, or when
1209  * there is very little free space anyway. (Besides, we start
1210  * recording free space in the FSM once index vacuuming has been
1211  * abandoned.)
1212  *
1213  * Note: The one-pass (no indexes) case is only supposed to make
1214  * it this far when there were no LP_DEAD items during pruning.
1215  */
1216  Assert(vacrel->nindexes > 0);
1217  UnlockReleaseBuffer(buf);
1218  }
1219  else
1220  {
1221  Size freespace = PageGetHeapFreeSpace(page);
1222 
1223  UnlockReleaseBuffer(buf);
1224  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1225  }
1226  }
1227 
1228  vacrel->blkno = InvalidBlockNumber;
1229  if (BufferIsValid(vmbuffer))
1230  ReleaseBuffer(vmbuffer);
1231 
1232  /* report that everything is now scanned */
1233  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1234 
1235  /* now we can compute the new value for pg_class.reltuples */
1236  vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, rel_pages,
1237  vacrel->scanned_pages,
1238  vacrel->live_tuples);
1239 
1240  /*
1241  * Also compute the total number of surviving heap entries. In the
1242  * (unlikely) scenario that new_live_tuples is -1, take it as zero.
1243  */
1244  vacrel->new_rel_tuples =
1245  Max(vacrel->new_live_tuples, 0) + vacrel->recently_dead_tuples +
1246  vacrel->missed_dead_tuples;
1247 
1248  /*
1249  * Do index vacuuming (call each index's ambulkdelete routine), then do
1250  * related heap vacuuming
1251  */
1252  if (dead_items->num_items > 0)
1253  lazy_vacuum(vacrel);
1254 
1255  /*
1256  * Vacuum the remainder of the Free Space Map. We must do this whether or
1257  * not there were indexes, and whether or not we bypassed index vacuuming.
1258  */
1259  if (blkno > next_fsm_block_to_vacuum)
1260  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno);
1261 
1262  /* report all blocks vacuumed */
1263  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1264 
1265  /* Do final index cleanup (call each index's amvacuumcleanup routine) */
1266  if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
1267  lazy_cleanup_all_indexes(vacrel);
1268 }
void pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val)
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3985
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1621
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:4283
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:4453
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:110
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:232
Size PageGetHeapFreeSpace(Page page)
Definition: bufpage.c:991
static void PageClearAllVisible(Page page)
Definition: bufpage.h:436
static void PageSetAllVisible(Page page)
Definition: bufpage.h:431
static bool PageIsAllVisible(Page page)
Definition: bufpage.h:426
unsigned char uint8
Definition: c.h:488
size_t Size
Definition: c.h:589
void FreeSpaceMapVacuumRange(Relation rel, BlockNumber start, BlockNumber end)
Definition: freespace.c:354
void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
Definition: freespace.c:182
#define PROGRESS_VACUUM_PHASE_SCAN_HEAP
Definition: progress.h:30
#define PROGRESS_VACUUM_TOTAL_HEAP_BLKS
Definition: progress.h:22
#define PROGRESS_VACUUM_HEAP_BLKS_SCANNED
Definition: progress.h:23
#define PROGRESS_VACUUM_MAX_DEAD_TUPLES
Definition: progress.h:26
#define PROGRESS_VACUUM_HEAP_BLKS_VACUUMED
Definition: progress.h:24
TransactionId visibility_cutoff_xid
Definition: vacuumlazy.c:232
BlockNumber blkno
Definition: vacuumlazy.c:179
#define TransactionIdIsValid(xid)
Definition: transam.h:41
void vacuum_delay_point(void)
Definition: vacuum.c:2211
double vac_estimate_reltuples(Relation relation, BlockNumber total_pages, BlockNumber scanned_pages, double scanned_tuples)
Definition: vacuum.c:1213
static void lazy_vacuum(LVRelState *vacrel)
Definition: vacuumlazy.c:2190
static void lazy_cleanup_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2653
static int lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, int index, Buffer vmbuffer)
Definition: vacuumlazy.c:2499
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
Definition: vacuumlazy.c:1409
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *hastup, bool *recordfreespace)
Definition: vacuumlazy.c:1956
static void lazy_scan_prune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, LVPagePruneState *prunestate)
Definition: vacuumlazy.c:1532
static BlockNumber lazy_scan_skip(LVRelState *vacrel, Buffer *vmbuffer, BlockNumber next_block, bool *next_unskippable_allvis, bool *skipping_current_range)
Definition: vacuumlazy.c:1293
#define FAILSAFE_EVERY_PAGES
Definition: vacuumlazy.c:100
#define VACUUM_FSM_EVERY_PAGES
Definition: vacuumlazy.c:109
void visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
#define VM_ALL_FROZEN(r, b, v)
Definition: visibilitymap.h:26
#define VISIBILITYMAP_VALID_BITS
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28

References LVRelState::aggressive, LVPagePruneState::all_frozen, LVPagePruneState::all_visible, Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage(), BufferIsValid(), ConditionalLockBufferForCleanup(), LVRelState::consider_bypass_optimization, LVRelState::dead_items, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, elog(), FAILSAFE_EVERY_PAGES, FreeSpaceMapVacuumRange(), LVPagePruneState::has_lpdead_items, LVPagePruneState::hastup, InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, InvalidTransactionId, InvalidXLogRecPtr, lazy_check_wraparound_failsafe(), lazy_cleanup_all_indexes(), lazy_scan_new_or_empty(), lazy_scan_noprune(), lazy_scan_prune(), lazy_scan_skip(), lazy_vacuum(), lazy_vacuum_heap_page(), LVRelState::live_tuples, LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, MarkBufferDirty(), Max, VacDeadItems::max_items, MaxHeapTuplesPerPage, LVRelState::missed_dead_tuples, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::nonempty_pages, VacDeadItems::num_items, PageClearAllVisible(), PageGetHeapFreeSpace(), PageIsAllVisible(), PageSetAllVisible(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLES, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, RBM_NORMAL, ReadBufferExtended(), LVRelState::recently_dead_tuples, RecordPageWithFreeSpace(), LVRelState::rel, LVRelState::rel_pages, ReleaseBuffer(), LVRelState::relname, LVRelState::scanned_pages, TransactionIdIsValid, UnlockReleaseBuffer(), update_vacuum_error_info(), vac_estimate_reltuples(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_FSM_EVERY_PAGES, LVPagePruneState::visibility_cutoff_xid, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, VM_ALL_FROZEN, and WARNING.

Referenced by heap_vacuum_rel().
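
One detail worth calling out from the main loop: before a page is processed, lazy_scan_heap() checks whether dead_items could overflow if the page contributed a full MaxHeapTuplesPerPage worth of LP_DEAD items, and if so it runs a round of index and heap vacuuming first. A standalone sketch of that guard; FakeDeadItems, fake_lazy_vacuum() and MAX_TUPLES_PER_PAGE are illustrative stand-ins:

#include <stdio.h>

#define MAX_TUPLES_PER_PAGE 291     /* stand-in for MaxHeapTuplesPerPage */

typedef struct FakeDeadItems
{
    int         max_items;
    int         num_items;
} FakeDeadItems;

/* Stand-in for lazy_vacuum(): index vacuuming empties the TID array */
static void
fake_lazy_vacuum(FakeDeadItems *dead_items)
{
    printf("vacuuming indexes for %d TIDs\n", dead_items->num_items);
    dead_items->num_items = 0;
}

int
main(void)
{
    FakeDeadItems dead_items = {.max_items = 1000, .num_items = 0};

    for (int blkno = 0; blkno < 50; blkno++)
    {
        /* Guard: never start a page we might not be able to finish */
        if (dead_items.max_items - dead_items.num_items < MAX_TUPLES_PER_PAGE)
            fake_lazy_vacuum(&dead_items);

        /* pretend pruning found 100 LP_DEAD items on this page */
        dead_items.num_items += 100;
    }
    fake_lazy_vacuum(&dead_items);
    return 0;
}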

◆ lazy_scan_new_or_empty()

static bool lazy_scan_new_or_empty ( LVRelState * vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool  sharelock,
Buffer  vmbuffer 
)
static

Definition at line 1409 of file vacuumlazy.c.

1411 {
1412  Size freespace;
1413 
1414  if (PageIsNew(page))
1415  {
1416  /*
1417  * All-zeroes pages can be left over if either a backend extends the
1418  * relation by a single page, but crashes before the newly initialized
1419  * page has been written out, or when bulk-extending the relation
1420  * (which creates a number of empty pages at the tail end of the
1421  * relation), and then enters them into the FSM.
1422  *
1423  * Note we do not enter the page into the visibilitymap. That has the
1424  * downside that we repeatedly visit this page in subsequent vacuums,
1425  * but otherwise we'll never discover the space on a promoted standby.
1426  * The harm of repeated checking ought to normally not be too bad. The
1427  * space usually should be used at some point, otherwise there
1428  * wouldn't be any regular vacuums.
1429  *
1430  * Make sure these pages are in the FSM, to ensure they can be reused.
1431  * Do that by testing if there's any space recorded for the page. If
1432  * not, enter it. We do so after releasing the lock on the heap page,
1433  * the FSM is approximate, after all.
1434  */
1435  UnlockReleaseBuffer(buf);
1436 
1437  if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
1438  {
1439  freespace = BLCKSZ - SizeOfPageHeaderData;
1440 
1441  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1442  }
1443 
1444  return true;
1445  }
1446 
1447  if (PageIsEmpty(page))
1448  {
1449  /*
1450  * It seems likely that caller will always be able to get a cleanup
1451  * lock on an empty page. But don't take any chances -- escalate to
1452  * an exclusive lock (still don't need a cleanup lock, though).
1453  */
1454  if (sharelock)
1455  {
1456  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1457  LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
1458 
1459  if (!PageIsEmpty(page))
1460  {
1461  /* page isn't new or empty -- keep lock and pin for now */
1462  return false;
1463  }
1464  }
1465  else
1466  {
1467  /* Already have a full cleanup lock (which is more than enough) */
1468  }
1469 
1470  /*
1471  * Unlike new pages, empty pages are always set all-visible and
1472  * all-frozen.
1473  */
1474  if (!PageIsAllVisible(page))
1475  {
1476  START_CRIT_SECTION();
1477 
1478  /* mark buffer dirty before writing a WAL record */
1479  MarkBufferDirty(buf);
1480 
1481  /*
1482  * It's possible that another backend has extended the heap,
1483  * initialized the page, and then failed to WAL-log the page due
1484  * to an ERROR. Since heap extension is not WAL-logged, recovery
1485  * might try to replay our record setting the page all-visible and
1486  * find that the page isn't initialized, which will cause a PANIC.
1487  * To prevent that, check whether the page has been previously
1488  * WAL-logged, and if not, do that now.
1489  */
1490  if (RelationNeedsWAL(vacrel->rel) &&
1491  PageGetLSN(page) == InvalidXLogRecPtr)
1492  log_newpage_buffer(buf, true);
1493 
1494  PageSetAllVisible(page);
1495  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1496  vmbuffer, InvalidTransactionId,
1497  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
1498  END_CRIT_SECTION();
1499  }
1500 
1501  freespace = PageGetHeapFreeSpace(page);
1502  UnlockReleaseBuffer(buf);
1503  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1504  return true;
1505  }
1506 
1507  /* page isn't new or empty -- keep lock and pin */
1508  return false;
1509 }
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:112
#define SizeOfPageHeaderData
Definition: bufpage.h:213
static XLogRecPtr PageGetLSN(Page page)
Definition: bufpage.h:383
Size GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk)
Definition: freespace.c:232
#define START_CRIT_SECTION()
Definition: miscadmin.h:148
#define END_CRIT_SECTION()
Definition: miscadmin.h:150
#define RelationNeedsWAL(relation)
Definition: rel.h:628
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
Definition: xloginsert.c:1191

References buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, END_CRIT_SECTION, GetRecordedFreeSpace(), InvalidTransactionId, InvalidXLogRecPtr, LockBuffer(), log_newpage_buffer(), MarkBufferDirty(), PageGetHeapFreeSpace(), PageGetLSN(), PageIsAllVisible(), PageIsEmpty(), PageIsNew(), PageSetAllVisible(), RecordPageWithFreeSpace(), LVRelState::rel, RelationNeedsWAL, SizeOfPageHeaderData, START_CRIT_SECTION, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_set().

Referenced by lazy_scan_heap().
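
For a PageIsNew() page the only action is to make sure the FSM knows about its free space, which for an all-zeroes page is simply the block size minus the page header. A tiny arithmetic sketch under the default 8 kB block size; FAKE_BLCKSZ and FAKE_PAGE_HEADER_SIZE stand in for BLCKSZ and SizeOfPageHeaderData, whose typical value is assumed here to be 24 bytes:

#include <stdio.h>

#define FAKE_BLCKSZ            8192 /* default PostgreSQL block size */
#define FAKE_PAGE_HEADER_SIZE  24   /* assumed SizeOfPageHeaderData */

int
main(void)
{
    /* Free space recorded for a brand-new (all-zeroes) heap page */
    int         freespace = FAKE_BLCKSZ - FAKE_PAGE_HEADER_SIZE;

    printf("new page free space: %d bytes\n", freespace);  /* 8168 */
    return 0;
}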

◆ lazy_scan_noprune()

static bool lazy_scan_noprune ( LVRelState * vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool * hastup,
bool * recordfreespace 
)
static

Definition at line 1956 of file vacuumlazy.c.

1962 {
1963  OffsetNumber offnum,
1964  maxoff;
1965  int lpdead_items,
1966  live_tuples,
1967  recently_dead_tuples,
1968  missed_dead_tuples;
1969  HeapTupleHeader tupleheader;
1970  TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
1971  MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid;
1972  OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
1973 
1974  Assert(BufferGetBlockNumber(buf) == blkno);
1975 
1976  *hastup = false; /* for now */
1977  *recordfreespace = false; /* for now */
1978 
1979  lpdead_items = 0;
1980  live_tuples = 0;
1981  recently_dead_tuples = 0;
1982  missed_dead_tuples = 0;
1983 
1984  maxoff = PageGetMaxOffsetNumber(page);
1985  for (offnum = FirstOffsetNumber;
1986  offnum <= maxoff;
1987  offnum = OffsetNumberNext(offnum))
1988  {
1989  ItemId itemid;
1990  HeapTupleData tuple;
1991 
1992  vacrel->offnum = offnum;
1993  itemid = PageGetItemId(page, offnum);
1994 
1995  if (!ItemIdIsUsed(itemid))
1996  continue;
1997 
1998  if (ItemIdIsRedirected(itemid))
1999  {
2000  *hastup = true;
2001  continue;
2002  }
2003 
2004  if (ItemIdIsDead(itemid))
2005  {
2006  /*
2007  * Deliberately don't set hastup=true here. See same point in
2008  * lazy_scan_prune for an explanation.
2009  */
2010  deadoffsets[lpdead_items++] = offnum;
2011  continue;
2012  }
2013 
2014  *hastup = true; /* page prevents rel truncation */
2015  tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
2016  if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
2017  &NoFreezePageRelfrozenXid,
2018  &NoFreezePageRelminMxid))
2019  {
2020  /* Tuple with XID < FreezeLimit (or MXID < MultiXactCutoff) */
2021  if (vacrel->aggressive)
2022  {
2023  /*
2024  * Aggressive VACUUMs must always be able to advance rel's
2025  * relfrozenxid to a value >= FreezeLimit (and be able to
2026  * advance rel's relminmxid to a value >= MultiXactCutoff).
2027  * The ongoing aggressive VACUUM won't be able to do that
2028  * unless it can freeze an XID (or MXID) from this tuple now.
2029  *
2030  * The only safe option is to have caller perform processing
2031  * of this page using lazy_scan_prune. Caller might have to
2032  * wait a while for a cleanup lock, but it can't be helped.
2033  */
2034  vacrel->offnum = InvalidOffsetNumber;
2035  return false;
2036  }
2037 
2038  /*
2039  * Non-aggressive VACUUMs are under no obligation to advance
2040  * relfrozenxid (even by one XID). We can be much laxer here.
2041  *
2042  * Currently we always just accept an older final relfrozenxid
2043  * and/or relminmxid value. We never make caller wait or work a
2044  * little harder, even when it likely makes sense to do so.
2045  */
2046  }
2047 
2048  ItemPointerSet(&(tuple.t_self), blkno, offnum);
2049  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2050  tuple.t_len = ItemIdGetLength(itemid);
2051  tuple.t_tableOid = RelationGetRelid(vacrel->rel);
2052 
2053  switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
2054  buf))
2055  {
2056  case HEAPTUPLE_DELETE_IN_PROGRESS:
2057  case HEAPTUPLE_LIVE:
2058 
2059  /*
2060  * Count both cases as live, just like lazy_scan_prune
2061  */
2062  live_tuples++;
2063 
2064  break;
2065  case HEAPTUPLE_DEAD:
2066 
2067  /*
2068  * There is some useful work for pruning to do, that won't be
2069  * done due to failure to get a cleanup lock.
2070  */
2071  missed_dead_tuples++;
2072  break;
2073  case HEAPTUPLE_RECENTLY_DEAD:
2074 
2075  /*
2076  * Count in recently_dead_tuples, just like lazy_scan_prune
2077  */
2078  recently_dead_tuples++;
2079  break;
2080  case HEAPTUPLE_INSERT_IN_PROGRESS:
2081 
2082  /*
2083  * Do not count these rows as live, just like lazy_scan_prune
2084  */
2085  break;
2086  default:
2087  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2088  break;
2089  }
2090  }
2091 
2092  vacrel->offnum = InvalidOffsetNumber;
2093 
2094  /*
2095  * By here we know for sure that caller can put off freezing and pruning
2096  * this particular page until the next VACUUM. Remember its details now.
2097  * (lazy_scan_prune expects a clean slate, so we have to do this last.)
2098  */
2099  vacrel->NewRelfrozenXid = NoFreezePageRelfrozenXid;
2100  vacrel->NewRelminMxid = NoFreezePageRelminMxid;
2101 
2102  /* Save any LP_DEAD items found on the page in dead_items array */
2103  if (vacrel->nindexes == 0)
2104  {
2105  /* Using one-pass strategy (since table has no indexes) */
2106  if (lpdead_items > 0)
2107  {
2108  /*
2109  * Perfunctory handling for the corner case where a single pass
2110  * strategy VACUUM cannot get a cleanup lock, and it turns out
2111  * that there is one or more LP_DEAD items: just count the LP_DEAD
2112  * items as missed_dead_tuples instead. (This is a bit dishonest,
2113  * but it beats having to maintain specialized heap vacuuming code
2114  * forever, for vanishingly little benefit.)
2115  */
2116  *hastup = true;
2117  missed_dead_tuples += lpdead_items;
2118  }
2119 
2120  *recordfreespace = true;
2121  }
2122  else if (lpdead_items == 0)
2123  {
2124  /*
2125  * Won't be vacuuming this page later, so record page's freespace in
2126  * the FSM now
2127  */
2128  *recordfreespace = true;
2129  }
2130  else
2131  {
2132  VacDeadItems *dead_items = vacrel->dead_items;
2133  ItemPointerData tmp;
2134 
2135  /*
2136  * Page has LP_DEAD items, and so any references/TIDs that remain in
2137  * indexes will be deleted during index vacuuming (and then marked
2138  * LP_UNUSED in the heap)
2139  */
2140  vacrel->lpdead_item_pages++;
2141 
2142  ItemPointerSetBlockNumber(&tmp, blkno);
2143 
2144  for (int i = 0; i < lpdead_items; i++)
2145  {
2146  ItemPointerSetOffsetNumber(&tmp, deadoffsets[i]);
2147  dead_items->items[dead_items->num_items++] = tmp;
2148  }
2149 
2150  Assert(dead_items->num_items <= dead_items->max_items);
2151  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
2152  dead_items->num_items);
2153 
2154  vacrel->lpdead_items += lpdead_items;
2155 
2156  /*
2157  * Assume that we'll go on to vacuum this heap page during final pass
2158  * over the heap. Don't record free space until then.
2159  */
2160  *recordfreespace = false;
2161  }
2162 
2163  /*
2164  * Finally, add relevant page-local counts to whole-VACUUM counts
2165  */
2166  vacrel->live_tuples += live_tuples;
2167  vacrel->recently_dead_tuples += recently_dead_tuples;
2168  vacrel->missed_dead_tuples += missed_dead_tuples;
2169  if (missed_dead_tuples > 0)
2170  vacrel->missed_dead_pages++;
2171 
2172  /* Caller won't need to call lazy_scan_prune with same page */
2173  return true;
2174 }
TransactionId MultiXactId
Definition: c.h:646
bool heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid)
Definition: heapam.c:7331
static void ItemPointerSetOffsetNumber(ItemPointerData *pointer, OffsetNumber offsetNumber)
Definition: itemptr.h:158
static void ItemPointerSetBlockNumber(ItemPointerData *pointer, BlockNumber blockNumber)
Definition: itemptr.h:147
#define PROGRESS_VACUUM_NUM_DEAD_TUPLES
Definition: progress.h:27
ItemPointerData items[FLEXIBLE_ARRAY_MEMBER]
Definition: vacuum.h:289

References LVRelState::aggressive, Assert(), buf, BufferGetBlockNumber(), LVRelState::cutoffs, LVRelState::dead_items, elog(), ERROR, FirstOffsetNumber, heap_tuple_should_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuum(), i, InvalidOffsetNumber, ItemIdGetLength, ItemIdIsDead, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), VacDeadItems::items, LVRelState::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, VacDeadItems::max_items, MaxHeapTuplesPerPage, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, VacDeadItems::num_items, LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), pgstat_progress_update_param(), PROGRESS_VACUUM_NUM_DEAD_TUPLES, LVRelState::recently_dead_tuples, LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by lazy_scan_heap().
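
The common tail of lazy_scan_noprune() and lazy_scan_prune() converts the page-local deadoffsets[] array into full TIDs (block number plus offset) appended to the shared dead_items array. A standalone sketch of that step; FakeTid and record_dead_offsets() are illustrative stand-ins for ItemPointerData and the in-line loop:

#include <assert.h>
#include <stdio.h>

typedef struct FakeTid
{
    unsigned    blkno;
    unsigned short offnum;
} FakeTid;

#define MAX_DEAD_ITEMS 1024

static FakeTid dead_items[MAX_DEAD_ITEMS];
static int  num_dead_items = 0;

/* Append one page's LP_DEAD offsets to the shared TID array */
static void
record_dead_offsets(unsigned blkno, const unsigned short *deadoffsets,
                    int lpdead_items)
{
    for (int i = 0; i < lpdead_items; i++)
    {
        assert(num_dead_items < MAX_DEAD_ITEMS);
        dead_items[num_dead_items].blkno = blkno;
        dead_items[num_dead_items].offnum = deadoffsets[i];
        num_dead_items++;
    }
}

int
main(void)
{
    unsigned short offsets[] = {3, 7, 15};

    record_dead_offsets(42, offsets, 3);
    printf("recorded %d dead TIDs, first = (42,%u)\n",
           num_dead_items, dead_items[0].offnum);
    return 0;
}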

◆ lazy_scan_prune()

static void lazy_scan_prune ( LVRelState * vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
LVPagePruneState * prunestate 
)
static

Definition at line 1532 of file vacuumlazy.c.

1537 {
1538  Relation rel = vacrel->rel;
1539  OffsetNumber offnum,
1540  maxoff;
1541  ItemId itemid;
1542  HeapTupleData tuple;
1543  HTSV_Result res;
1544  int tuples_deleted,
1545  tuples_frozen,
1546  lpdead_items,
1547  live_tuples,
1548  recently_dead_tuples;
1549  int nnewlpdead;
1550  HeapPageFreeze pagefrz;
1551  int64 fpi_before = pgWalUsage.wal_fpi;
1552  OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
1553  HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
1554 
1555  Assert(BufferGetBlockNumber(buf) == blkno);
1556 
1557  /*
1558  * maxoff might be reduced following line pointer array truncation in
1559  * heap_page_prune. That's safe for us to ignore, since the reclaimed
1560  * space will continue to look like LP_UNUSED items below.
1561  */
1562  maxoff = PageGetMaxOffsetNumber(page);
1563 
1564 retry:
1565 
1566  /* Initialize (or reset) page-level state */
1567  pagefrz.freeze_required = false;
1568  pagefrz.FreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
1569  pagefrz.FreezePageRelminMxid = vacrel->NewRelminMxid;
1570  pagefrz.NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
1571  pagefrz.NoFreezePageRelminMxid = vacrel->NewRelminMxid;
1572  tuples_deleted = 0;
1573  tuples_frozen = 0;
1574  lpdead_items = 0;
1575  live_tuples = 0;
1576  recently_dead_tuples = 0;
1577 
1578  /*
1579  * Prune all HOT-update chains in this page.
1580  *
1581  * We count tuples removed by the pruning step as tuples_deleted. Its
1582  * final value can be thought of as the number of tuples that have been
1583  * deleted from the table. It should not be confused with lpdead_items;
1584  * lpdead_items's final value can be thought of as the number of tuples
1585  * that were deleted from indexes.
1586  */
1587  tuples_deleted = heap_page_prune(rel, buf, vacrel->vistest,
1588  InvalidTransactionId, 0, &nnewlpdead,
1589  &vacrel->offnum);
1590 
1591  /*
1592  * Now scan the page to collect LP_DEAD items and check for tuples
1593  * requiring freezing among remaining tuples with storage
1594  */
1595  prunestate->hastup = false;
1596  prunestate->has_lpdead_items = false;
1597  prunestate->all_visible = true;
1598  prunestate->all_frozen = true;
1599  prunestate->visibility_cutoff_xid = InvalidTransactionId;
1600 
1601  for (offnum = FirstOffsetNumber;
1602  offnum <= maxoff;
1603  offnum = OffsetNumberNext(offnum))
1604  {
1605  bool totally_frozen;
1606 
1607  /*
1608  * Set the offset number so that we can display it along with any
1609  * error that occurred while processing this tuple.
1610  */
1611  vacrel->offnum = offnum;
1612  itemid = PageGetItemId(page, offnum);
1613 
1614  if (!ItemIdIsUsed(itemid))
1615  continue;
1616 
1617  /* Redirect items mustn't be touched */
1618  if (ItemIdIsRedirected(itemid))
1619  {
1620  /* page makes rel truncation unsafe */
1621  prunestate->hastup = true;
1622  continue;
1623  }
1624 
1625  if (ItemIdIsDead(itemid))
1626  {
1627  /*
1628  * Deliberately don't set hastup for LP_DEAD items. We make the
1629  * soft assumption that any LP_DEAD items encountered here will
1630  * become LP_UNUSED later on, before count_nondeletable_pages is
1631  * reached. If we don't make this assumption then rel truncation
1632  * will only happen every other VACUUM, at most. Besides, VACUUM
1633  * must treat hastup/nonempty_pages as provisional no matter how
1634  * LP_DEAD items are handled (handled here, or handled later on).
1635  *
1636  * Also deliberately delay unsetting all_visible until just before
1637  * we return to lazy_scan_heap caller, as explained in full below.
1638  * (This is another case where it's useful to anticipate that any
1639  * LP_DEAD items will become LP_UNUSED during the ongoing VACUUM.)
1640  */
1641  deadoffsets[lpdead_items++] = offnum;
1642  continue;
1643  }
1644 
1645  Assert(ItemIdIsNormal(itemid));
1646 
1647  ItemPointerSet(&(tuple.t_self), blkno, offnum);
1648  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
1649  tuple.t_len = ItemIdGetLength(itemid);
1650  tuple.t_tableOid = RelationGetRelid(rel);
1651 
1652  /*
1653  * DEAD tuples are almost always pruned into LP_DEAD line pointers by
1654  * heap_page_prune(), but it's possible that the tuple state changed
1655  * since heap_page_prune() looked. Handle that here by restarting.
1656  * (See comments at the top of function for a full explanation.)
1657  */
1658  res = HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
1659  buf);
1660 
1661  if (unlikely(res == HEAPTUPLE_DEAD))
1662  goto retry;
1663 
1664  /*
1665  * The criteria for counting a tuple as live in this block need to
1666  * match what analyze.c's acquire_sample_rows() does, otherwise VACUUM
1667  * and ANALYZE may produce wildly different reltuples values, e.g.
1668  * when there are many recently-dead tuples.
1669  *
1670  * The logic here is a bit simpler than acquire_sample_rows(), as
1671  * VACUUM can't run inside a transaction block, which makes some cases
1672  * impossible (e.g. in-progress insert from the same transaction).
1673  *
1674  * We treat LP_DEAD items (which are the closest thing to DEAD tuples
1675  * that might be seen here) differently, too: we assume that they'll
1676  * become LP_UNUSED before VACUUM finishes. This difference is only
1677  * superficial. VACUUM effectively agrees with ANALYZE about DEAD
1678  * items, in the end. VACUUM won't remember LP_DEAD items, but only
1679  * because they're not supposed to be left behind when it is done.
1680  * (Cases where we bypass index vacuuming will violate this optimistic
1681  * assumption, but the overall impact of that should be negligible.)
1682  */
1683  switch (res)
1684  {
1685  case HEAPTUPLE_LIVE:
1686 
1687  /*
1688  * Count it as live. Not only is this natural, but it's also
1689  * what acquire_sample_rows() does.
1690  */
1691  live_tuples++;
1692 
1693  /*
1694  * Is the tuple definitely visible to all transactions?
1695  *
1696  * NB: Like with per-tuple hint bits, we can't set the
1697  * PD_ALL_VISIBLE flag if the inserter committed
1698  * asynchronously. See SetHintBits for more info. Check that
1699  * the tuple is hinted xmin-committed because of that.
1700  */
1701  if (prunestate->all_visible)
1702  {
1703  TransactionId xmin;
1704 
1705  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
1706  {
1707  prunestate->all_visible = false;
1708  break;
1709  }
1710 
1711  /*
1712  * The inserter definitely committed. But is it old enough
1713  * that everyone sees it as committed?
1714  */
1715  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
1716  if (!TransactionIdPrecedes(xmin,
1717  vacrel->cutoffs.OldestXmin))
1718  {
1719  prunestate->all_visible = false;
1720  break;
1721  }
1722 
1723  /* Track newest xmin on page. */
1724  if (TransactionIdFollows(xmin, prunestate->visibility_cutoff_xid) &&
1725  TransactionIdIsNormal(xmin))
1726  prunestate->visibility_cutoff_xid = xmin;
1727  }
1728  break;
1729  case HEAPTUPLE_RECENTLY_DEAD:
1730 
1731  /*
1732  * If tuple is recently dead then we must not remove it from
1733  * the relation. (We only remove items that are LP_DEAD from
1734  * pruning.)
1735  */
1736  recently_dead_tuples++;
1737  prunestate->all_visible = false;
1738  break;
1739  case HEAPTUPLE_INSERT_IN_PROGRESS:
1740 
1741  /*
1742  * We do not count these rows as live, because we expect the
1743  * inserting transaction to update the counters at commit, and
1744  * we assume that will happen only after we report our
1745  * results. This assumption is a bit shaky, but it is what
1746  * acquire_sample_rows() does, so be consistent.
1747  */
1748  prunestate->all_visible = false;
1749  break;
1750  case HEAPTUPLE_DELETE_IN_PROGRESS:
1751  /* This is an expected case during concurrent vacuum */
1752  prunestate->all_visible = false;
1753 
1754  /*
1755  * Count such rows as live. As above, we assume the deleting
1756  * transaction will commit and update the counters after we
1757  * report.
1758  */
1759  live_tuples++;
1760  break;
1761  default:
1762  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1763  break;
1764  }
1765 
1766  prunestate->hastup = true; /* page makes rel truncation unsafe */
1767 
1768  /* Tuple with storage -- consider need to freeze */
1769  if (heap_prepare_freeze_tuple(tuple.t_data, &vacrel->cutoffs, &pagefrz,
1770  &frozen[tuples_frozen], &totally_frozen))
1771  {
1772  /* Save prepared freeze plan for later */
1773  frozen[tuples_frozen++].offset = offnum;
1774  }
1775 
1776  /*
1777  * If any tuple isn't either totally frozen already or eligible to
1778  * become totally frozen (according to its freeze plan), then the page
1779  * definitely cannot be set all-frozen in the visibility map later on
1780  */
1781  if (!totally_frozen)
1782  prunestate->all_frozen = false;
1783  }
1784 
1785  /*
1786  * We have now divided every item on the page into either an LP_DEAD item
1787  * that will need to be vacuumed in indexes later, or a LP_NORMAL tuple
1788  * that remains and needs to be considered for freezing now (LP_UNUSED and
1789  * LP_REDIRECT items also remain, but are of no further interest to us).
1790  */
1791  vacrel->offnum = InvalidOffsetNumber;
1792 
1793  /*
1794  * Freeze the page when heap_prepare_freeze_tuple indicates that at least
1795  * one XID/MXID from before FreezeLimit/MultiXactCutoff is present. Also
1796  * freeze when pruning generated an FPI, if doing so means that we set the
1797  * page all-frozen afterwards (might not happen until final heap pass).
1798  */
1799  if (pagefrz.freeze_required || tuples_frozen == 0 ||
1800  (prunestate->all_visible && prunestate->all_frozen &&
1801  fpi_before != pgWalUsage.wal_fpi))
1802  {
1803  /*
1804  * We're freezing the page. Our final NewRelfrozenXid doesn't need to
1805  * be affected by the XIDs that are just about to be frozen anyway.
1806  */
1807  vacrel->NewRelfrozenXid = pagefrz.FreezePageRelfrozenXid;
1808  vacrel->NewRelminMxid = pagefrz.FreezePageRelminMxid;
1809 
1810  if (tuples_frozen == 0)
1811  {
1812  /*
1813  * We have no freeze plans to execute, so there's no added cost
1814  * from following the freeze path. That's why it was chosen.
1815  * This is important in the case where the page only contains
1816  * totally frozen tuples at this point (perhaps only following
1817  * pruning). Such pages can be marked all-frozen in the VM by our
1818  * caller, even though none of its tuples were newly frozen here
1819  * (note that the "no freeze" path never sets pages all-frozen).
1820  *
1821  * We never increment the frozen_pages instrumentation counter
1822  * here, since it only counts pages with newly frozen tuples
1823  * (don't confuse that with pages newly set all-frozen in VM).
1824  */
1825  }
1826  else
1827  {
1828  TransactionId snapshotConflictHorizon;
1829 
1830  vacrel->frozen_pages++;
1831 
1832  /*
1833  * We can use visibility_cutoff_xid as our cutoff for conflicts
1834  * when the whole page is eligible to become all-frozen in the VM
1835  * once we're done with it. Otherwise we generate a conservative
1836  * cutoff by stepping back from OldestXmin.
1837  */
1838  if (prunestate->all_visible && prunestate->all_frozen)
1839  {
1840  /* Using same cutoff when setting VM is now unnecessary */
1841  snapshotConflictHorizon = prunestate->visibility_cutoff_xid;
1842  prunestate->visibility_cutoff_xid = InvalidTransactionId;
1843  }
1844  else
1845  {
1846  /* Avoids false conflicts when hot_standby_feedback in use */
1847  snapshotConflictHorizon = vacrel->cutoffs.OldestXmin;
1848  TransactionIdRetreat(snapshotConflictHorizon);
1849  }
1850 
1851  /* Execute all freeze plans for page as a single atomic action */
1852  heap_freeze_execute_prepared(vacrel->rel, buf,
1853  snapshotConflictHorizon,
1854  frozen, tuples_frozen);
1855  }
1856  }
1857  else
1858  {
1859  /*
1860  * Page requires "no freeze" processing. It might be set all-visible
1861  * in the visibility map, but it can never be set all-frozen.
1862  */
1863  vacrel->NewRelfrozenXid = pagefrz.NoFreezePageRelfrozenXid;
1864  vacrel->NewRelminMxid = pagefrz.NoFreezePageRelminMxid;
1865  prunestate->all_frozen = false;
1866  tuples_frozen = 0; /* avoid miscounts in instrumentation */
1867  }
1868 
1869  /*
1870  * VACUUM will call heap_page_is_all_visible() during the second pass over
1871  * the heap to determine all_visible and all_frozen for the page -- this
1872  * is a specialized version of the logic from this function. Now that
1873  * we've finished pruning and freezing, make sure that we're in total
1874  * agreement with heap_page_is_all_visible() using an assertion.
1875  */
1876 #ifdef USE_ASSERT_CHECKING
1877  /* Note that all_frozen value does not matter when !all_visible */
1878  if (prunestate->all_visible && lpdead_items == 0)
1879  {
1880  TransactionId cutoff;
1881  bool all_frozen;
1882 
1883  if (!heap_page_is_all_visible(vacrel, buf, &cutoff, &all_frozen))
1884  Assert(false);
1885 
1886  Assert(!TransactionIdIsValid(cutoff) ||
1887  cutoff == prunestate->visibility_cutoff_xid);
1888  }
1889 #endif
1890 
1891  /*
1892  * Now save details of the LP_DEAD items from the page in vacrel
1893  */
1894  if (lpdead_items > 0)
1895  {
1896  VacDeadItems *dead_items = vacrel->dead_items;
1897  ItemPointerData tmp;
1898 
1899  vacrel->lpdead_item_pages++;
1900  prunestate->has_lpdead_items = true;
1901 
1902  ItemPointerSetBlockNumber(&tmp, blkno);
1903 
1904  for (int i = 0; i < lpdead_items; i++)
1905  {
1906  ItemPointerSetOffsetNumber(&tmp, deadoffsets[i]);
1907  dead_items->items[dead_items->num_items++] = tmp;
1908  }
1909 
1910  Assert(dead_items->num_items <= dead_items->max_items);
1911  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
1912  dead_items->num_items);
1913 
1914  /*
1915  * It was convenient to ignore LP_DEAD items in all_visible earlier on
1916  * to make the choice of whether or not to freeze the page unaffected
1917  * by the short-term presence of LP_DEAD items. These LP_DEAD items
1918  * were effectively assumed to be LP_UNUSED items in the making. It
1919  * doesn't matter which heap pass (initial pass or final pass) ends up
1920  * setting the page all-frozen, as long as the ongoing VACUUM does it.
1921  *
1922  * Now that freezing has been finalized, unset all_visible. It needs
1923  * to reflect the present state of things, as expected by our caller.
1924  */
1925  prunestate->all_visible = false;
1926  }
1927 
1928  /* Finally, add page-local counts to whole-VACUUM counts */
1929  vacrel->tuples_deleted += tuples_deleted;
1930  vacrel->tuples_frozen += tuples_frozen;
1931  vacrel->lpdead_items += lpdead_items;
1932  vacrel->live_tuples += live_tuples;
1933  vacrel->recently_dead_tuples += recently_dead_tuples;
1934 }
void heap_freeze_execute_prepared(Relation rel, Buffer buffer, TransactionId snapshotConflictHorizon, HeapTupleFreeze *tuples, int ntuples)
Definition: heapam.c:6620
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, HeapPageFreeze *pagefrz, HeapTupleFreeze *frz, bool *totally_frozen)
Definition: heapam.c:6317
HTSV_Result
Definition: heapam.h:95
int heap_page_prune(Relation relation, Buffer buffer, GlobalVisState *vistest, TransactionId old_snap_xmin, TimestampTz old_snap_ts, int *nnewlpdead, OffsetNumber *off_loc)
Definition: pruneheap.c:266
MultiXactId NoFreezePageRelminMxid
Definition: heapam.h:190
TransactionId FreezePageRelfrozenXid
Definition: heapam.h:178
bool freeze_required
Definition: heapam.h:152
MultiXactId FreezePageRelminMxid
Definition: heapam.h:179
TransactionId NoFreezePageRelfrozenXid
Definition: heapam.h:189
OffsetNumber offset
Definition: heapam.h:122
#define TransactionIdRetreat(dest)
Definition: transam.h:141
static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
Definition: vacuumlazy.c:3226

References LVPagePruneState::all_frozen, LVPagePruneState::all_visible, Assert(), buf, BufferGetBlockNumber(), LVRelState::cutoffs, LVRelState::dead_items, elog(), ERROR, FirstOffsetNumber, HeapPageFreeze::freeze_required, HeapPageFreeze::FreezePageRelfrozenXid, HeapPageFreeze::FreezePageRelminMxid, LVRelState::frozen_pages, LVPagePruneState::has_lpdead_items, LVPagePruneState::hastup, heap_freeze_execute_prepared(), heap_page_is_all_visible(), heap_page_prune(), heap_prepare_freeze_tuple(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleSatisfiesVacuum(), i, InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), VacDeadItems::items, LVRelState::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, VacDeadItems::max_items, MaxHeapTuplesPerPage, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, HeapPageFreeze::NoFreezePageRelfrozenXid, HeapPageFreeze::NoFreezePageRelminMxid, VacDeadItems::num_items, LVRelState::offnum, HeapTupleFreeze::offset, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), pgstat_progress_update_param(), pgWalUsage, PROGRESS_VACUUM_NUM_DEAD_TUPLES, LVRelState::recently_dead_tuples, LVRelState::rel, RelationGetRelid, res, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdIsNormal, TransactionIdIsValid, TransactionIdPrecedes(), TransactionIdRetreat, LVRelState::tuples_deleted, LVRelState::tuples_frozen, unlikely, LVPagePruneState::visibility_cutoff_xid, LVRelState::vistest, and WalUsage::wal_fpi.

Referenced by lazy_scan_heap().
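
The page-level freeze decision reduces to three conditions: a tuple forces freezing (freeze_required), there is nothing to freeze anyway so the freeze path costs nothing, or freezing lets the page become all-frozen and pruning already emitted a full-page image. A standalone sketch of that predicate; should_freeze_page() is an illustrative stand-in, not PostgreSQL code:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the freeze-or-not decision; the parameter names mirror the
 * fields used in lazy_scan_prune(), but this is not the real code. */
static bool
should_freeze_page(bool freeze_required, int tuples_frozen,
                   bool all_visible, bool all_frozen,
                   bool pruning_emitted_fpi)
{
    return freeze_required ||
        tuples_frozen == 0 ||
        (all_visible && all_frozen && pruning_emitted_fpi);
}

int
main(void)
{
    /* Opportunistic case: page could become all-frozen and we already
     * paid for a WAL full-page image during pruning. */
    printf("%d\n", should_freeze_page(false, 5, true, true, true));   /* 1 */
    /* No forcing condition: freezing is deferred to a later VACUUM. */
    printf("%d\n", should_freeze_page(false, 5, true, true, false));  /* 0 */
    return 0;
}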

◆ lazy_scan_skip()

static BlockNumber lazy_scan_skip ( LVRelState * vacrel,
Buffer * vmbuffer,
BlockNumber  next_block,
bool * next_unskippable_allvis,
bool * skipping_current_range 
)
static

Definition at line 1293 of file vacuumlazy.c.

1295 {
1296  BlockNumber rel_pages = vacrel->rel_pages,
1297  next_unskippable_block = next_block,
1298  nskippable_blocks = 0;
1299  bool skipsallvis = false;
1300 
1301  *next_unskippable_allvis = true;
1302  while (next_unskippable_block < rel_pages)
1303  {
1304  uint8 mapbits = visibilitymap_get_status(vacrel->rel,
1305  next_unskippable_block,
1306  vmbuffer);
1307 
1308  if ((mapbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
1309  {
1310  Assert((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0);
1311  *next_unskippable_allvis = false;
1312  break;
1313  }
1314 
1315  /*
1316  * Caller must scan the last page to determine whether it has tuples
1317  * (caller must have the opportunity to set vacrel->nonempty_pages).
1318  * This rule avoids having lazy_truncate_heap() take access-exclusive
1319  * lock on rel to attempt a truncation that fails anyway, just because
1320  * there are tuples on the last page (it is likely that there will be
1321  * tuples on other nearby pages as well, but those can be skipped).
1322  *
1323  * Implement this by always treating the last block as unsafe to skip.
1324  */
1325  if (next_unskippable_block == rel_pages - 1)
1326  break;
1327 
1328  /* DISABLE_PAGE_SKIPPING makes all skipping unsafe */
1329  if (!vacrel->skipwithvm)
1330  {
1331  /* Caller shouldn't rely on all_visible_according_to_vm */
1332  *next_unskippable_allvis = false;
1333  break;
1334  }
1335 
1336  /*
1337  * Aggressive VACUUM caller can't skip pages just because they are
1338  * all-visible. They may still skip all-frozen pages, which can't
1339  * contain XIDs < OldestXmin (XIDs that aren't already frozen by now).
1340  */
1341  if ((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0)
1342  {
1343  if (vacrel->aggressive)
1344  break;
1345 
1346  /*
1347  * All-visible block is safe to skip in non-aggressive case. But
1348  * remember that the final range contains such a block for later.
1349  */
1350  skipsallvis = true;
1351  }
1352 
1353  vacuum_delay_point();
1354  next_unskippable_block++;
1355  nskippable_blocks++;
1356  }
1357 
1358  /*
1359  * We only skip a range with at least SKIP_PAGES_THRESHOLD consecutive
1360  * pages. Since we're reading sequentially, the OS should be doing
1361  * readahead for us, so there's no gain in skipping a page now and then.
1362  * Skipping such a range might even discourage sequential detection.
1363  *
1364  * This test also enables more frequent relfrozenxid advancement during
1365  * non-aggressive VACUUMs. If the range has any all-visible pages then
1366  * skipping makes updating relfrozenxid unsafe, which is a real downside.
1367  */
1368  if (nskippable_blocks < SKIP_PAGES_THRESHOLD)
1369  *skipping_current_range = false;
1370  else
1371  {
1372  *skipping_current_range = true;
1373  if (skipsallvis)
1374  vacrel->skippedallvis = true;
1375  }
1376 
1377  return next_unskippable_block;
1378 }
#define SKIP_PAGES_THRESHOLD
Definition: vacuumlazy.c:116

References LVRelState::aggressive, Assert(), LVRelState::rel, LVRelState::rel_pages, SKIP_PAGES_THRESHOLD, LVRelState::skippedallvis, LVRelState::skipwithvm, vacuum_delay_point(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_get_status().

Referenced by lazy_scan_heap().
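
The skipping policy can be summarized as: advance while the visibility map says blocks are skippable, never skip the last block, and only actually skip when the run is at least SKIP_PAGES_THRESHOLD (32) blocks long. A standalone sketch over an in-memory array of all-visible flags; next_unskippable() and FAKE_SKIP_PAGES_THRESHOLD are illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define FAKE_SKIP_PAGES_THRESHOLD 32

/*
 * Return the next block that must be scanned, starting at next_block, and
 * report whether the preceding all-visible run is long enough to skip.
 */
static unsigned
next_unskippable(const bool *all_visible, unsigned rel_pages,
                 unsigned next_block, bool *skip_range)
{
    unsigned    blk = next_block;
    unsigned    nskippable = 0;

    while (blk < rel_pages)
    {
        if (!all_visible[blk])
            break;
        if (blk == rel_pages - 1)   /* last block is never skipped */
            break;
        blk++;
        nskippable++;
    }

    *skip_range = (nskippable >= FAKE_SKIP_PAGES_THRESHOLD);
    return blk;
}

int
main(void)
{
    bool        vm[100];
    bool        skip;
    unsigned    next;

    for (int i = 0; i < 100; i++)
        vm[i] = (i < 40);       /* first 40 blocks all-visible */

    next = next_unskippable(vm, 100, 0, &skip);
    printf("next unskippable block = %u, skip range = %d\n", next, skip);
    return 0;
}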

◆ lazy_truncate_heap()

static void lazy_truncate_heap ( LVRelState * vacrel)
static

Definition at line 2831 of file vacuumlazy.c.

2832 {
2833  BlockNumber orig_rel_pages = vacrel->rel_pages;
2834  BlockNumber new_rel_pages;
2835  bool lock_waiter_detected;
2836  int lock_retry;
2837 
2838  /* Report that we are now truncating */
2839  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2840  PROGRESS_VACUUM_PHASE_TRUNCATE);
2841 
2842  /* Update error traceback information one last time */
2843  update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
2844  vacrel->nonempty_pages, InvalidOffsetNumber);
2845 
2846  /*
2847  * Loop until no more truncating can be done.
2848  */
2849  do
2850  {
2851  /*
2852  * We need full exclusive lock on the relation in order to do
2853  * truncation. If we can't get it, give up rather than waiting --- we
2854  * don't want to block other backends, and we don't want to deadlock
2855  * (which is quite possible considering we already hold a lower-grade
2856  * lock).
2857  */
2858  lock_waiter_detected = false;
2859  lock_retry = 0;
2860  while (true)
2861  {
2862  if (ConditionalLockRelation(vacrel->rel, AccessExclusiveLock))
2863  break;
2864 
2865  /*
2866  * Check for interrupts while trying to (re-)acquire the exclusive
2867  * lock.
2868  */
2869  CHECK_FOR_INTERRUPTS();
2870 
2871  if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
2872  VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
2873  {
2874  /*
2875  * We failed to establish the lock in the specified number of
2876  * retries. This means we give up truncating.
2877  */
2878  ereport(vacrel->verbose ? INFO : DEBUG2,
2879  (errmsg("\"%s\": stopping truncate due to conflicting lock request",
2880  vacrel->relname)));
2881  return;
2882  }
2883 
2884  (void) WaitLatch(MyLatch,
2885  WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
2886  VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL,
2887  WAIT_EVENT_VACUUM_TRUNCATE);
2888  ResetLatch(MyLatch);
2889  }
2890 
2891  /*
2892  * Now that we have exclusive lock, look to see if the rel has grown
2893  * whilst we were vacuuming with non-exclusive lock. If so, give up;
2894  * the newly added pages presumably contain non-deletable tuples.
2895  */
2896  new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
2897  if (new_rel_pages != orig_rel_pages)
2898  {
2899  /*
2900  * Note: we intentionally don't update vacrel->rel_pages with the
2901  * new rel size here. If we did, it would amount to assuming that
2902  * the new pages are empty, which is unlikely. Leaving the numbers
2903  * alone amounts to assuming that the new pages have the same
2904  * tuple density as existing ones, which is less unlikely.
2905  */
2906  UnlockRelation(vacrel->rel, AccessExclusiveLock);
2907  return;
2908  }
2909 
2910  /*
2911  * Scan backwards from the end to verify that the end pages actually
2912  * contain no tuples. This is *necessary*, not optional, because
2913  * other backends could have added tuples to these pages whilst we
2914  * were vacuuming.
2915  */
2916  new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
2917  vacrel->blkno = new_rel_pages;
2918 
2919  if (new_rel_pages >= orig_rel_pages)
2920  {
2921  /* can't do anything after all */
2922  UnlockRelation(vacrel->rel, AccessExclusiveLock);
2923  return;
2924  }
2925 
2926  /*
2927  * Okay to truncate.
2928  */
2929  RelationTruncate(vacrel->rel, new_rel_pages);
2930 
2931  /*
2932  * We can release the exclusive lock as soon as we have truncated.
2933  * Other backends can't safely access the relation until they have
2934  * processed the smgr invalidation that smgrtruncate sent out ... but
2935  * that should happen as part of standard invalidation processing once
2936  * they acquire lock on the relation.
2937  */
2938  UnlockRelation(vacrel->rel, AccessExclusiveLock);
2939 
2940  /*
2941  * Update statistics. Here, it *is* correct to adjust rel_pages
2942  * without also touching reltuples, since the tuple count wasn't
2943  * changed by the truncation.
2944  */
2945  vacrel->removed_pages += orig_rel_pages - new_rel_pages;
2946  vacrel->rel_pages = new_rel_pages;
2947 
2948  ereport(vacrel->verbose ? INFO : DEBUG2,
2949  (errmsg("table \"%s\": truncated %u to %u pages",
2950  vacrel->relname,
2951  orig_rel_pages, new_rel_pages)));
2952  orig_rel_pages = new_rel_pages;
2953  } while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
2954 }
struct Latch * MyLatch
Definition: globals.c:58
void ResetLatch(Latch *latch)
Definition: latch.c:699
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:492
#define WL_TIMEOUT
Definition: latch.h:128
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:130
#define WL_LATCH_SET
Definition: latch.h:125
void UnlockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:311
bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:276
#define PROGRESS_VACUUM_PHASE_TRUNCATE
Definition: progress.h:34
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:287
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL
Definition: vacuumlazy.c:87
#define VACUUM_TRUNCATE_LOCK_TIMEOUT
Definition: vacuumlazy.c:88
static BlockNumber count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
Definition: vacuumlazy.c:2962
@ WAIT_EVENT_VACUUM_TRUNCATE
Definition: wait_event.h:153

References AccessExclusiveLock, LVRelState::blkno, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), DEBUG2, ereport, errmsg(), INFO, InvalidOffsetNumber, MyLatch, LVRelState::nonempty_pages, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelState::rel, LVRelState::rel_pages, RelationGetNumberOfBlocks, RelationTruncate(), LVRelState::relname, LVRelState::removed_pages, ResetLatch(), UnlockRelation(), update_vacuum_error_info(), VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_TRUNCATE_LOCK_TIMEOUT, VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL, LVRelState::verbose, WAIT_EVENT_VACUUM_TRUNCATE, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

Referenced by heap_vacuum_rel().
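
The retry loop above bounds how long truncation will wait for the AccessExclusiveLock: with VACUUM_TRUNCATE_LOCK_TIMEOUT at 5000 ms and VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL at 50 ms, roughly 100 conditional lock attempts (about five seconds of waiting) are made before truncation is abandoned. A minimal standalone sketch of that retry shape follows; try_exclusive_lock() is a hypothetical stand-in, not the server's lock manager API, and the latch sleep is elided.

    #include <stdbool.h>
    #include <stdio.h>

    #define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50   /* ms, as in vacuumlazy.c */
    #define VACUUM_TRUNCATE_LOCK_TIMEOUT       5000 /* ms, as in vacuumlazy.c */

    /* Hypothetical stand-in: pretend the lock only becomes free on attempt 7. */
    static bool
    try_exclusive_lock(int attempt)
    {
        return attempt >= 7;
    }

    int
    main(void)
    {
        int lock_retry = 0;

        for (;;)
        {
            if (try_exclusive_lock(lock_retry))
            {
                printf("acquired lock after %d retries\n", lock_retry);
                break;
            }

            if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
                                VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
            {
                /* gave up: "stopping truncate due to conflicting lock request" */
                printf("giving up after %d retries\n", lock_retry - 1);
                return 0;
            }

            /* the real code sleeps on its latch for the wait interval here */
        }
        return 0;
    }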

◆ lazy_vacuum()

static void lazy_vacuum ( LVRelState * vacrel)
static

Definition at line 2190 of file vacuumlazy.c.

2191 {
2192  bool bypass;
2193 
2194  /* Should not end up here with no indexes */
2195  Assert(vacrel->nindexes > 0);
2196  Assert(vacrel->lpdead_item_pages > 0);
2197 
2198  if (!vacrel->do_index_vacuuming)
2199  {
2200  Assert(!vacrel->do_index_cleanup);
2201  vacrel->dead_items->num_items = 0;
2202  return;
2203  }
2204 
2205  /*
2206  * Consider bypassing index vacuuming (and heap vacuuming) entirely.
2207  *
2208  * We currently only do this in cases where the number of LP_DEAD items
2209  * for the entire VACUUM operation is close to zero. This avoids sharp
2210  * discontinuities in the duration and overhead of successive VACUUM
2211  * operations that run against the same table with a fixed workload.
2212  * Ideally, successive VACUUM operations will behave as if there are
2213  * exactly zero LP_DEAD items in cases where there are close to zero.
2214  *
2215  * This is likely to be helpful with a table that is continually affected
2216  * by UPDATEs that can mostly apply the HOT optimization, but occasionally
2217  * have small aberrations that lead to just a few heap pages retaining
2218  * only one or two LP_DEAD items. This is pretty common; even when the
2219  * DBA goes out of their way to make UPDATEs use HOT, it is practically
2220  * impossible to predict whether HOT will be applied in 100% of cases.
2221  * It's far easier to ensure that 99%+ of all UPDATEs against a table use
2222  * HOT through careful tuning.
2223  */
2224  bypass = false;
2225  if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
2226  {
2227  BlockNumber threshold;
2228 
2229  Assert(vacrel->num_index_scans == 0);
2230  Assert(vacrel->lpdead_items == vacrel->dead_items->num_items);
2231  Assert(vacrel->do_index_vacuuming);
2232  Assert(vacrel->do_index_cleanup);
2233 
2234  /*
2235  * This crossover point at which we'll start to do index vacuuming is
2236  * expressed as a percentage of the total number of heap pages in the
2237  * table that are known to have at least one LP_DEAD item. This is
2238  * much more important than the total number of LP_DEAD items, since
2239  * it's a proxy for the number of heap pages whose visibility map bits
2240  * cannot be set on account of bypassing index and heap vacuuming.
2241  *
2242  * We apply one further precautionary test: the space currently used
2243  * to store the TIDs (TIDs that now all point to LP_DEAD items) must
2244  * not exceed 32MB. This limits the risk that we will bypass index
2245  * vacuuming again and again until eventually there is a VACUUM whose
2246  * dead_items space is not CPU cache resident.
2247  *
2248  * We don't take any special steps to remember the LP_DEAD items (such
2249  * as counting them in our final update to the stats system) when the
2250  * optimization is applied. Though the accounting used in analyze.c's
2251  * acquire_sample_rows() will recognize the same LP_DEAD items as dead
2252  * rows in its own stats report, that's okay. The discrepancy should
2253  * be negligible. If this optimization is ever expanded to cover more
2254  * cases then this may need to be reconsidered.
2255  */
2256  threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
2257  bypass = (vacrel->lpdead_item_pages < threshold &&
2258  vacrel->lpdead_items < MAXDEADITEMS(32L * 1024L * 1024L));
2259  }
2260 
2261  if (bypass)
2262  {
2263  /*
2264  * There are almost zero TIDs. Behave as if there were precisely
2265  * zero: bypass index vacuuming, but do index cleanup.
2266  *
2267  * We expect that the ongoing VACUUM operation will finish very
2268  * quickly, so there is no point in considering speeding up as a
2269  * failsafe against wraparound failure. (Index cleanup is expected to
2270  * finish very quickly in cases where there were no ambulkdelete()
2271  * calls.)
2272  */
2273  vacrel->do_index_vacuuming = false;
2274  }
2275  else if (lazy_vacuum_all_indexes(vacrel))
2276  {
2277  /*
2278  * We successfully completed a round of index vacuuming. Do related
2279  * heap vacuuming now.
2280  */
2281  lazy_vacuum_heap_rel(vacrel);
2282  }
2283  else
2284  {
2285  /*
2286  * Failsafe case.
2287  *
2288  * We attempted index vacuuming, but didn't finish a full round/full
2289  * index scan. This happens when relfrozenxid or relminmxid is too
2290  * far in the past.
2291  *
2292  * From this point on the VACUUM operation will do no further index
2293  * vacuuming or heap vacuuming. This VACUUM operation won't end up
2294  * back here again.
2295  */
2296  Assert(vacrel->failsafe_active);
2297  }
2298 
2299  /*
2300  * Forget the LP_DEAD items that we just vacuumed (or just decided to not
2301  * vacuum)
2302  */
2303  vacrel->dead_items->num_items = 0;
2304 }
#define BYPASS_THRESHOLD_PAGES
Definition: vacuumlazy.c:94
static bool lazy_vacuum_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2315
static void lazy_vacuum_heap_rel(LVRelState *vacrel)
Definition: vacuumlazy.c:2411

References Assert(), BYPASS_THRESHOLD_PAGES, LVRelState::consider_bypass_optimization, LVRelState::dead_items, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::failsafe_active, lazy_vacuum_all_indexes(), lazy_vacuum_heap_rel(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAXDEADITEMS, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItems::num_items, and LVRelState::rel_pages.

Referenced by lazy_scan_heap().
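
To make the bypass crossover concrete: with BYPASS_THRESHOLD_PAGES at 0.02, index vacuuming is skipped only when fewer than 2% of the table's pages contain LP_DEAD items and the dead-item array stays small enough to remain cache resident. The standalone sketch below mirrors just that arithmetic; APPROX_MAX_BYPASS_ITEMS is an approximation of MAXDEADITEMS(32MB) (the real macro also subtracts the VacDeadItems header), and consider_bypass() is a simplified stand-in, not server code.

    #include <stdbool.h>
    #include <stdio.h>

    #define BYPASS_THRESHOLD_PAGES 0.02     /* i.e. 2% of rel_pages */

    /* Rough stand-in for MAXDEADITEMS(32MB): an ItemPointerData is 6 bytes */
    #define APPROX_MAX_BYPASS_ITEMS ((32L * 1024L * 1024L) / 6)

    static bool
    consider_bypass(long rel_pages, long lpdead_item_pages, long lpdead_items)
    {
        double threshold = (double) rel_pages * BYPASS_THRESHOLD_PAGES;

        return lpdead_item_pages < threshold &&
               lpdead_items < APPROX_MAX_BYPASS_ITEMS;
    }

    int
    main(void)
    {
        /* 1,000,000-page table: bypass allowed while < 20,000 pages have LP_DEAD items */
        printf("bypass: %d\n", consider_bypass(1000000, 15000, 90000));   /* 1 */
        printf("bypass: %d\n", consider_bypass(1000000, 25000, 90000));   /* 0 */
        return 0;
    }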

◆ lazy_vacuum_all_indexes()

static bool lazy_vacuum_all_indexes ( LVRelState * vacrel)
static

Definition at line 2315 of file vacuumlazy.c.

2316 {
2317  bool allindexes = true;
2318  double old_live_tuples = vacrel->rel->rd_rel->reltuples;
2319 
2320  Assert(vacrel->nindexes > 0);
2321  Assert(vacrel->do_index_vacuuming);
2322  Assert(vacrel->do_index_cleanup);
2323 
2324  /* Precheck for XID wraparound emergencies */
2325  if (lazy_check_wraparound_failsafe(vacrel))
2326  {
2327  /* Wraparound emergency -- don't even start an index scan */
2328  return false;
2329  }
2330 
2331  /* Report that we are now vacuuming indexes */
2332  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2333  PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
2334 
2335  if (!ParallelVacuumIsActive(vacrel))
2336  {
2337  for (int idx = 0; idx < vacrel->nindexes; idx++)
2338  {
2339  Relation indrel = vacrel->indrels[idx];
2340  IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2341 
2342  vacrel->indstats[idx] = lazy_vacuum_one_index(indrel, istat,
2343  old_live_tuples,
2344  vacrel);
2345 
2346  if (lazy_check_wraparound_failsafe(vacrel))
2347  {
2348  /* Wraparound emergency -- end current index scan */
2349  allindexes = false;
2350  break;
2351  }
2352  }
2353  }
2354  else
2355  {
2356  /* Outsource everything to parallel variant */
2357  parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples,
2358  vacrel->num_index_scans);
2359 
2360  /*
2361  * Do a postcheck to consider applying wraparound failsafe now. Note
2362  * that parallel VACUUM only gets the precheck and this postcheck.
2363  */
2364  if (lazy_check_wraparound_failsafe(vacrel))
2365  allindexes = false;
2366  }
2367 
2368  /*
2369  * We delete all LP_DEAD items from the first heap pass in all indexes on
2370  * each call here (except calls where we choose to do the failsafe). This
2371  * makes the next call to lazy_vacuum_heap_rel() safe (except in the event
2372  * of the failsafe triggering, which prevents the next call from taking
2373  * place).
2374  */
2375  Assert(vacrel->num_index_scans > 0 ||
2376  vacrel->dead_items->num_items == vacrel->lpdead_items);
2377  Assert(allindexes || vacrel->failsafe_active);
2378 
2379  /*
2380  * Increase and report the number of index scans.
2381  *
2382  * We deliberately include the case where we started a round of bulk
2383  * deletes that we weren't able to finish due to the failsafe triggering.
2384  */
2385  vacrel->num_index_scans++;
2386  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_INDEX_VACUUMS,
2387  vacrel->num_index_scans);
2388 
2389  return allindexes;
2390 }
#define PROGRESS_VACUUM_NUM_INDEX_VACUUMS
Definition: progress.h:25
#define PROGRESS_VACUUM_PHASE_VACUUM_INDEX
Definition: progress.h:31
static IndexBulkDeleteResult * lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
Definition: vacuumlazy.c:2700
void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans)

References Assert(), LVRelState::dead_items, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::failsafe_active, idx(), LVRelState::indrels, LVRelState::indstats, lazy_check_wraparound_failsafe(), lazy_vacuum_one_index(), LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItems::num_items, parallel_vacuum_bulkdel_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_param(), PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, LVRelState::pvs, RelationData::rd_rel, and LVRelState::rel.

Referenced by lazy_vacuum().
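
The serial path above interleaves the wraparound failsafe check with the per-index bulk deletes, so a long list of indexes cannot delay the failsafe indefinitely. A simplified standalone sketch of that control flow follows; vacuum_index() and failsafe_triggered() are hypothetical stand-ins for lazy_vacuum_one_index() and lazy_check_wraparound_failsafe(), and the parallel branch is omitted.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the real per-index and failsafe routines */
    static void vacuum_index(int idx)     { printf("bulk delete in index %d\n", idx); }
    static bool failsafe_triggered(void)  { return false; }

    /*
     * Mirrors the serial loop in lazy_vacuum_all_indexes(): returns true only
     * if every index completed a full round of bulk deletion.
     */
    static bool
    vacuum_all_indexes(int nindexes)
    {
        bool allindexes = true;

        if (failsafe_triggered())
            return false;                 /* precheck: don't even start a scan */

        for (int idx = 0; idx < nindexes; idx++)
        {
            vacuum_index(idx);

            if (failsafe_triggered())
            {
                allindexes = false;       /* end the current index scan early */
                break;
            }
        }
        return allindexes;
    }

    int
    main(void)
    {
        printf("completed all indexes: %s\n",
               vacuum_all_indexes(3) ? "yes" : "no");
        return 0;
    }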

◆ lazy_vacuum_heap_page()

static int lazy_vacuum_heap_page ( LVRelState * vacrel,
BlockNumber  blkno,
Buffer  buffer,
int  index,
Buffer  vmbuffer 
)
static

Definition at line 2499 of file vacuumlazy.c.

2501 {
2502  VacDeadItems *dead_items = vacrel->dead_items;
2503  Page page = BufferGetPage(buffer);
2504  OffsetNumber unused[MaxHeapTuplesPerPage];
2505  int nunused = 0;
2506  TransactionId visibility_cutoff_xid;
2507  bool all_frozen;
2508  LVSavedErrInfo saved_err_info;
2509 
2510  Assert(vacrel->nindexes == 0 || vacrel->do_index_vacuuming);
2511 
2512  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
2513 
2514  /* Update error traceback information */
2515  update_vacuum_error_info(vacrel, &saved_err_info,
2516  VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
2517  InvalidOffsetNumber);
2518 
2519  START_CRIT_SECTION();
2520 
2521  for (; index < dead_items->num_items; index++)
2522  {
2523  BlockNumber tblk;
2524  OffsetNumber toff;
2525  ItemId itemid;
2526 
2527  tblk = ItemPointerGetBlockNumber(&dead_items->items[index]);
2528  if (tblk != blkno)
2529  break; /* past end of tuples for this block */
2530  toff = ItemPointerGetOffsetNumber(&dead_items->items[index]);
2531  itemid = PageGetItemId(page, toff);
2532 
2533  Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
2534  ItemIdSetUnused(itemid);
2535  unused[nunused++] = toff;
2536  }
2537 
2538  Assert(nunused > 0);
2539 
2540  /* Attempt to truncate line pointer array now */
2541  PageTruncateLinePointerArray(page);
2542 
2543  /*
2544  * Mark buffer dirty before we write WAL.
2545  */
2546  MarkBufferDirty(buffer);
2547 
2548  /* XLOG stuff */
2549  if (RelationNeedsWAL(vacrel->rel))
2550  {
2551  xl_heap_vacuum xlrec;
2552  XLogRecPtr recptr;
2553 
2554  xlrec.nunused = nunused;
2555 
2556  XLogBeginInsert();
2557  XLogRegisterData((char *) &xlrec, SizeOfHeapVacuum);
2558 
2559  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
2560  XLogRegisterBufData(0, (char *) unused, nunused * sizeof(OffsetNumber));
2561 
2562  recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VACUUM);
2563 
2564  PageSetLSN(page, recptr);
2565  }
2566 
2567  /*
2568  * End critical section, so we safely can do visibility tests (which
2569  * possibly need to perform IO and allocate memory!). If we crash now the
2570  * page (including the corresponding vm bit) might not be marked all
2571  * visible, but that's fine. A later vacuum will fix that.
2572  */
2573  END_CRIT_SECTION();
2574 
2575  /*
2576  * Now that we have removed the LP_DEAD items from the page, once again
2577  * check if the page has become all-visible. The page is already marked
2578  * dirty, exclusively locked, and, if needed, a full page image has been
2579  * emitted.
2580  */
2581  Assert(!PageIsAllVisible(page));
2582  if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
2583  &all_frozen))
2584  {
2585  uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
2586 
2587  if (all_frozen)
2588  {
2589  Assert(!TransactionIdIsValid(visibility_cutoff_xid));
2590  flags |= VISIBILITYMAP_ALL_FROZEN;
2591  }
2592 
2593  PageSetAllVisible(page);
2594  visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr,
2595  vmbuffer, visibility_cutoff_xid, flags);
2596  }
2597 
2598  /* Revert to the previous phase information for error traceback */
2599  restore_vacuum_error_info(vacrel, &saved_err_info);
2600  return index;
2601 }
void PageTruncateLinePointerArray(Page page)
Definition: bufpage.c:835
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:388
#define XLOG_HEAP2_VACUUM
Definition: heapam_xlog.h:55
#define SizeOfHeapVacuum
Definition: heapam_xlog.h:265
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
uint64 XLogRecPtr
Definition: xlogdefs.h:21
void XLogRegisterData(char *data, uint32 len)
Definition: xloginsert.c:351
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:451
void XLogRegisterBufData(uint8 block_id, char *data, uint32 len)
Definition: xloginsert.c:389
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:243
void XLogBeginInsert(void)
Definition: xloginsert.c:150
#define REGBUF_STANDARD
Definition: xloginsert.h:34

References Assert(), BufferGetPage(), LVRelState::dead_items, LVRelState::do_index_vacuuming, END_CRIT_SECTION, heap_page_is_all_visible(), InvalidOffsetNumber, InvalidXLogRecPtr, ItemIdHasStorage, ItemIdIsDead, ItemIdSetUnused, ItemPointerGetBlockNumber(), ItemPointerGetOffsetNumber(), VacDeadItems::items, MarkBufferDirty(), MaxHeapTuplesPerPage, LVRelState::nindexes, VacDeadItems::num_items, xl_heap_vacuum::nunused, PageGetItemId(), PageIsAllVisible(), PageSetAllVisible(), PageSetLSN(), PageTruncateLinePointerArray(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, REGBUF_STANDARD, LVRelState::rel, RelationNeedsWAL, restore_vacuum_error_info(), SizeOfHeapVacuum, START_CRIT_SECTION, TransactionIdIsValid, update_vacuum_error_info(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), XLOG_HEAP2_VACUUM, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by lazy_scan_heap(), and lazy_vacuum_heap_rel().
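
The inner loop above consumes dead_items entries only for the current block: the TID array is sorted, so it stops at the first TID whose block number differs and returns the new position to the caller. A standalone sketch of that cursor pattern over a sorted (block, offset) array follows; the Tid struct and mark_unused_for_block() are simplified stand-ins for ItemPointerData and the real page surgery.

    #include <stdio.h>

    /* Simplified stand-in for ItemPointerData: (block, offset) pairs, sorted */
    typedef struct { unsigned blk; unsigned off; } Tid;

    /*
     * Mirrors the cursor in lazy_vacuum_heap_page(): process the entries for
     * "blkno" starting at "index", return the index of the first entry that
     * belongs to a later block.
     */
    static int
    mark_unused_for_block(const Tid *items, int nitems, int index, unsigned blkno)
    {
        for (; index < nitems; index++)
        {
            if (items[index].blk != blkno)
                break;                    /* past end of tuples for this block */
            printf("block %u: set line pointer %u LP_UNUSED\n",
                   items[index].blk, items[index].off);
        }
        return index;
    }

    int
    main(void)
    {
        Tid dead[] = {{3, 1}, {3, 7}, {5, 2}, {5, 4}, {5, 9}};
        int n = 5, index = 0;

        index = mark_unused_for_block(dead, n, index, 3);   /* handles two TIDs */
        index = mark_unused_for_block(dead, n, index, 5);   /* handles three TIDs */
        printf("consumed %d of %d dead items\n", index, n);
        return 0;
    }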

◆ lazy_vacuum_heap_rel()

static void lazy_vacuum_heap_rel ( LVRelState * vacrel)
static

Definition at line 2411 of file vacuumlazy.c.

2412 {
2413  int index = 0;
2414  BlockNumber vacuumed_pages = 0;
2415  Buffer vmbuffer = InvalidBuffer;
2416  LVSavedErrInfo saved_err_info;
2417 
2418  Assert(vacrel->do_index_vacuuming);
2419  Assert(vacrel->do_index_cleanup);
2420  Assert(vacrel->num_index_scans > 0);
2421 
2422  /* Report that we are now vacuuming the heap */
2423  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2424  PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
2425 
2426  /* Update error traceback information */
2427  update_vacuum_error_info(vacrel, &saved_err_info,
2428  VACUUM_ERRCB_PHASE_VACUUM_HEAP,
2429  InvalidBlockNumber, InvalidOffsetNumber);
2430 
2431  while (index < vacrel->dead_items->num_items)
2432  {
2433  BlockNumber blkno;
2434  Buffer buf;
2435  Page page;
2436  Size freespace;
2437 
2438  vacuum_delay_point();
2439 
2440  blkno = ItemPointerGetBlockNumber(&vacrel->dead_items->items[index]);
2441  vacrel->blkno = blkno;
2442 
2443  /*
2444  * Pin the visibility map page in case we need to mark the page
2445  * all-visible. In most cases this will be very cheap, because we'll
2446  * already have the correct page pinned anyway.
2447  */
2448  visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
2449 
2450  /* We need a non-cleanup exclusive lock to mark dead_items unused */
2451  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
2452  vacrel->bstrategy);
2453  LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2454  index = lazy_vacuum_heap_page(vacrel, blkno, buf, index, vmbuffer);
2455 
2456  /* Now that we've vacuumed the page, record its available space */
2457  page = BufferGetPage(buf);
2458  freespace = PageGetHeapFreeSpace(page);
2459 
2460  UnlockReleaseBuffer(buf);
2461  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
2462  vacuumed_pages++;
2463  }
2464 
2465  vacrel->blkno = InvalidBlockNumber;
2466  if (BufferIsValid(vmbuffer))
2467  ReleaseBuffer(vmbuffer);
2468 
2469  /*
2470  * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
2471  * the second heap pass. No more, no less.
2472  */
2473  Assert(index > 0);
2474  Assert(vacrel->num_index_scans > 1 ||
2475  (index == vacrel->lpdead_items &&
2476  vacuumed_pages == vacrel->lpdead_item_pages));
2477 
2478  ereport(DEBUG2,
2479  (errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
2480  vacrel->relname, (long long) index, vacuumed_pages)));
2481 
2482  /* Revert to the previous phase information for error traceback */
2483  restore_vacuum_error_info(vacrel, &saved_err_info);
2484 }
#define PROGRESS_VACUUM_PHASE_VACUUM_HEAP
Definition: progress.h:32

References Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_EXCLUSIVE, BufferGetPage(), BufferIsValid(), LVRelState::dead_items, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, ereport, errmsg(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, ItemPointerGetBlockNumber(), VacDeadItems::items, lazy_vacuum_heap_page(), LockBuffer(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAIN_FORKNUM, LVRelState::num_index_scans, PageGetHeapFreeSpace(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, RBM_NORMAL, ReadBufferExtended(), RecordPageWithFreeSpace(), LVRelState::rel, ReleaseBuffer(), LVRelState::relname, restore_vacuum_error_info(), UnlockReleaseBuffer(), update_vacuum_error_info(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, and visibilitymap_pin().

Referenced by lazy_vacuum().
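
The outer loop reads the block number of the next unprocessed dead item, pins and locks that heap page, and lets the per-block routine advance the shared cursor, so each dead TID is visited exactly once across the whole second pass. Building on the per-block sketch shown under lazy_vacuum_heap_page(), a standalone illustration of the driver loop follows; buffer pinning, locking and free-space recording are elided, and the types are simplified stand-ins.

    #include <stdio.h>

    typedef struct { unsigned blk; unsigned off; } Tid;   /* simplified ItemPointerData */

    /* Same per-block helper as in the lazy_vacuum_heap_page() sketch */
    static int
    mark_unused_for_block(const Tid *items, int nitems, int index, unsigned blkno)
    {
        for (; index < nitems; index++)
            if (items[index].blk != blkno)
                break;
        return index;
    }

    int
    main(void)
    {
        Tid dead[] = {{3, 1}, {3, 7}, {5, 2}, {5, 4}, {9, 6}};
        int nitems = 5, index = 0, vacuumed_pages = 0;

        while (index < nitems)
        {
            unsigned blkno = dead[index].blk;   /* next block with dead items */

            /* the real code pins the vm page, reads and exclusive-locks blkno here */
            index = mark_unused_for_block(dead, nitems, index, blkno);
            vacuumed_pages++;                   /* then records free space for blkno */
        }

        printf("removed %d dead item identifiers in %d pages\n", index, vacuumed_pages);
        return 0;
    }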

◆ lazy_vacuum_one_index()

static IndexBulkDeleteResult * lazy_vacuum_one_index ( Relation  indrel,
IndexBulkDeleteResult *  istat,
double  reltuples,
LVRelState *  vacrel 
)
static

Definition at line 2700 of file vacuumlazy.c.

2702 {
2703  IndexVacuumInfo ivinfo;
2704  LVSavedErrInfo saved_err_info;
2705 
2706  ivinfo.index = indrel;
2707  ivinfo.analyze_only = false;
2708  ivinfo.report_progress = false;
2709  ivinfo.estimated_count = true;
2710  ivinfo.message_level = DEBUG2;
2711  ivinfo.num_heap_tuples = reltuples;
2712  ivinfo.strategy = vacrel->bstrategy;
2713 
2714  /*
2715  * Update error traceback information.
2716  *
2717  * The index name is saved during this phase and restored immediately
2718  * after this phase. See vacuum_error_callback.
2719  */
2720  Assert(vacrel->indname == NULL);
2721  vacrel->indname = pstrdup(RelationGetRelationName(indrel));
2722  update_vacuum_error_info(vacrel, &saved_err_info,
2723  VACUUM_ERRCB_PHASE_VACUUM_INDEX, InvalidBlockNumber,
2724  InvalidOffsetNumber);
2725 
2726  /* Do bulk deletion */
2727  istat = vac_bulkdel_one_index(&ivinfo, istat, (void *) vacrel->dead_items);
2728 
2729  /* Revert to the previous phase information for error traceback */
2730  restore_vacuum_error_info(vacrel, &saved_err_info);
2731  pfree(vacrel->indname);
2732  vacrel->indname = NULL;
2733 
2734  return istat;
2735 }
IndexBulkDeleteResult * vac_bulkdel_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat, VacDeadItems *dead_items)
Definition: vacuum.c:2337

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, LVRelState::dead_items, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_bulkdel_one_index(), and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by lazy_vacuum_all_indexes().
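
One detail worth noting: the IndexBulkDeleteResult pointer returned here is stored back into vacrel->indstats[idx] by lazy_vacuum_all_indexes() and passed in again on the next round of index vacuuming, so per-index statistics can be carried across multiple bulk-delete passes within one VACUUM. A standalone sketch of that accumulator-threading pattern follows; BulkStats and bulkdel_one_index() are simplified stand-ins, not the real index AM interface.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct
    {
        long tuples_removed;        /* simplified stand-in for IndexBulkDeleteResult */
    } BulkStats;

    /*
     * Mirrors the calling convention of lazy_vacuum_one_index(): allocate the
     * stats struct lazily on the first pass, then keep feeding it back in.
     */
    static BulkStats *
    bulkdel_one_index(BulkStats *stats, long ndead_this_round)
    {
        if (stats == NULL)
            stats = calloc(1, sizeof(BulkStats));
        stats->tuples_removed += ndead_this_round;
        return stats;
    }

    int
    main(void)
    {
        BulkStats *stats = NULL;

        stats = bulkdel_one_index(stats, 1000);   /* first round of index vacuuming */
        stats = bulkdel_one_index(stats, 250);    /* second round, same stats object */
        printf("tuples removed across passes: %ld\n", stats->tuples_removed);
        free(stats);
        return 0;
    }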

◆ restore_vacuum_error_info()

static void restore_vacuum_error_info ( LVRelState * vacrel,
const LVSavedErrInfo * saved_vacrel 
)
static

Definition at line 3460 of file vacuumlazy.c.

3462 {
3463  vacrel->blkno = saved_vacrel->blkno;
3464  vacrel->offnum = saved_vacrel->offnum;
3465  vacrel->phase = saved_vacrel->phase;
3466 }
BlockNumber blkno
Definition: vacuumlazy.c:238
VacErrPhase phase
Definition: vacuumlazy.c:240
OffsetNumber offnum
Definition: vacuumlazy.c:239

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().

◆ should_attempt_truncation()

static bool should_attempt_truncation ( LVRelState * vacrel)
static

Definition at line 2810 of file vacuumlazy.c.

2811 {
2812  BlockNumber possibly_freeable;
2813 
2814  if (!vacrel->do_rel_truncate || vacrel->failsafe_active ||
2815  old_snapshot_threshold >= 0)
2816  return false;
2817 
2818  possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
2819  if (possibly_freeable > 0 &&
2820  (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
2821  possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
2822  return true;
2823 
2824  return false;
2825 }
int old_snapshot_threshold
Definition: snapmgr.c:79
#define REL_TRUNCATE_MINIMUM
Definition: vacuumlazy.c:76
#define REL_TRUNCATE_FRACTION
Definition: vacuumlazy.c:77

References LVRelState::do_rel_truncate, LVRelState::failsafe_active, LVRelState::nonempty_pages, old_snapshot_threshold, LVRelState::rel_pages, REL_TRUNCATE_FRACTION, and REL_TRUNCATE_MINIMUM.

Referenced by heap_vacuum_rel().
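
The thresholds make truncation worthwhile only when a meaningful tail of the table is empty: at least REL_TRUNCATE_MINIMUM (1000) pages, or at least 1/REL_TRUNCATE_FRACTION (one sixteenth) of the table, whichever condition is met. The standalone sketch below mirrors just that page-count arithmetic; the do_rel_truncate, failsafe_active and old_snapshot_threshold preconditions checked above are deliberately omitted.

    #include <stdbool.h>
    #include <stdio.h>

    #define REL_TRUNCATE_MINIMUM  1000
    #define REL_TRUNCATE_FRACTION 16

    /* Mirrors the page-count test in should_attempt_truncation() */
    static bool
    worth_truncating(unsigned rel_pages, unsigned nonempty_pages)
    {
        unsigned possibly_freeable = rel_pages - nonempty_pages;

        return possibly_freeable > 0 &&
               (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
                possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);
    }

    int
    main(void)
    {
        /* 700 empty trailing pages out of 10,000: >= 10000/16 = 625, so truncate */
        printf("%d\n", worth_truncating(10000, 9300));   /* 1 */
        /* 500 empty trailing pages out of 10,000: below both thresholds */
        printf("%d\n", worth_truncating(10000, 9500));   /* 0 */
        return 0;
    }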

◆ update_relstats_all_indexes()

static void update_relstats_all_indexes ( LVRelState * vacrel)
static

Definition at line 3342 of file vacuumlazy.c.

3343 {
3344  Relation *indrels = vacrel->indrels;
3345  int nindexes = vacrel->nindexes;
3346  IndexBulkDeleteResult **indstats = vacrel->indstats;
3347 
3348  Assert(vacrel->do_index_cleanup);
3349 
3350  for (int idx = 0; idx < nindexes; idx++)
3351  {
3352  Relation indrel = indrels[idx];
3353  IndexBulkDeleteResult *istat = indstats[idx];
3354 
3355  if (istat == NULL || istat->estimated_count)
3356  continue;
3357 
3358  /* Update index statistics */
3359  vac_update_relstats(indrel,
3360  istat->num_pages,
3361  istat->num_index_tuples,
3362  0,
3363  false,
3364  InvalidTransactionId,
3365  InvalidMultiXactId,
3366  NULL, NULL, false);
3367  }
3368 }
bool estimated_count
Definition: genam.h:77
double num_index_tuples
Definition: genam.h:78

References Assert(), LVRelState::do_index_cleanup, IndexBulkDeleteResult::estimated_count, idx(), LVRelState::indrels, LVRelState::indstats, InvalidMultiXactId, InvalidTransactionId, LVRelState::nindexes, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, and vac_update_relstats().

Referenced by heap_vacuum_rel().

◆ update_vacuum_error_info()

static void update_vacuum_error_info ( LVRelState * vacrel,
LVSavedErrInfo *  saved_vacrel,
int  phase,
BlockNumber  blkno,
OffsetNumber  offnum 
)
static

Definition at line 3441 of file vacuumlazy.c.

3443 {
3444  if (saved_vacrel)
3445  {
3446  saved_vacrel->offnum = vacrel->offnum;
3447  saved_vacrel->blkno = vacrel->blkno;
3448  saved_vacrel->phase = vacrel->phase;
3449  }
3450 
3451  vacrel->blkno = blkno;
3452  vacrel->offnum = offnum;
3453  vacrel->phase = phase;
3454 }

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_scan_heap(), lazy_truncate_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().
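
update_vacuum_error_info() and restore_vacuum_error_info() form a save/overwrite/restore pair around each sub-phase, so the error-context callback always reports the location currently being processed. A standalone sketch of that pattern with a simplified error-info struct follows; the field names mirror LVSavedErrInfo, but ErrInfo, the phase constants and the global state are stand-ins, not server code.

    #include <stdio.h>

    typedef struct
    {
        unsigned blkno;
        unsigned offnum;
        int      phase;
    } ErrInfo;                      /* simplified stand-in for LVSavedErrInfo */

    static ErrInfo current;         /* plays the role of the fields in LVRelState */

    /* Save the current location (if requested) and switch to a new one */
    static void
    update_error_info(ErrInfo *saved, int phase, unsigned blkno, unsigned offnum)
    {
        if (saved)
            *saved = current;
        current.phase = phase;
        current.blkno = blkno;
        current.offnum = offnum;
    }

    /* Revert to the previously saved location */
    static void
    restore_error_info(const ErrInfo *saved)
    {
        current = *saved;
    }

    int
    main(void)
    {
        ErrInfo saved;

        update_error_info(NULL, /* "scan heap" */ 1, 0, 0);
        update_error_info(&saved, /* "vacuum heap" */ 3, 42, 0);   /* enter sub-phase */
        printf("during sub-phase: phase=%d blkno=%u\n", current.phase, current.blkno);
        restore_error_info(&saved);                                /* leave sub-phase */
        printf("after restore:   phase=%d blkno=%u\n", current.phase, current.blkno);
        return 0;
    }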

◆ vacuum_error_callback()

static void vacuum_error_callback ( void *  arg)
static

Definition at line 3377 of file vacuumlazy.c.

3378 {
3379  LVRelState *errinfo = arg;
3380 
3381  switch (errinfo->phase)
3382  {
3383  case VACUUM_ERRCB_PHASE_SCAN_HEAP:
3384  if (BlockNumberIsValid(errinfo->blkno))
3385  {
3386  if (OffsetNumberIsValid(errinfo->offnum))
3387  errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
3388  errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3389  else
3390  errcontext("while scanning block %u of relation \"%s.%s\"",
3391  errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3392  }
3393  else
3394  errcontext("while scanning relation \"%s.%s\"",
3395  errinfo->relnamespace, errinfo->relname);
3396  break;
3397 
3398  case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
3399  if (BlockNumberIsValid(errinfo->blkno))
3400  {
3401  if (OffsetNumberIsValid(errinfo->offnum))
3402  errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
3403  errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3404  else
3405  errcontext("while vacuuming block %u of relation \"%s.%s\"",
3406  errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3407  }
3408  else
3409  errcontext("while vacuuming relation \"%s.%s\"",
3410  errinfo->relnamespace, errinfo->relname);
3411  break;
3412 
3413  case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
3414  errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
3415  errinfo->indname, errinfo->relnamespace, errinfo->relname);
3416  break;
3417 
3418  case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
3419  errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
3420  errinfo->indname, errinfo->relnamespace, errinfo->relname);
3421  break;
3422 
3423  case VACUUM_ERRCB_PHASE_TRUNCATE:
3424  if (BlockNumberIsValid(errinfo->blkno))
3425  errcontext("while truncating relation \"%s.%s\" to %u blocks",
3426  errinfo->relnamespace, errinfo->relname, errinfo->blkno);
3427  break;
3428 
3429  case VACUUM_ERRCB_PHASE_UNKNOWN:
3430  default:
3431  return; /* do nothing; the errinfo may not be
3432  * initialized */
3433  }
3434 }
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
#define errcontext
Definition: elog.h:196
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
void * arg

References arg, LVRelState::blkno, BlockNumberIsValid(), errcontext, LVRelState::indname, LVRelState::offnum, OffsetNumberIsValid, LVRelState::phase, LVRelState::relname, LVRelState::relnamespace, VACUUM_ERRCB_PHASE_INDEX_CLEANUP, VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_ERRCB_PHASE_UNKNOWN, VACUUM_ERRCB_PHASE_VACUUM_HEAP, and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by heap_vacuum_rel().
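
The callback tailors its message to the current phase and to which of blkno and offnum are valid, and deliberately does nothing in the UNKNOWN phase since the error-info fields may not be initialized yet. A standalone sketch of the same dispatch shape follows; fprintf stands in for errcontext(), the sign tests for BlockNumberIsValid()/OffsetNumberIsValid(), and the enum and struct are simplified stand-ins.

    #include <stdio.h>

    enum phase { PH_UNKNOWN, PH_SCAN_HEAP, PH_VACUUM_HEAP };

    typedef struct
    {
        enum phase  phase;
        long        blkno;      /* < 0 means "invalid", like InvalidBlockNumber */
        long        offnum;     /* <= 0 means "invalid", like InvalidOffsetNumber */
        const char *relname;
    } ErrState;

    /* fprintf plays the role of errcontext() here */
    static void
    error_callback(const ErrState *e)
    {
        switch (e->phase)
        {
            case PH_SCAN_HEAP:
                if (e->blkno >= 0 && e->offnum > 0)
                    fprintf(stderr, "while scanning block %ld offset %ld of \"%s\"\n",
                            e->blkno, e->offnum, e->relname);
                else if (e->blkno >= 0)
                    fprintf(stderr, "while scanning block %ld of \"%s\"\n",
                            e->blkno, e->relname);
                else
                    fprintf(stderr, "while scanning relation \"%s\"\n", e->relname);
                break;
            case PH_VACUUM_HEAP:
                fprintf(stderr, "while vacuuming \"%s\"\n", e->relname);
                break;
            case PH_UNKNOWN:
            default:
                break;          /* do nothing; state may not be initialized */
        }
    }

    int
    main(void)
    {
        ErrState e = {PH_SCAN_HEAP, 17, -1, "public.foo"};
        error_callback(&e);
        return 0;
    }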