PostgreSQL Source Code  git master
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/amapi.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "catalog/index.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "optimizer/paths.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "tcop/tcopprot.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"

Go to the source code of this file.

Data Structures

struct  LVRelState
 
struct  LVPagePruneState
 
struct  LVSavedErrInfo
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */
 
#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define ParallelVacuumIsActive(vacrel)   ((vacrel)->pvs != NULL)
 

Typedefs

typedef struct LVRelState LVRelState
 
typedef struct LVPagePruneState LVPagePruneState
 
typedef struct LVSavedErrInfo LVSavedErrInfo
 

Enumerations

enum  VacErrPhase {
  VACUUM_ERRCB_PHASE_UNKNOWN , VACUUM_ERRCB_PHASE_SCAN_HEAP , VACUUM_ERRCB_PHASE_VACUUM_INDEX , VACUUM_ERRCB_PHASE_VACUUM_HEAP ,
  VACUUM_ERRCB_PHASE_INDEX_CLEANUP , VACUUM_ERRCB_PHASE_TRUNCATE
}
 

Functions

static void lazy_scan_heap (LVRelState *vacrel)
 
static BlockNumber lazy_scan_skip (LVRelState *vacrel, Buffer *vmbuffer, BlockNumber next_block, bool *next_unskippable_allvis, bool *skipping_current_range)
 
static bool lazy_scan_new_or_empty (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
 
static void lazy_scan_prune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, LVPagePruneState *prunestate)
 
static bool lazy_scan_noprune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *hastup, bool *recordfreespace)
 
static void lazy_vacuum (LVRelState *vacrel)
 
static bool lazy_vacuum_all_indexes (LVRelState *vacrel)
 
static void lazy_vacuum_heap_rel (LVRelState *vacrel)
 
static int lazy_vacuum_heap_page (LVRelState *vacrel, BlockNumber blkno, Buffer buffer, int index, Buffer *vmbuffer)
 
static bool lazy_check_wraparound_failsafe (LVRelState *vacrel)
 
static void lazy_cleanup_all_indexes (LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_vacuum_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_cleanup_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
 
static bool should_attempt_truncation (LVRelState *vacrel)
 
static void lazy_truncate_heap (LVRelState *vacrel)
 
static BlockNumber count_nondeletable_pages (LVRelState *vacrel, bool *lock_waiter_detected)
 
static void dead_items_alloc (LVRelState *vacrel, int nworkers)
 
static void dead_items_cleanup (LVRelState *vacrel)
 
static bool heap_page_is_all_visible (LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
 
static void update_relstats_all_indexes (LVRelState *vacrel)
 
static void vacuum_error_callback (void *arg)
 
static void update_vacuum_error_info (LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
 
static void restore_vacuum_error_info (LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
 
void heap_vacuum_rel (Relation rel, VacuumParams *params, BufferAccessStrategy bstrategy)
 
static int dead_items_max_items (LVRelState *vacrel)
 

Macro Definition Documentation

◆ BYPASS_THRESHOLD_PAGES

#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */

Definition at line 94 of file vacuumlazy.c.
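
This threshold drives the index-vacuuming bypass optimization in lazy_vacuum(): when only a tiny fraction of heap pages carry LP_DEAD items, index vacuuming and the second heap pass can be skipped. A simplified, hedged sketch of the test (the real code also bounds the total number of LP_DEAD TIDs and honors consider_bypass_optimization):

static bool
bypass_index_vacuuming(BlockNumber rel_pages, BlockNumber lpdead_item_pages)
{
	/* Skip index vacuuming when LP_DEAD items appear on < 2% of rel_pages */
	double		threshold = (double) rel_pages * BYPASS_THRESHOLD_PAGES;

	return lpdead_item_pages < threshold;
}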

◆ FAILSAFE_EVERY_PAGES

#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 99 of file vacuumlazy.c.
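
With the default 8192-byte BLCKSZ this works out to (4 * 1024^3) / 8192 = 524288 blocks, so the wraparound failsafe condition is re-evaluated after roughly every 4GB of heap processed.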

◆ ParallelVacuumIsActive

#define ParallelVacuumIsActive (   vacrel)    ((vacrel)->pvs != NULL)

Definition at line 127 of file vacuumlazy.c.

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 121 of file vacuumlazy.c.

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 77 of file vacuumlazy.c.

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 76 of file vacuumlazy.c.

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 115 of file vacuumlazy.c.

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 108 of file vacuumlazy.c.
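
With the default 8192-byte BLCKSZ this works out to (8 * 1024^3) / 8192 = 1048576 blocks, i.e. when the table has no indexes the free space map is vacuumed after approximately every 8GB of heap processed.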

◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 86 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 88 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 87 of file vacuumlazy.c.
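
Taken together, these three settings shape the relation-truncation protocol: lazy_truncate_heap() tries to take AccessExclusiveLock without blocking, pausing VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL between attempts and giving up after VACUUM_TRUNCATE_LOCK_TIMEOUT, while count_nondeletable_pages() (below) suspends the backward scan if another backend has been waiting on the lock for more than VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL. A simplified, hedged sketch of the acquisition loop (the actual code waits on the process latch rather than sleeping unconditionally, and rechecks rel_pages once the lock is held):

int			lock_retry = 0;

while (!ConditionalLockRelation(vacrel->rel, AccessExclusiveLock))
{
	if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
						VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
		return;					/* give up on truncation for this VACUUM */

	pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL * 1000L);
}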

Typedef Documentation

◆ LVPagePruneState

typedef struct LVPagePruneState LVPagePruneState

◆ LVRelState

typedef struct LVRelState LVRelState

◆ LVSavedErrInfo

typedef struct LVSavedErrInfo LVSavedErrInfo

Enumeration Type Documentation

◆ VacErrPhase

Enumerator
VACUUM_ERRCB_PHASE_UNKNOWN 
VACUUM_ERRCB_PHASE_SCAN_HEAP 
VACUUM_ERRCB_PHASE_VACUUM_INDEX 
VACUUM_ERRCB_PHASE_VACUUM_HEAP 
VACUUM_ERRCB_PHASE_INDEX_CLEANUP 
VACUUM_ERRCB_PHASE_TRUNCATE 

Definition at line 130 of file vacuumlazy.c.

130 typedef enum
131 {
132  VACUUM_ERRCB_PHASE_UNKNOWN,
133  VACUUM_ERRCB_PHASE_SCAN_HEAP,
134  VACUUM_ERRCB_PHASE_VACUUM_INDEX,
135  VACUUM_ERRCB_PHASE_VACUUM_HEAP,
136  VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
137  VACUUM_ERRCB_PHASE_TRUNCATE
138 } VacErrPhase;
VacErrPhase
Definition: vacuumlazy.c:131
@ VACUUM_ERRCB_PHASE_SCAN_HEAP
Definition: vacuumlazy.c:133
@ VACUUM_ERRCB_PHASE_VACUUM_INDEX
Definition: vacuumlazy.c:134
@ VACUUM_ERRCB_PHASE_TRUNCATE
Definition: vacuumlazy.c:137
@ VACUUM_ERRCB_PHASE_INDEX_CLEANUP
Definition: vacuumlazy.c:136
@ VACUUM_ERRCB_PHASE_VACUUM_HEAP
Definition: vacuumlazy.c:135
@ VACUUM_ERRCB_PHASE_UNKNOWN
Definition: vacuumlazy.c:132

Function Documentation

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages ( LVRelState * vacrel,
bool * lock_waiter_detected 
)
static

Definition at line 2934 of file vacuumlazy.c.

2935 {
2936  BlockNumber blkno;
2937  BlockNumber prefetchedUntil;
2938  instr_time starttime;
2939 
2940  /* Initialize the starttime if we check for conflicting lock requests */
2941  INSTR_TIME_SET_CURRENT(starttime);
2942 
2943  /*
2944  * Start checking blocks at what we believe relation end to be and move
2945  * backwards. (Strange coding of loop control is needed because blkno is
2946  * unsigned.) To make the scan faster, we prefetch a few blocks at a time
2947  * in forward direction, so that OS-level readahead can kick in.
2948  */
2949  blkno = vacrel->rel_pages;
2950  StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
2951  "prefetch size must be power of 2");
2952  prefetchedUntil = InvalidBlockNumber;
2953  while (blkno > vacrel->nonempty_pages)
2954  {
2955  Buffer buf;
2956  Page page;
2957  OffsetNumber offnum,
2958  maxoff;
2959  bool hastup;
2960 
2961  /*
2962  * Check if another process requests a lock on our relation. We are
2963  * holding an AccessExclusiveLock here, so they will be waiting. We
2964  * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
2965  * only check if that interval has elapsed once every 32 blocks to
2966  * keep the number of system calls and actual shared lock table
2967  * lookups to a minimum.
2968  */
2969  if ((blkno % 32) == 0)
2970  {
2971  instr_time currenttime;
2972  instr_time elapsed;
2973 
2974  INSTR_TIME_SET_CURRENT(currenttime);
2975  elapsed = currenttime;
2976  INSTR_TIME_SUBTRACT(elapsed, starttime);
2977  if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
2978  >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
2979  {
2980  if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
2981  {
2982  ereport(vacrel->verbose ? INFO : DEBUG2,
2983  (errmsg("table \"%s\": suspending truncate due to conflicting lock request",
2984  vacrel->relname)));
2985 
2986  *lock_waiter_detected = true;
2987  return blkno;
2988  }
2989  starttime = currenttime;
2990  }
2991  }
2992 
2993  /*
2994  * We don't insert a vacuum delay point here, because we have an
2995  * exclusive lock on the table which we want to hold for as short a
2996  * time as possible. We still need to check for interrupts however.
2997  */
2998  CHECK_FOR_INTERRUPTS();
2999 
3000  blkno--;
3001 
3002  /* If we haven't prefetched this lot yet, do so now. */
3003  if (prefetchedUntil > blkno)
3004  {
3005  BlockNumber prefetchStart;
3006  BlockNumber pblkno;
3007 
3008  prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
3009  for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
3010  {
3011  PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
3012  CHECK_FOR_INTERRUPTS();
3013  }
3014  prefetchedUntil = prefetchStart;
3015  }
3016 
3017  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
3018  vacrel->bstrategy);
3019 
3020  /* In this phase we only need shared access to the buffer */
3021  LockBuffer(buf, BUFFER_LOCK_SHARE);
3022 
3023  page = BufferGetPage(buf);
3024 
3025  if (PageIsNew(page) || PageIsEmpty(page))
3026  {
3027  UnlockReleaseBuffer(buf);
3028  continue;
3029  }
3030 
3031  hastup = false;
3032  maxoff = PageGetMaxOffsetNumber(page);
3033  for (offnum = FirstOffsetNumber;
3034  offnum <= maxoff;
3035  offnum = OffsetNumberNext(offnum))
3036  {
3037  ItemId itemid;
3038 
3039  itemid = PageGetItemId(page, offnum);
3040 
3041  /*
3042  * Note: any non-unused item should be taken as a reason to keep
3043  * this page. Even an LP_DEAD item makes truncation unsafe, since
3044  * we must not have cleaned out its index entries.
3045  */
3046  if (ItemIdIsUsed(itemid))
3047  {
3048  hastup = true;
3049  break; /* can stop scanning */
3050  }
3051  } /* scan along page */
3052 
3053  UnlockReleaseBuffer(buf);
3054 
3055  /* Done scanning if we found a tuple here */
3056  if (hastup)
3057  return blkno + 1;
3058  }
3059 
3060  /*
3061  * If we fall out of the loop, all the previously-thought-to-be-empty
3062  * pages still are; we need not bother to look at the last known-nonempty
3063  * page.
3064  */
3065  return vacrel->nonempty_pages;
3066 }
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
int Buffer
Definition: buf.h:23
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:592
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3954
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4172
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:759
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:106
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:280
@ RBM_NORMAL
Definition: bufmgr.h:39
static bool PageIsEmpty(Page page)
Definition: bufpage.h:220
Pointer Page
Definition: bufpage.h:78
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:240
static bool PageIsNew(Page page)
Definition: bufpage.h:230
static OffsetNumber PageGetMaxOffsetNumber(Page page)
Definition: bufpage.h:369
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:869
int errmsg(const char *fmt,...)
Definition: elog.c:906
#define DEBUG2
Definition: elog.h:25
#define INFO
Definition: elog.h:30
#define ereport(elevel,...)
Definition: elog.h:145
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:89
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:103
#define INSTR_TIME_GET_MICROSEC(t)
Definition: instr_time.h:138
struct timespec instr_time
Definition: instr_time.h:83
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:374
#define AccessExclusiveLock
Definition: lockdefs.h:43
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:121
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define FirstOffsetNumber
Definition: off.h:27
static char * buf
Definition: pg_test_fsync.c:67
@ MAIN_FORKNUM
Definition: relpath.h:50
bool verbose
Definition: vacuumlazy.c:188
BlockNumber nonempty_pages
Definition: vacuumlazy.c:204
Relation rel
Definition: vacuumlazy.c:143
BlockNumber rel_pages
Definition: vacuumlazy.c:198
BufferAccessStrategy bstrategy
Definition: vacuumlazy.c:162
char * relname
Definition: vacuumlazy.c:183
#define PREFETCH_SIZE
Definition: vacuumlazy.c:121
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL
Definition: vacuumlazy.c:86

References AccessExclusiveLock, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BufferGetPage(), CHECK_FOR_INTERRUPTS, DEBUG2, ereport, errmsg(), FirstOffsetNumber, INFO, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelState::nonempty_pages, OffsetNumberNext, PageGetItemId(), PageGetMaxOffsetNumber(), PageIsEmpty(), PageIsNew(), PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelState::rel, LVRelState::rel_pages, LVRelState::relname, StaticAssertStmt, UnlockReleaseBuffer(), VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and LVRelState::verbose.

Referenced by lazy_truncate_heap().

◆ dead_items_alloc()

static void dead_items_alloc ( LVRelState * vacrel,
int  nworkers 
)
static

Definition at line 3116 of file vacuumlazy.c.

3117 {
3118  VacDeadItems *dead_items;
3119  int max_items;
3120 
3121  max_items = dead_items_max_items(vacrel);
3122  Assert(max_items >= MaxHeapTuplesPerPage);
3123 
3124  /*
3125  * Initialize state for a parallel vacuum. As of now, only one worker can
3126  * be used for an index, so we invoke parallelism only if there are at
3127  * least two indexes on a table.
3128  */
3129  if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)
3130  {
3131  /*
3132  * Since parallel workers cannot access data in temporary tables, we
3133  * can't perform parallel vacuum on them.
3134  */
3135  if (RelationUsesLocalBuffers(vacrel->rel))
3136  {
3137  /*
3138  * Give warning only if the user explicitly tries to perform a
3139  * parallel vacuum on the temporary table.
3140  */
3141  if (nworkers > 0)
3142  ereport(WARNING,
3143  (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
3144  vacrel->relname)));
3145  }
3146  else
3147  vacrel->pvs = parallel_vacuum_init(vacrel->rel, vacrel->indrels,
3148  vacrel->nindexes, nworkers,
3149  max_items,
3150  vacrel->verbose ? INFO : DEBUG2,
3151  vacrel->bstrategy);
3152 
3153  /* If parallel mode started, dead_items space is allocated in DSM */
3154  if (ParallelVacuumIsActive(vacrel))
3155  {
3156  vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs);
3157  return;
3158  }
3159  }
3160 
3161  /* Serial VACUUM case */
3162  dead_items = (VacDeadItems *) palloc(vac_max_items_to_alloc_size(max_items));
3163  dead_items->max_items = max_items;
3164  dead_items->num_items = 0;
3165 
3166  vacrel->dead_items = dead_items;
3167 }
#define WARNING
Definition: elog.h:32
#define MaxHeapTuplesPerPage
Definition: htup_details.h:568
Assert(fmt[strlen(fmt) - 1] !='\n')
void * palloc(Size size)
Definition: mcxt.c:1199
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:635
ParallelVacuumState * pvs
Definition: vacuumlazy.c:163
int nindexes
Definition: vacuumlazy.c:145
Relation * indrels
Definition: vacuumlazy.c:144
VacDeadItems * dead_items
Definition: vacuumlazy.c:197
bool do_index_vacuuming
Definition: vacuumlazy.c:157
int max_items
Definition: vacuum.h:243
int num_items
Definition: vacuum.h:244
Size vac_max_items_to_alloc_size(int max_items)
Definition: vacuum.c:2331
#define ParallelVacuumIsActive(vacrel)
Definition: vacuumlazy.c:127
static int dead_items_max_items(LVRelState *vacrel)
Definition: vacuumlazy.c:3077
VacDeadItems * parallel_vacuum_get_dead_items(ParallelVacuumState *pvs)
ParallelVacuumState * parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, int nrequested_workers, int max_items, int elevel, BufferAccessStrategy bstrategy)

References Assert(), LVRelState::bstrategy, LVRelState::dead_items, dead_items_max_items(), DEBUG2, LVRelState::do_index_vacuuming, ereport, errmsg(), LVRelState::indrels, INFO, VacDeadItems::max_items, MaxHeapTuplesPerPage, LVRelState::nindexes, VacDeadItems::num_items, palloc(), parallel_vacuum_get_dead_items(), parallel_vacuum_init(), ParallelVacuumIsActive, LVRelState::pvs, LVRelState::rel, RelationUsesLocalBuffers, LVRelState::relname, vac_max_items_to_alloc_size(), LVRelState::verbose, and WARNING.

Referenced by heap_vacuum_rel().

◆ dead_items_cleanup()

static void dead_items_cleanup ( LVRelState * vacrel)
static

Definition at line 3173 of file vacuumlazy.c.

3174 {
3175  if (!ParallelVacuumIsActive(vacrel))
3176  {
3177  /* Don't bother with pfree here */
3178  return;
3179  }
3180 
3181  /* End parallel mode */
3182  parallel_vacuum_end(vacrel->pvs, vacrel->indstats);
3183  vacrel->pvs = NULL;
3184 }
IndexBulkDeleteResult ** indstats
Definition: vacuumlazy.c:210
void parallel_vacuum_end(ParallelVacuumState *pvs, IndexBulkDeleteResult **istats)

References LVRelState::indstats, parallel_vacuum_end(), ParallelVacuumIsActive, and LVRelState::pvs.

Referenced by heap_vacuum_rel().

◆ dead_items_max_items()

static int dead_items_max_items ( LVRelState * vacrel)
static

Definition at line 3077 of file vacuumlazy.c.

3078 {
3079  int64 max_items;
3080  int vac_work_mem = IsAutoVacuumWorkerProcess() &&
3081  autovacuum_work_mem != -1 ?
3082  autovacuum_work_mem : maintenance_work_mem;
3083 
3084  if (vacrel->nindexes > 0)
3085  {
3086  BlockNumber rel_pages = vacrel->rel_pages;
3087 
3088  max_items = MAXDEADITEMS(vac_work_mem * 1024L);
3089  max_items = Min(max_items, INT_MAX);
3090  max_items = Min(max_items, MAXDEADITEMS(MaxAllocSize));
3091 
3092  /* curious coding here to ensure the multiplication can't overflow */
3093  if ((BlockNumber) (max_items / MaxHeapTuplesPerPage) > rel_pages)
3094  max_items = rel_pages * MaxHeapTuplesPerPage;
3095 
3096  /* stay sane if small maintenance_work_mem */
3097  max_items = Max(max_items, MaxHeapTuplesPerPage);
3098  }
3099  else
3100  {
3101  /* One-pass case only stores a single heap page's TIDs at a time */
3102  max_items = MaxHeapTuplesPerPage;
3103  }
3104 
3105  return (int) max_items;
3106 }
int autovacuum_work_mem
Definition: autovacuum.c:118
bool IsAutoVacuumWorkerProcess(void)
Definition: autovacuum.c:3314
#define Min(x, y)
Definition: c.h:937
#define Max(x, y)
Definition: c.h:931
int maintenance_work_mem
Definition: globals.c:127
#define MaxAllocSize
Definition: memutils.h:40
#define MAXDEADITEMS(avail_mem)
Definition: vacuum.h:250

References autovacuum_work_mem, IsAutoVacuumWorkerProcess(), maintenance_work_mem, Max, MaxAllocSize, MAXDEADITEMS, MaxHeapTuplesPerPage, Min, LVRelState::nindexes, and LVRelState::rel_pages.

Referenced by dead_items_alloc().
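
As a rough worked example (assuming the usual 6-byte ItemPointerData): with the default maintenance_work_mem of 64MB, MAXDEADITEMS allows on the order of 64 * 1024 * 1024 / 6, about 11 million dead TIDs, which the function then clamps to INT_MAX, to what fits in a single MaxAllocSize (1GB) allocation, and to rel_pages * MaxHeapTuplesPerPage.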

◆ heap_page_is_all_visible()

static bool heap_page_is_all_visible ( LVRelState * vacrel,
Buffer  buf,
TransactionId * visibility_cutoff_xid,
bool * all_frozen 
)
static

Definition at line 3198 of file vacuumlazy.c.

3201 {
3202  Page page = BufferGetPage(buf);
3203  BlockNumber blockno = BufferGetBlockNumber(buf);
3204  OffsetNumber offnum,
3205  maxoff;
3206  bool all_visible = true;
3207 
3208  *visibility_cutoff_xid = InvalidTransactionId;
3209  *all_frozen = true;
3210 
3211  maxoff = PageGetMaxOffsetNumber(page);
3212  for (offnum = FirstOffsetNumber;
3213  offnum <= maxoff && all_visible;
3214  offnum = OffsetNumberNext(offnum))
3215  {
3216  ItemId itemid;
3217  HeapTupleData tuple;
3218 
3219  /*
3220  * Set the offset number so that we can display it along with any
3221  * error that occurred while processing this tuple.
3222  */
3223  vacrel->offnum = offnum;
3224  itemid = PageGetItemId(page, offnum);
3225 
3226  /* Unused or redirect line pointers are of no interest */
3227  if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
3228  continue;
3229 
3230  ItemPointerSet(&(tuple.t_self), blockno, offnum);
3231 
3232  /*
3233  * Dead line pointers can have index pointers pointing to them. So
3234  * they can't be treated as visible
3235  */
3236  if (ItemIdIsDead(itemid))
3237  {
3238  all_visible = false;
3239  *all_frozen = false;
3240  break;
3241  }
3242 
3243  Assert(ItemIdIsNormal(itemid));
3244 
3245  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
3246  tuple.t_len = ItemIdGetLength(itemid);
3247  tuple.t_tableOid = RelationGetRelid(vacrel->rel);
3248 
3249  switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf))
3250  {
3251  case HEAPTUPLE_LIVE:
3252  {
3253  TransactionId xmin;
3254 
3255  /* Check comments in lazy_scan_prune. */
3256  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
3257  {
3258  all_visible = false;
3259  *all_frozen = false;
3260  break;
3261  }
3262 
3263  /*
3264  * The inserter definitely committed. But is it old enough
3265  * that everyone sees it as committed?
3266  */
3267  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
3268  if (!TransactionIdPrecedes(xmin, vacrel->OldestXmin))
3269  {
3270  all_visible = false;
3271  *all_frozen = false;
3272  break;
3273  }
3274 
3275  /* Track newest xmin on page. */
3276  if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
3277  *visibility_cutoff_xid = xmin;
3278 
3279  /* Check whether this tuple is already frozen or not */
3280  if (all_visible && *all_frozen &&
3281  heap_tuple_needs_eventual_freeze(tuple.t_data))
3282  *all_frozen = false;
3283  }
3284  break;
3285 
3286  case HEAPTUPLE_DEAD:
3287  case HEAPTUPLE_RECENTLY_DEAD:
3288  case HEAPTUPLE_INSERT_IN_PROGRESS:
3289  case HEAPTUPLE_DELETE_IN_PROGRESS:
3290  {
3291  all_visible = false;
3292  *all_frozen = false;
3293  break;
3294  }
3295  default:
3296  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
3297  break;
3298  }
3299  } /* scan along page */
3300 
3301  /* Clear the offset information once we have processed the given page. */
3302  vacrel->offnum = InvalidOffsetNumber;
3303 
3304  return all_visible;
3305 }
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2763
static Item PageGetItem(Page page, ItemId itemId)
Definition: bufpage.h:351
uint32 TransactionId
Definition: c.h:588
#define ERROR
Definition: elog.h:35
bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
Definition: heapam.c:7262
@ HEAPTUPLE_RECENTLY_DEAD
Definition: heapam.h:97
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition: heapam.h:98
@ HEAPTUPLE_LIVE
Definition: heapam.h:96
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition: heapam.h:99
@ HEAPTUPLE_DEAD
Definition: heapam.h:95
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:308
#define HeapTupleHeaderXminCommitted(tup)
Definition: htup_details.h:319
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition: itemptr.h:135
#define InvalidOffsetNumber
Definition: off.h:26
#define RelationGetRelid(relation)
Definition: rel.h:501
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
TransactionId OldestXmin
Definition: vacuumlazy.c:171
OffsetNumber offnum
Definition: vacuumlazy.c:186
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:273
bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition: transam.c:307
#define InvalidTransactionId
Definition: transam.h:31

References Assert(), buf, BufferGetBlockNumber(), BufferGetPage(), elog(), ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::offnum, OffsetNumberNext, LVRelState::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), and TransactionIdPrecedes().

Referenced by lazy_scan_prune(), and lazy_vacuum_heap_page().

◆ heap_vacuum_rel()

void heap_vacuum_rel ( Relation  rel,
VacuumParams * params,
BufferAccessStrategy  bstrategy 
)

Definition at line 311 of file vacuumlazy.c.

313 {
314  LVRelState *vacrel;
315  bool verbose,
316  instrument,
317  aggressive,
318  skipwithvm,
319  frozenxid_updated,
320  minmulti_updated;
321  TransactionId OldestXmin,
322  FreezeLimit;
323  MultiXactId OldestMxact,
324  MultiXactCutoff;
325  BlockNumber orig_rel_pages,
326  new_rel_pages,
327  new_rel_allvisible;
328  PGRUsage ru0;
329  TimestampTz starttime = 0;
330  PgStat_Counter startreadtime = 0,
331  startwritetime = 0;
332  WalUsage startwalusage = pgWalUsage;
333  int64 StartPageHit = VacuumPageHit,
334  StartPageMiss = VacuumPageMiss,
335  StartPageDirty = VacuumPageDirty;
336  ErrorContextCallback errcallback;
337  char **indnames = NULL;
338 
339  verbose = (params->options & VACOPT_VERBOSE) != 0;
340  instrument = (verbose || (IsAutoVacuumWorkerProcess() &&
341  params->log_min_duration >= 0));
342  if (instrument)
343  {
344  pg_rusage_init(&ru0);
345  starttime = GetCurrentTimestamp();
346  if (track_io_timing)
347  {
348  startreadtime = pgStatBlockReadTime;
349  startwritetime = pgStatBlockWriteTime;
350  }
351  }
352 
353  pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
354  RelationGetRelid(rel));
355 
356  /*
357  * Get OldestXmin cutoff, which is used to determine which deleted tuples
358  * are considered DEAD, not just RECENTLY_DEAD. Also get related cutoffs
359  * used to determine which XIDs/MultiXactIds will be frozen. If this is
360  * an aggressive VACUUM then lazy_scan_heap cannot leave behind unfrozen
361  * XIDs < FreezeLimit (all MXIDs < MultiXactCutoff also need to go away).
362  */
363  aggressive = vacuum_set_xid_limits(rel, params, &OldestXmin, &OldestMxact,
364  &FreezeLimit, &MultiXactCutoff);
365 
366  skipwithvm = true;
367  if (params->options & VACOPT_DISABLE_PAGE_SKIPPING)
368  {
369  /*
370  * Force aggressive mode, and disable skipping blocks using the
371  * visibility map (even those set all-frozen)
372  */
373  aggressive = true;
374  skipwithvm = false;
375  }
376 
377  /*
378  * Setup error traceback support for ereport() first. The idea is to set
379  * up an error context callback to display additional information on any
380  * error during a vacuum. During different phases of vacuum, we update
381  * the state so that the error context callback always display current
382  * information.
383  *
384  * Copy the names of heap rel into local memory for error reporting
385  * purposes, too. It isn't always safe to assume that we can get the name
386  * of each rel. It's convenient for code in lazy_scan_heap to always use
387  * these temp copies.
388  */
389  vacrel = (LVRelState *) palloc0(sizeof(LVRelState));
390  vacrel->relnamespace = get_namespace_name(RelationGetNamespace(rel));
391  vacrel->relname = pstrdup(RelationGetRelationName(rel));
392  vacrel->indname = NULL;
393  vacrel->phase = VACUUM_ERRCB_PHASE_UNKNOWN;
394  vacrel->verbose = verbose;
395  errcallback.callback = vacuum_error_callback;
396  errcallback.arg = vacrel;
397  errcallback.previous = error_context_stack;
398  error_context_stack = &errcallback;
399  if (verbose)
400  {
401  Assert(!IsAutoVacuumWorkerProcess());
402  if (aggressive)
403  ereport(INFO,
404  (errmsg("aggressively vacuuming \"%s.%s.%s\"",
405  get_database_name(MyDatabaseId),
406  vacrel->relnamespace, vacrel->relname)));
407  else
408  ereport(INFO,
409  (errmsg("vacuuming \"%s.%s.%s\"",
410  get_database_name(MyDatabaseId),
411  vacrel->relnamespace, vacrel->relname)));
412  }
413 
414  /* Set up high level stuff about rel and its indexes */
415  vacrel->rel = rel;
416  vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes,
417  &vacrel->indrels);
418  if (instrument && vacrel->nindexes > 0)
419  {
420  /* Copy index names used by instrumentation (not error reporting) */
421  indnames = palloc(sizeof(char *) * vacrel->nindexes);
422  for (int i = 0; i < vacrel->nindexes; i++)
423  indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
424  }
425 
426  /*
427  * The index_cleanup param either disables index vacuuming and cleanup or
428  * forces it to go ahead when we would otherwise apply the index bypass
429  * optimization. The default is 'auto', which leaves the final decision
430  * up to lazy_vacuum().
431  *
432  * The truncate param allows user to avoid attempting relation truncation,
433  * though it can't force truncation to happen.
434  */
435  Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED);
436  Assert(params->truncate != VACOPTVALUE_UNSPECIFIED &&
437  params->truncate != VACOPTVALUE_AUTO);
438  vacrel->aggressive = aggressive;
439  vacrel->skipwithvm = skipwithvm;
440  vacrel->failsafe_active = false;
441  vacrel->consider_bypass_optimization = true;
442  vacrel->do_index_vacuuming = true;
443  vacrel->do_index_cleanup = true;
444  vacrel->do_rel_truncate = (params->truncate != VACOPTVALUE_DISABLED);
445  if (params->index_cleanup == VACOPTVALUE_DISABLED)
446  {
447  /* Force disable index vacuuming up-front */
448  vacrel->do_index_vacuuming = false;
449  vacrel->do_index_cleanup = false;
450  }
451  else if (params->index_cleanup == VACOPTVALUE_ENABLED)
452  {
453  /* Force index vacuuming. Note that failsafe can still bypass. */
454  vacrel->consider_bypass_optimization = false;
455  }
456  else
457  {
458  /* Default/auto, make all decisions dynamically */
459  Assert(params->index_cleanup == VACOPTVALUE_AUTO);
460  }
461 
462  vacrel->bstrategy = bstrategy;
463  vacrel->relfrozenxid = rel->rd_rel->relfrozenxid;
464  vacrel->relminmxid = rel->rd_rel->relminmxid;
465  vacrel->old_live_tuples = rel->rd_rel->reltuples;
466 
467  /* Initialize page counters explicitly (be tidy) */
468  vacrel->scanned_pages = 0;
469  vacrel->removed_pages = 0;
470  vacrel->frozen_pages = 0;
471  vacrel->lpdead_item_pages = 0;
472  vacrel->missed_dead_pages = 0;
473  vacrel->nonempty_pages = 0;
474  /* dead_items_alloc allocates vacrel->dead_items later on */
475 
476  /* Allocate/initialize output statistics state */
477  vacrel->new_rel_tuples = 0;
478  vacrel->new_live_tuples = 0;
479  vacrel->indstats = (IndexBulkDeleteResult **)
480  palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
481 
482  /* Initialize remaining counters (be tidy) */
483  vacrel->num_index_scans = 0;
484  vacrel->tuples_deleted = 0;
485  vacrel->tuples_frozen = 0;
486  vacrel->lpdead_items = 0;
487  vacrel->live_tuples = 0;
488  vacrel->recently_dead_tuples = 0;
489  vacrel->missed_dead_tuples = 0;
490 
491  /*
492  * Determine the extent of the blocks that we'll scan in lazy_scan_heap,
493  * and finalize cutoffs used for freezing and pruning in lazy_scan_prune.
494  *
495  * We expect vistest will always make heap_page_prune remove any deleted
496  * tuple whose xmax is < OldestXmin. lazy_scan_prune must never become
497  * confused about whether a tuple should be frozen or removed. (In the
498  * future we might want to teach lazy_scan_prune to recompute vistest from
499  * time to time, to increase the number of dead tuples it can prune away.)
500  *
501  * We must determine rel_pages _after_ OldestXmin has been established.
502  * lazy_scan_heap's physical heap scan (scan of pages < rel_pages) is
503  * thereby guaranteed to not miss any tuples with XIDs < OldestXmin. These
504  * XIDs must at least be considered for freezing (though not necessarily
505  * frozen) during its scan.
506  */
507  vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
508  vacrel->OldestXmin = OldestXmin;
509  vacrel->vistest = GlobalVisTestFor(rel);
510  /* FreezeLimit controls XID freezing (always <= OldestXmin) */
511  vacrel->FreezeLimit = FreezeLimit;
512  /* MultiXactCutoff controls MXID freezing (always <= OldestMxact) */
513  vacrel->MultiXactCutoff = MultiXactCutoff;
514  /* Initialize state used to track oldest extant XID/MXID */
515  vacrel->NewRelfrozenXid = OldestXmin;
516  vacrel->NewRelminMxid = OldestMxact;
517  vacrel->skippedallvis = false;
518 
519  /*
520  * Allocate dead_items array memory using dead_items_alloc. This handles
521  * parallel VACUUM initialization as part of allocating shared memory
522  * space used for dead_items. (But do a failsafe precheck first, to
523  * ensure that parallel VACUUM won't be attempted at all when relfrozenxid
524  * is already dangerously old.)
525  */
526  lazy_check_wraparound_failsafe(vacrel);
527  dead_items_alloc(vacrel, params->nworkers);
528 
529  /*
530  * Call lazy_scan_heap to perform all required heap pruning, index
531  * vacuuming, and heap vacuuming (plus related processing)
532  */
533  lazy_scan_heap(vacrel);
534 
535  /*
536  * Free resources managed by dead_items_alloc. This ends parallel mode in
537  * passing when necessary.
538  */
539  dead_items_cleanup(vacrel);
540  Assert(!IsInParallelMode());
541 
542  /*
543  * Update pg_class entries for each of rel's indexes where appropriate.
544  *
545  * Unlike the later update to rel's pg_class entry, this is not critical.
546  * Maintains relpages/reltuples statistics used by the planner only.
547  */
548  if (vacrel->do_index_cleanup)
549  update_relstats_all_indexes(vacrel);
550 
551  /* Done with rel's indexes */
552  vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
553 
554  /* Optionally truncate rel */
555  if (should_attempt_truncation(vacrel))
556  lazy_truncate_heap(vacrel);
557 
558  /* Pop the error context stack */
559  error_context_stack = errcallback.previous;
560 
561  /* Report that we are now doing final cleanup */
562  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
563  PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
564 
565  /*
566  * Prepare to update rel's pg_class entry.
567  *
568  * Aggressive VACUUMs must always be able to advance relfrozenxid to a
569  * value >= FreezeLimit, and relminmxid to a value >= MultiXactCutoff.
570  * Non-aggressive VACUUMs may advance them by any amount, or not at all.
571  */
572  Assert(vacrel->NewRelfrozenXid == OldestXmin ||
573  TransactionIdPrecedesOrEquals(aggressive ? FreezeLimit :
574  vacrel->relfrozenxid,
575  vacrel->NewRelfrozenXid));
576  Assert(vacrel->NewRelminMxid == OldestMxact ||
577  MultiXactIdPrecedesOrEquals(aggressive ? MultiXactCutoff :
578  vacrel->relminmxid,
579  vacrel->NewRelminMxid));
580  if (vacrel->skippedallvis)
581  {
582  /*
583  * Must keep original relfrozenxid in a non-aggressive VACUUM that
584  * chose to skip an all-visible page range. The state that tracks new
585  * values will have missed unfrozen XIDs from the pages we skipped.
586  */
587  Assert(!aggressive);
588  vacrel->NewRelfrozenXid = InvalidTransactionId;
589  vacrel->NewRelminMxid = InvalidMultiXactId;
590  }
591 
592  /*
593  * For safety, clamp relallvisible to be not more than what we're setting
594  * pg_class.relpages to
595  */
596  new_rel_pages = vacrel->rel_pages; /* After possible rel truncation */
597  visibilitymap_count(rel, &new_rel_allvisible, NULL);
598  if (new_rel_allvisible > new_rel_pages)
599  new_rel_allvisible = new_rel_pages;
600 
601  /*
602  * Now actually update rel's pg_class entry.
603  *
604  * In principle new_live_tuples could be -1 indicating that we (still)
605  * don't know the tuple count. In practice that can't happen, since we
606  * scan every page that isn't skipped using the visibility map.
607  */
608  vac_update_relstats(rel, new_rel_pages, vacrel->new_live_tuples,
609  new_rel_allvisible, vacrel->nindexes > 0,
610  vacrel->NewRelfrozenXid, vacrel->NewRelminMxid,
611  &frozenxid_updated, &minmulti_updated, false);
612 
613  /*
614  * Report results to the cumulative stats system, too.
615  *
616  * Deliberately avoid telling the stats system about LP_DEAD items that
617  * remain in the table due to VACUUM bypassing index and heap vacuuming.
618  * ANALYZE will consider the remaining LP_DEAD items to be dead "tuples".
619  * It seems like a good idea to err on the side of not vacuuming again too
620  * soon in cases where the failsafe prevented significant amounts of heap
621  * vacuuming.
622  */
623  pgstat_report_vacuum(RelationGetRelid(rel),
624  rel->rd_rel->relisshared,
625  Max(vacrel->new_live_tuples, 0),
626  vacrel->recently_dead_tuples +
627  vacrel->missed_dead_tuples);
628  pgstat_progress_end_command();
629 
630  if (instrument)
631  {
632  TimestampTz endtime = GetCurrentTimestamp();
633 
634  if (verbose || params->log_min_duration == 0 ||
635  TimestampDifferenceExceeds(starttime, endtime,
636  params->log_min_duration))
637  {
638  long secs_dur;
639  int usecs_dur;
640  WalUsage walusage;
641  StringInfoData buf;
642  char *msgfmt;
643  int32 diff;
644  int64 PageHitOp = VacuumPageHit - StartPageHit,
645  PageMissOp = VacuumPageMiss - StartPageMiss,
646  PageDirtyOp = VacuumPageDirty - StartPageDirty;
647  double read_rate = 0,
648  write_rate = 0;
649 
650  TimestampDifference(starttime, endtime, &secs_dur, &usecs_dur);
651  memset(&walusage, 0, sizeof(WalUsage));
652  WalUsageAccumDiff(&walusage, &pgWalUsage, &startwalusage);
653 
654  initStringInfo(&buf);
655  if (verbose)
656  {
657  /*
658  * Aggressiveness already reported earlier, in dedicated
659  * VACUUM VERBOSE ereport
660  */
661  Assert(!params->is_wraparound);
662  msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
663  }
664  else if (params->is_wraparound)
665  {
666  /*
667  * While it's possible for a VACUUM to be both is_wraparound
668  * and !aggressive, that's just a corner-case -- is_wraparound
669  * implies aggressive. Produce distinct output for the corner
670  * case all the same, just in case.
671  */
672  if (aggressive)
673  msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
674  else
675  msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
676  }
677  else
678  {
679  if (aggressive)
680  msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
681  else
682  msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
683  }
684  appendStringInfo(&buf, msgfmt,
685  get_database_name(MyDatabaseId),
686  vacrel->relnamespace,
687  vacrel->relname,
688  vacrel->num_index_scans);
689  appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total)\n"),
690  vacrel->removed_pages,
691  new_rel_pages,
692  vacrel->scanned_pages,
693  orig_rel_pages == 0 ? 100.0 :
694  100.0 * vacrel->scanned_pages / orig_rel_pages);
695  appendStringInfo(&buf,
696  _("tuples: %lld removed, %lld remain, %lld are dead but not yet removable\n"),
697  (long long) vacrel->tuples_deleted,
698  (long long) vacrel->new_rel_tuples,
699  (long long) vacrel->recently_dead_tuples);
700  if (vacrel->missed_dead_tuples > 0)
701  appendStringInfo(&buf,
702  _("tuples missed: %lld dead from %u pages not removed due to cleanup lock contention\n"),
703  (long long) vacrel->missed_dead_tuples,
704  vacrel->missed_dead_pages);
705  diff = (int32) (ReadNextTransactionId() - OldestXmin);
706  appendStringInfo(&buf,
707  _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
708  OldestXmin, diff);
709  if (frozenxid_updated)
710  {
711  diff = (int32) (vacrel->NewRelfrozenXid - vacrel->relfrozenxid);
712  appendStringInfo(&buf,
713  _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
714  vacrel->NewRelfrozenXid, diff);
715  }
716  if (minmulti_updated)
717  {
718  diff = (int32) (vacrel->NewRelminMxid - vacrel->relminmxid);
719  appendStringInfo(&buf,
720  _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
721  vacrel->NewRelminMxid, diff);
722  }
723  appendStringInfo(&buf, _("frozen: %u pages from table (%.2f%% of total) had %lld tuples frozen\n"),
724  vacrel->frozen_pages,
725  orig_rel_pages == 0 ? 100.0 :
726  100.0 * vacrel->frozen_pages / orig_rel_pages,
727  (long long) vacrel->tuples_frozen);
728  if (vacrel->do_index_vacuuming)
729  {
730  if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
731  appendStringInfoString(&buf, _("index scan not needed: "));
732  else
733  appendStringInfoString(&buf, _("index scan needed: "));
734 
735  msgfmt = _("%u pages from table (%.2f%% of total) had %lld dead item identifiers removed\n");
736  }
737  else
738  {
739  if (!vacrel->failsafe_active)
740  appendStringInfoString(&buf, _("index scan bypassed: "));
741  else
742  appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
743 
744  msgfmt = _("%u pages from table (%.2f%% of total) have %lld dead item identifiers\n");
745  }
746  appendStringInfo(&buf, msgfmt,
747  vacrel->lpdead_item_pages,
748  orig_rel_pages == 0 ? 100.0 :
749  100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
750  (long long) vacrel->lpdead_items);
751  for (int i = 0; i < vacrel->nindexes; i++)
752  {
753  IndexBulkDeleteResult *istat = vacrel->indstats[i];
754 
755  if (!istat)
756  continue;
757 
758  appendStringInfo(&buf,
759  _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
760  indnames[i],
761  istat->num_pages,
762  istat->pages_newly_deleted,
763  istat->pages_deleted,
764  istat->pages_free);
765  }
766  if (track_io_timing)
767  {
768  double read_ms = (double) (pgStatBlockReadTime - startreadtime) / 1000;
769  double write_ms = (double) (pgStatBlockWriteTime - startwritetime) / 1000;
770 
771  appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
772  read_ms, write_ms);
773  }
774  if (secs_dur > 0 || usecs_dur > 0)
775  {
776  read_rate = (double) BLCKSZ * PageMissOp / (1024 * 1024) /
777  (secs_dur + usecs_dur / 1000000.0);
778  write_rate = (double) BLCKSZ * PageDirtyOp / (1024 * 1024) /
779  (secs_dur + usecs_dur / 1000000.0);
780  }
781  appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
782  read_rate, write_rate);
783  appendStringInfo(&buf,
784  _("buffer usage: %lld hits, %lld misses, %lld dirtied\n"),
785  (long long) PageHitOp,
786  (long long) PageMissOp,
787  (long long) PageDirtyOp);
788  appendStringInfo(&buf,
789  _("WAL usage: %lld records, %lld full page images, %llu bytes\n"),
790  (long long) walusage.wal_records,
791  (long long) walusage.wal_fpi,
792  (unsigned long long) walusage.wal_bytes);
793  appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
794 
795  ereport(verbose ? INFO : LOG,
796  (errmsg_internal("%s", buf.data)));
797  pfree(buf.data);
798  }
799  }
800 
801  /* Cleanup index statistics and index names */
802  for (int i = 0; i < vacrel->nindexes; i++)
803  {
804  if (vacrel->indstats[i])
805  pfree(vacrel->indstats[i]);
806 
807  if (instrument)
808  pfree(indnames[i]);
809  }
810 }
void TimestampDifference(TimestampTz start_time, TimestampTz stop_time, long *secs, int *microsecs)
Definition: timestamp.c:1664
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1719
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1573
void pgstat_progress_start_command(ProgressCommandType cmdtype, Oid relid)
void pgstat_progress_update_param(int index, int64 val)
void pgstat_progress_end_command(void)
@ PROGRESS_COMMAND_VACUUM
bool track_io_timing
Definition: bufmgr.c:137
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:156
signed int int32
Definition: c.h:430
TransactionId MultiXactId
Definition: c.h:598
int64 TimestampTz
Definition: timestamp.h:39
char * get_database_name(Oid dbid)
Definition: dbcommands.c:2981
int errmsg_internal(const char *fmt,...)
Definition: elog.c:993
ErrorContextCallback * error_context_stack
Definition: elog.c:94
#define _(x)
Definition: elog.c:90
#define LOG
Definition: elog.h:27
int64 VacuumPageHit
Definition: globals.c:148
int64 VacuumPageMiss
Definition: globals.c:149
int64 VacuumPageDirty
Definition: globals.c:150
Oid MyDatabaseId
Definition: globals.c:89
WalUsage pgWalUsage
Definition: instrument.c:22
void WalUsageAccumDiff(WalUsage *dst, const WalUsage *add, const WalUsage *sub)
Definition: instrument.c:280
int i
Definition: isn.c:73
#define NoLock
Definition: lockdefs.h:34
#define RowExclusiveLock
Definition: lockdefs.h:38
char * get_namespace_name(Oid nspid)
Definition: lsyscache.c:3331
char * pstrdup(const char *in)
Definition: mcxt.c:1483
void pfree(void *pointer)
Definition: mcxt.c:1306
void * palloc0(Size size)
Definition: mcxt.c:1230
bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3171
#define InvalidMultiXactId
Definition: multixact.h:24
static int verbose
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
int64 PgStat_Counter
Definition: pgstat.h:88
PgStat_Counter pgStatBlockReadTime
PgStat_Counter pgStatBlockWriteTime
void pgstat_report_vacuum(Oid tableoid, bool shared, PgStat_Counter livetuples, PgStat_Counter deadtuples)
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4066
#define PROGRESS_VACUUM_PHASE_FINAL_CLEANUP
Definition: progress.h:35
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
#define RelationGetRelationName(relation)
Definition: rel.h:535
#define RelationGetNamespace(relation)
Definition: rel.h:542
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:91
void appendStringInfoString(StringInfo str, const char *s)
Definition: stringinfo.c:176
void initStringInfo(StringInfo str)
Definition: stringinfo.c:59
struct ErrorContextCallback * previous
Definition: elog.h:234
void(* callback)(void *arg)
Definition: elog.h:235
BlockNumber pages_deleted
Definition: genam.h:81
BlockNumber pages_newly_deleted
Definition: genam.h:80
BlockNumber pages_free
Definition: genam.h:82
BlockNumber num_pages
Definition: genam.h:76
MultiXactId relminmxid
Definition: vacuumlazy.c:167
int64 tuples_deleted
Definition: vacuumlazy.c:215
MultiXactId MultiXactCutoff
Definition: vacuumlazy.c:175
double old_live_tuples
Definition: vacuumlazy.c:168
bool do_rel_truncate
Definition: vacuumlazy.c:159
BlockNumber scanned_pages
Definition: vacuumlazy.c:199
bool aggressive
Definition: vacuumlazy.c:148
bool failsafe_active
Definition: vacuumlazy.c:152
GlobalVisState * vistest
Definition: vacuumlazy.c:172
BlockNumber removed_pages
Definition: vacuumlazy.c:200
int num_index_scans
Definition: vacuumlazy.c:213
double new_live_tuples
Definition: vacuumlazy.c:208
double new_rel_tuples
Definition: vacuumlazy.c:207
TransactionId NewRelfrozenXid
Definition: vacuumlazy.c:177
bool consider_bypass_optimization
Definition: vacuumlazy.c:154
TransactionId FreezeLimit
Definition: vacuumlazy.c:174
int64 recently_dead_tuples
Definition: vacuumlazy.c:219
int64 tuples_frozen
Definition: vacuumlazy.c:216
BlockNumber frozen_pages
Definition: vacuumlazy.c:201
BlockNumber missed_dead_pages
Definition: vacuumlazy.c:203
char * relnamespace
Definition: vacuumlazy.c:182
int64 live_tuples
Definition: vacuumlazy.c:218
int64 lpdead_items
Definition: vacuumlazy.c:217
bool skippedallvis
Definition: vacuumlazy.c:179
BlockNumber lpdead_item_pages
Definition: vacuumlazy.c:202
bool skipwithvm
Definition: vacuumlazy.c:150
bool do_index_cleanup
Definition: vacuumlazy.c:158
MultiXactId NewRelminMxid
Definition: vacuumlazy.c:178
int64 missed_dead_tuples
Definition: vacuumlazy.c:220
TransactionId relfrozenxid
Definition: vacuumlazy.c:166
VacErrPhase phase
Definition: vacuumlazy.c:187
char * indname
Definition: vacuumlazy.c:184
Form_pg_class rd_rel
Definition: rel.h:110
int nworkers
Definition: vacuum.h:235
VacOptValue truncate
Definition: vacuum.h:228
bits32 options
Definition: vacuum.h:216
bool is_wraparound
Definition: vacuum.h:223
int log_min_duration
Definition: vacuum.h:224
VacOptValue index_cleanup
Definition: vacuum.h:227
uint64 wal_bytes
Definition: instrument.h:53
int64 wal_fpi
Definition: instrument.h:52
int64 wal_records
Definition: instrument.h:51
bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
Definition: transam.c:292
static TransactionId ReadNextTransactionId(void)
Definition: transam.h:315
void vac_open_indexes(Relation relation, LOCKMODE lockmode, int *nindexes, Relation **Irel)
Definition: vacuum.c:2102
void vac_update_relstats(Relation relation, BlockNumber num_pages, double num_tuples, BlockNumber num_all_visible_pages, bool hasindex, TransactionId frozenxid, MultiXactId minmulti, bool *frozenxid_updated, bool *minmulti_updated, bool in_outer_xact)
Definition: vacuum.c:1282
void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
Definition: vacuum.c:2145
bool vacuum_set_xid_limits(Relation rel, const VacuumParams *params, TransactionId *OldestXmin, MultiXactId *OldestMxact, TransactionId *FreezeLimit, MultiXactId *MultiXactCutoff)
Definition: vacuum.c:940
#define VACOPT_VERBOSE
Definition: vacuum.h:185
@ VACOPTVALUE_AUTO
Definition: vacuum.h:203
@ VACOPTVALUE_ENABLED
Definition: vacuum.h:205
@ VACOPTVALUE_UNSPECIFIED
Definition: vacuum.h:202
@ VACOPTVALUE_DISABLED
Definition: vacuum.h:204
#define VACOPT_DISABLE_PAGE_SKIPPING
Definition: vacuum.h:190
static void dead_items_cleanup(LVRelState *vacrel)
Definition: vacuumlazy.c:3173
static void update_relstats_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:3311
static void vacuum_error_callback(void *arg)
Definition: vacuumlazy.c:3346
static void lazy_truncate_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:2803
static bool should_attempt_truncation(LVRelState *vacrel)
Definition: vacuumlazy.c:2782
static void lazy_scan_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:849
static bool lazy_check_wraparound_failsafe(LVRelState *vacrel)
Definition: vacuumlazy.c:2582
static void dead_items_alloc(LVRelState *vacrel, int nworkers)
Definition: vacuumlazy.c:3116
void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
bool IsInParallelMode(void)
Definition: xact.c:1068

References _, LVRelState::aggressive, appendStringInfo(), appendStringInfoString(), ErrorContextCallback::arg, Assert(), LVRelState::bstrategy, buf, ErrorContextCallback::callback, LVRelState::consider_bypass_optimization, dead_items_alloc(), dead_items_cleanup(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errmsg(), errmsg_internal(), error_context_stack, LVRelState::failsafe_active, LVRelState::FreezeLimit, LVRelState::frozen_pages, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), GlobalVisTestFor(), i, VacuumParams::index_cleanup, LVRelState::indname, LVRelState::indrels, LVRelState::indstats, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, VacuumParams::is_wraparound, IsAutoVacuumWorkerProcess(), IsInParallelMode(), lazy_check_wraparound_failsafe(), lazy_scan_heap(), lazy_truncate_heap(), LVRelState::live_tuples, LOG, VacuumParams::log_min_duration, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, Max, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, LVRelState::MultiXactCutoff, MultiXactIdPrecedesOrEquals(), MyDatabaseId, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, NoLock, LVRelState::nonempty_pages, LVRelState::num_index_scans, IndexBulkDeleteResult::num_pages, VacuumParams::nworkers, LVRelState::old_live_tuples, LVRelState::OldestXmin, VacuumParams::options, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, IndexBulkDeleteResult::pages_newly_deleted, palloc(), palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), pgStatBlockReadTime, pgStatBlockWriteTime, pgWalUsage, LVRelState::phase, ErrorContextCallback::previous, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, pstrdup(), RelationData::rd_rel, ReadNextTransactionId(), LVRelState::recently_dead_tuples, LVRelState::rel, LVRelState::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, LVRelState::relfrozenxid, LVRelState::relminmxid, LVRelState::relname, LVRelState::relnamespace, LVRelState::removed_pages, RowExclusiveLock, LVRelState::scanned_pages, should_attempt_truncation(), LVRelState::skippedallvis, LVRelState::skipwithvm, TimestampDifference(), TimestampDifferenceExceeds(), track_io_timing, TransactionIdPrecedesOrEquals(), VacuumParams::truncate, LVRelState::tuples_deleted, LVRelState::tuples_frozen, update_relstats_all_indexes(), vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_VERBOSE, VACOPTVALUE_AUTO, VACOPTVALUE_DISABLED, VACOPTVALUE_ENABLED, VACOPTVALUE_UNSPECIFIED, VACUUM_ERRCB_PHASE_UNKNOWN, vacuum_error_callback(), vacuum_set_xid_limits(), VacuumPageDirty, VacuumPageHit, VacuumPageMiss, LVRelState::verbose, verbose, visibilitymap_count(), LVRelState::vistest, WalUsage::wal_bytes, WalUsage::wal_fpi, WalUsage::wal_records, and WalUsageAccumDiff().
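
heap_vacuum_rel() is not called directly by the VACUUM command code; it is installed as the heap table access method's relation_vacuum callback, so the generic vacuum code reaches it through the table AM indirection. A hedged sketch of that dispatch, mirroring the inline wrapper in tableam.h:

static inline void
table_relation_vacuum(Relation rel, struct VacuumParams *params,
					  BufferAccessStrategy bstrategy)
{
	/* for ordinary heap tables, relation_vacuum points at heap_vacuum_rel() */
	rel->rd_tableam->relation_vacuum(rel, params, bstrategy);
}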

◆ lazy_check_wraparound_failsafe()

static bool lazy_check_wraparound_failsafe ( LVRelState * vacrel)
static

Definition at line 2582 of file vacuumlazy.c.

2583 {
2584  Assert(TransactionIdIsNormal(vacrel->relfrozenxid));
2585  Assert(MultiXactIdIsValid(vacrel->relminmxid));
2586 
2587  /* Don't warn more than once per VACUUM */
2588  if (vacrel->failsafe_active)
2589  return true;
2590 
2591  if (unlikely(vacuum_xid_failsafe_check(vacrel->relfrozenxid,
2592  vacrel->relminmxid)))
2593  {
2594  vacrel->failsafe_active = true;
2595 
2596  /* Disable index vacuuming, index cleanup, and heap rel truncation */
2597  vacrel->do_index_vacuuming = false;
2598  vacrel->do_index_cleanup = false;
2599  vacrel->do_rel_truncate = false;
2600 
2601  ereport(WARNING,
2602  (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
2603  get_database_name(MyDatabaseId),
2604  vacrel->relnamespace,
2605  vacrel->relname,
2606  vacrel->num_index_scans),
2607  errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
2608  errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
2609  "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
2610 
2611  /* Stop applying cost limits from this point on */
2612  VacuumCostActive = false;
2613  VacuumCostBalance = 0;
2614 
2615  return true;
2616  }
2617 
2618  return false;
2619 }
#define unlikely(x)
Definition: c.h:295
int errdetail(const char *fmt,...)
Definition: elog.c:1039
int errhint(const char *fmt,...)
Definition: elog.c:1153
bool VacuumCostActive
Definition: globals.c:153
int VacuumCostBalance
Definition: globals.c:152
#define MultiXactIdIsValid(multi)
Definition: multixact.h:28
#define TransactionIdIsNormal(xid)
Definition: transam.h:42
bool vacuum_xid_failsafe_check(TransactionId relfrozenxid, MultiXactId relminmxid)
Definition: vacuum.c:1126

References Assert(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errdetail(), errhint(), errmsg(), LVRelState::failsafe_active, get_database_name(), MultiXactIdIsValid, MyDatabaseId, LVRelState::num_index_scans, LVRelState::relfrozenxid, LVRelState::relminmxid, LVRelState::relname, LVRelState::relnamespace, TransactionIdIsNormal, unlikely, vacuum_xid_failsafe_check(), VacuumCostActive, VacuumCostBalance, and WARNING.

Referenced by heap_vacuum_rel(), lazy_scan_heap(), and lazy_vacuum_all_indexes().
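
The failsafe condition itself is evaluated by vacuum_xid_failsafe_check(), which by default fires once relfrozenxid or relminmxid is older than vacuum_failsafe_age or vacuum_multixact_failsafe_age (both default to 1.6 billion); during the heap scan the check is repeated roughly every FAILSAFE_EVERY_PAGES blocks.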

◆ lazy_cleanup_all_indexes()

static void lazy_cleanup_all_indexes ( LVRelState * vacrel)
static

Definition at line 2625 of file vacuumlazy.c.

2626 {
2627  double reltuples = vacrel->new_rel_tuples;
2628  bool estimated_count = vacrel->scanned_pages < vacrel->rel_pages;
2629 
2630  Assert(vacrel->do_index_cleanup);
2631  Assert(vacrel->nindexes > 0);
2632 
2633  /* Report that we are now cleaning up indexes */
2634  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2635  PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);
2636 
2637  if (!ParallelVacuumIsActive(vacrel))
2638  {
2639  for (int idx = 0; idx < vacrel->nindexes; idx++)
2640  {
2641  Relation indrel = vacrel->indrels[idx];
2642  IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2643 
2644  vacrel->indstats[idx] =
2645  lazy_cleanup_one_index(indrel, istat, reltuples,
2646  estimated_count, vacrel);
2647  }
2648  }
2649  else
2650  {
2651  /* Outsource everything to parallel variant */
2652  parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
2653  vacrel->num_index_scans,
2654  estimated_count);
2655  }
2656 }
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
#define PROGRESS_VACUUM_PHASE_INDEX_CLEANUP
Definition: progress.h:33
static IndexBulkDeleteResult * lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
Definition: vacuumlazy.c:2719
void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, bool estimated_count)

References Assert(), LVRelState::do_index_cleanup, idx(), LVRelState::indrels, LVRelState::indstats, lazy_cleanup_one_index(), LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::num_index_scans, parallel_vacuum_cleanup_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, LVRelState::pvs, LVRelState::rel_pages, and LVRelState::scanned_pages.

Referenced by lazy_scan_heap().
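
A rough, self-contained sketch of the dispatch decision above, using plain types rather than the LVRelState/ParallelVacuumState structures: reltuples is flagged as an estimate whenever some heap pages were skipped, and the whole loop is handed off when a parallel vacuum is active. All names here are illustrative stand-ins.

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    int      nindexes;
    bool     parallel_active;    /* stands in for ParallelVacuumIsActive() */
    unsigned rel_pages;
    unsigned scanned_pages;
    double   new_rel_tuples;
} ModelRel;

static void
model_cleanup_all_indexes(const ModelRel *rel)
{
    double reltuples = rel->new_rel_tuples;
    /* reltuples is only an estimate unless every heap page was scanned */
    bool   estimated_count = rel->scanned_pages < rel->rel_pages;

    if (!rel->parallel_active)
    {
        for (int idx = 0; idx < rel->nindexes; idx++)
            printf("amvacuumcleanup on index %d (reltuples=%.0f, estimated=%d)\n",
                   idx, reltuples, estimated_count);
    }
    else
        printf("hand all %d indexes to the parallel workers\n", rel->nindexes);
}

int
main(void)
{
    ModelRel rel = {3, false, 1000, 800, 54321.0};

    model_cleanup_all_indexes(&rel);
    return 0;
}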

◆ lazy_cleanup_one_index()

static IndexBulkDeleteResult * lazy_cleanup_one_index ( Relation  indrel,
IndexBulkDeleteResult *  istat,
double  reltuples,
bool  estimated_count,
LVRelState *  vacrel 
)
static

Definition at line 2719 of file vacuumlazy.c.

2722 {
2723  IndexVacuumInfo ivinfo;
2724  LVSavedErrInfo saved_err_info;
2725 
2726  ivinfo.index = indrel;
2727  ivinfo.analyze_only = false;
2728  ivinfo.report_progress = false;
2729  ivinfo.estimated_count = estimated_count;
2730  ivinfo.message_level = DEBUG2;
2731 
2732  ivinfo.num_heap_tuples = reltuples;
2733  ivinfo.strategy = vacrel->bstrategy;
2734 
2735  /*
2736  * Update error traceback information.
2737  *
2738  * The index name is saved during this phase and restored immediately
2739  * after this phase. See vacuum_error_callback.
2740  */
2741  Assert(vacrel->indname == NULL);
2742  vacrel->indname = pstrdup(RelationGetRelationName(indrel));
2743  update_vacuum_error_info(vacrel, &saved_err_info,
2744  VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
2745  InvalidBlockNumber, InvalidOffsetNumber);
2746 
2747  istat = vac_cleanup_one_index(&ivinfo, istat);
2748 
2749  /* Revert to the previous phase information for error traceback */
2750  restore_vacuum_error_info(vacrel, &saved_err_info);
2751  pfree(vacrel->indname);
2752  vacrel->indname = NULL;
2753 
2754  return istat;
2755 }

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_cleanup_one_index(), and VACUUM_ERRCB_PHASE_INDEX_CLEANUP.

Referenced by lazy_cleanup_all_indexes().
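
The save/update/restore pattern around the index call can be sketched as follows. The struct and phase value are illustrative placeholders, not the real error-callback machinery; the point is only that anything reported while the index AM runs sees the index name, and the previous traceback state comes back afterwards.

#include <stdio.h>

typedef struct
{
    char indname[64];    /* empty string means no index is being processed */
    int  phase;
} ModelErrInfo;

static void
model_cleanup_one_index(ModelErrInfo *err, const char *indexname)
{
    ModelErrInfo saved = *err;    /* like update_vacuum_error_info() saving state */

    snprintf(err->indname, sizeof(err->indname), "%s", indexname);
    err->phase = 4;               /* stands in for VACUUM_ERRCB_PHASE_INDEX_CLEANUP */

    /* ... the index AM call would go here; an error would report err->indname ... */
    printf("cleaning up %s (phase %d)\n", err->indname, err->phase);

    *err = saved;                 /* like restore_vacuum_error_info() */
}

int
main(void)
{
    ModelErrInfo err = {"", 0};

    model_cleanup_one_index(&err, "some_index_pkey");
    printf("restored phase %d, index name \"%s\"\n", err.phase, err.indname);
    return 0;
}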

◆ lazy_scan_heap()

static void lazy_scan_heap ( LVRelState *  vacrel)
static

Definition at line 849 of file vacuumlazy.c.

850 {
851  BlockNumber rel_pages = vacrel->rel_pages,
852  blkno,
853  next_unskippable_block,
854  next_failsafe_block = 0,
855  next_fsm_block_to_vacuum = 0;
856  VacDeadItems *dead_items = vacrel->dead_items;
857  Buffer vmbuffer = InvalidBuffer;
858  bool next_unskippable_allvis,
859  skipping_current_range;
860  const int initprog_index[] = {
861  PROGRESS_VACUUM_PHASE,
862  PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
863  PROGRESS_VACUUM_MAX_DEAD_TUPLES
864  };
865  int64 initprog_val[3];
866 
867  /* Report that we're scanning the heap, advertising total # of blocks */
868  initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
869  initprog_val[1] = rel_pages;
870  initprog_val[2] = dead_items->max_items;
871  pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
872 
873  /* Set up an initial range of skippable blocks using the visibility map */
874  next_unskippable_block = lazy_scan_skip(vacrel, &vmbuffer, 0,
875  &next_unskippable_allvis,
876  &skipping_current_range);
877  for (blkno = 0; blkno < rel_pages; blkno++)
878  {
879  Buffer buf;
880  Page page;
881  bool all_visible_according_to_vm;
882  LVPagePruneState prunestate;
883 
884  if (blkno == next_unskippable_block)
885  {
886  /*
887  * Can't skip this page safely. Must scan the page. But
888  * determine the next skippable range after the page first.
889  */
890  all_visible_according_to_vm = next_unskippable_allvis;
891  next_unskippable_block = lazy_scan_skip(vacrel, &vmbuffer,
892  blkno + 1,
893  &next_unskippable_allvis,
894  &skipping_current_range);
895 
896  Assert(next_unskippable_block >= blkno + 1);
897  }
898  else
899  {
900  /* Last page always scanned (may need to set nonempty_pages) */
901  Assert(blkno < rel_pages - 1);
902 
903  if (skipping_current_range)
904  continue;
905 
906  /* Current range is too small to skip -- just scan the page */
907  all_visible_according_to_vm = true;
908  }
909 
910  vacrel->scanned_pages++;
911 
912  /* Report as block scanned, update error traceback information */
913  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
914  update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_SCAN_HEAP,
915  blkno, InvalidOffsetNumber);
916 
917  vacuum_delay_point();
918 
919  /*
920  * Regularly check if wraparound failsafe should trigger.
921  *
922  * There is a similar check inside lazy_vacuum_all_indexes(), but
923  * relfrozenxid might start to look dangerously old before we reach
924  * that point. This check also provides failsafe coverage for the
925  * one-pass strategy, and the two-pass strategy with the index_cleanup
926  * param set to 'off'.
927  */
928  if (blkno - next_failsafe_block >= FAILSAFE_EVERY_PAGES)
929  {
931  next_failsafe_block = blkno;
932  }
933 
934  /*
935  * Consider if we definitely have enough space to process TIDs on page
936  * already. If we are close to overrunning the available space for
937  * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
938  * this page.
939  */
940  Assert(dead_items->max_items >= MaxHeapTuplesPerPage);
941  if (dead_items->max_items - dead_items->num_items < MaxHeapTuplesPerPage)
942  {
943  /*
944  * Before beginning index vacuuming, we release any pin we may
945  * hold on the visibility map page. This isn't necessary for
946  * correctness, but we do it anyway to avoid holding the pin
947  * across a lengthy, unrelated operation.
948  */
949  if (BufferIsValid(vmbuffer))
950  {
951  ReleaseBuffer(vmbuffer);
952  vmbuffer = InvalidBuffer;
953  }
954 
955  /* Perform a round of index and heap vacuuming */
956  vacrel->consider_bypass_optimization = false;
957  lazy_vacuum(vacrel);
958 
959  /*
960  * Vacuum the Free Space Map to make newly-freed space visible on
961  * upper-level FSM pages. Note we have not yet processed blkno.
962  */
963  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
964  blkno);
965  next_fsm_block_to_vacuum = blkno;
966 
967  /* Report that we are once again scanning the heap */
970  }
971 
972  /*
973  * Pin the visibility map page in case we need to mark the page
974  * all-visible. In most cases this will be very cheap, because we'll
975  * already have the correct page pinned anyway.
976  */
977  visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
978 
979  /* Finished preparatory checks. Actually scan the page. */
980  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno,
981  RBM_NORMAL, vacrel->bstrategy);
982  page = BufferGetPage(buf);
983 
984  /*
985  * We need a buffer cleanup lock to prune HOT chains and defragment
986  * the page in lazy_scan_prune. But when it's not possible to acquire
987  * a cleanup lock right away, we may be able to settle for reduced
988  * processing using lazy_scan_noprune.
989  */
991  {
992  bool hastup,
993  recordfreespace;
994 
996 
997  /* Check for new or empty pages before lazy_scan_noprune call */
998  if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, true,
999  vmbuffer))
1000  {
1001  /* Processed as new/empty page (lock and pin released) */
1002  continue;
1003  }
1004 
1005  /* Collect LP_DEAD items in dead_items array, count tuples */
1006  if (lazy_scan_noprune(vacrel, buf, blkno, page, &hastup,
1007  &recordfreespace))
1008  {
1009  Size freespace = 0;
1010 
1011  /*
1012  * Processed page successfully (without cleanup lock) -- just
1013  * need to perform rel truncation and FSM steps, much like the
1014  * lazy_scan_prune case. Don't bother trying to match its
1015  * visibility map setting steps, though.
1016  */
1017  if (hastup)
1018  vacrel->nonempty_pages = blkno + 1;
1019  if (recordfreespace)
1020  freespace = PageGetHeapFreeSpace(page);
1022  if (recordfreespace)
1023  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1024  continue;
1025  }
1026 
1027  /*
1028  * lazy_scan_noprune could not do all required processing. Wait
1029  * for a cleanup lock, and call lazy_scan_prune in the usual way.
1030  */
1031  Assert(vacrel->aggressive);
1034  }
1035 
1036  /* Check for new or empty pages before lazy_scan_prune call */
1037  if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, false, vmbuffer))
1038  {
1039  /* Processed as new/empty page (lock and pin released) */
1040  continue;
1041  }
1042 
1043  /*
1044  * Prune, freeze, and count tuples.
1045  *
1046  * Accumulates details of remaining LP_DEAD line pointers on page in
1047  * dead_items array. This includes LP_DEAD line pointers that we
1048  * pruned ourselves, as well as existing LP_DEAD line pointers that
1049  * were pruned some time earlier. Also considers freezing XIDs in the
1050  * tuple headers of remaining items with storage.
1051  */
1052  lazy_scan_prune(vacrel, buf, blkno, page, &prunestate);
1053 
1054  Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);
1055 
1056  /* Remember the location of the last page with nonremovable tuples */
1057  if (prunestate.hastup)
1058  vacrel->nonempty_pages = blkno + 1;
1059 
1060  if (vacrel->nindexes == 0)
1061  {
1062  /*
1063  * Consider the need to do page-at-a-time heap vacuuming when
1064  * using the one-pass strategy now.
1065  *
1066  * The one-pass strategy will never call lazy_vacuum(). The steps
1067  * performed here can be thought of as the one-pass equivalent of
1068  * a call to lazy_vacuum().
1069  */
1070  if (prunestate.has_lpdead_items)
1071  {
1072  Size freespace;
1073 
1074  lazy_vacuum_heap_page(vacrel, blkno, buf, 0, &vmbuffer);
1075 
1076  /* Forget the LP_DEAD items that we just vacuumed */
1077  dead_items->num_items = 0;
1078 
1079  /*
1080  * Periodically perform FSM vacuuming to make newly-freed
1081  * space visible on upper FSM pages. Note we have not yet
1082  * performed FSM processing for blkno.
1083  */
1084  if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1085  {
1086  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1087  blkno);
1088  next_fsm_block_to_vacuum = blkno;
1089  }
1090 
1091  /*
1092  * Now perform FSM processing for blkno, and move on to next
1093  * page.
1094  *
1095  * Our call to lazy_vacuum_heap_page() will have considered if
1096  * it's possible to set all_visible/all_frozen independently
1097  * of lazy_scan_prune(). Note that prunestate was invalidated
1098  * by lazy_vacuum_heap_page() call.
1099  */
1100  freespace = PageGetHeapFreeSpace(page);
1101 
1103  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1104  continue;
1105  }
1106 
1107  /*
1108  * There was no call to lazy_vacuum_heap_page() because pruning
1109  * didn't encounter/create any LP_DEAD items that needed to be
1110  * vacuumed. Prune state has not been invalidated, so proceed
1111  * with prunestate-driven visibility map and FSM steps (just like
1112  * the two-pass strategy).
1113  */
1114  Assert(dead_items->num_items == 0);
1115  }
1116 
1117  /*
1118  * Handle setting visibility map bit based on information from the VM
1119  * (as of last lazy_scan_skip() call), and from prunestate
1120  */
1121  if (!all_visible_according_to_vm && prunestate.all_visible)
1122  {
1123  uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
1124 
1125  if (prunestate.all_frozen)
1126  flags |= VISIBILITYMAP_ALL_FROZEN;
1127 
1128  /*
1129  * It should never be the case that the visibility map page is set
1130  * while the page-level bit is clear, but the reverse is allowed
1131  * (if checksums are not enabled). Regardless, set both bits so
1132  * that we get back in sync.
1133  *
1134  * NB: If the heap page is all-visible but the VM bit is not set,
1135  * we don't need to dirty the heap page. However, if checksums
1136  * are enabled, we do need to make sure that the heap page is
1137  * dirtied before passing it to visibilitymap_set(), because it
1138  * may be logged. Given that this situation should only happen in
1139  * rare cases after a crash, it is not worth optimizing.
1140  */
1141  PageSetAllVisible(page);
1143  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1144  vmbuffer, prunestate.visibility_cutoff_xid,
1145  flags);
1146  }
1147 
1148  /*
1149  * As of PostgreSQL 9.2, the visibility map bit should never be set if
1150  * the page-level bit is clear. However, it's possible that the bit
1151  * got cleared after lazy_scan_skip() was called, so we must recheck
1152  * with buffer lock before concluding that the VM is corrupt.
1153  */
1154  else if (all_visible_according_to_vm && !PageIsAllVisible(page)
1155  && VM_ALL_VISIBLE(vacrel->rel, blkno, &vmbuffer))
1156  {
1157  elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
1158  vacrel->relname, blkno);
1159  visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
1161  }
1162 
1163  /*
1164  * It's possible for the value returned by
1165  * GetOldestNonRemovableTransactionId() to move backwards, so it's not
1166  * wrong for us to see tuples that appear to not be visible to
1167  * everyone yet, while PD_ALL_VISIBLE is already set. The real safe
1168  * xmin value never moves backwards, but
1169  * GetOldestNonRemovableTransactionId() is conservative and sometimes
1170  * returns a value that's unnecessarily small, so if we see that
1171  * contradiction it just means that the tuples that we think are not
1172  * visible to everyone yet actually are, and the PD_ALL_VISIBLE flag
1173  * is correct.
1174  *
1175  * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE
1176  * set, however.
1177  */
1178  else if (prunestate.has_lpdead_items && PageIsAllVisible(page))
1179  {
1180  elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
1181  vacrel->relname, blkno);
1182  PageClearAllVisible(page);
1184  visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
1186  }
1187 
1188  /*
1189  * If the all-visible page is all-frozen but not marked as such yet,
1190  * mark it as all-frozen. Note that all_frozen is only valid if
1191  * all_visible is true, so we must check both prunestate fields.
1192  */
1193  else if (all_visible_according_to_vm && prunestate.all_visible &&
1194  prunestate.all_frozen &&
1195  !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
1196  {
1197  /*
1198  * We can pass InvalidTransactionId as the cutoff XID here,
1199  * because setting the all-frozen bit doesn't cause recovery
1200  * conflicts.
1201  */
1202  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1203  vmbuffer, InvalidTransactionId,
1205  }
1206 
1207  /*
1208  * Final steps for block: drop cleanup lock, record free space in the
1209  * FSM
1210  */
1211  if (prunestate.has_lpdead_items && vacrel->do_index_vacuuming)
1212  {
1213  /*
1214  * Wait until lazy_vacuum_heap_rel() to save free space. This
1215  * doesn't just save us some cycles; it also allows us to record
1216  * any additional free space that lazy_vacuum_heap_page() will
1217  * make available in cases where it's possible to truncate the
1218  * page's line pointer array.
1219  *
1220  * Note: It's not in fact 100% certain that we really will call
1221  * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip
1222  * index vacuuming (and so must skip heap vacuuming). This is
1223  * deemed okay because it only happens in emergencies, or when
1224  * there is very little free space anyway. (Besides, we start
1225  * recording free space in the FSM once index vacuuming has been
1226  * abandoned.)
1227  *
1228  * Note: The one-pass (no indexes) case is only supposed to make
1229  * it this far when there were no LP_DEAD items during pruning.
1230  */
1231  Assert(vacrel->nindexes > 0);
1233  }
1234  else
1235  {
1236  Size freespace = PageGetHeapFreeSpace(page);
1237 
1239  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1240  }
1241  }
1242 
1243  vacrel->blkno = InvalidBlockNumber;
1244  if (BufferIsValid(vmbuffer))
1245  ReleaseBuffer(vmbuffer);
1246 
1247  /* report that everything is now scanned */
1249 
1250  /* now we can compute the new value for pg_class.reltuples */
1251  vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, rel_pages,
1252  vacrel->scanned_pages,
1253  vacrel->live_tuples);
1254 
1255  /*
1256  * Also compute the total number of surviving heap entries. In the
1257  * (unlikely) scenario that new_live_tuples is -1, take it as zero.
1258  */
1259  vacrel->new_rel_tuples =
1260  Max(vacrel->new_live_tuples, 0) + vacrel->recently_dead_tuples +
1261  vacrel->missed_dead_tuples;
1262 
1263  /*
1264  * Do index vacuuming (call each index's ambulkdelete routine), then do
1265  * related heap vacuuming
1266  */
1267  if (dead_items->num_items > 0)
1268  lazy_vacuum(vacrel);
1269 
1270  /*
1271  * Vacuum the remainder of the Free Space Map. We must do this whether or
1272  * not there were indexes, and whether or not we bypassed index vacuuming.
1273  */
1274  if (blkno > next_fsm_block_to_vacuum)
1275  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno);
1276 
1277  /* report all blocks vacuumed */
1279 
1280  /* Do final index cleanup (call each index's amvacuumcleanup routine) */
1281  if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
1282  lazy_cleanup_all_indexes(vacrel);
1283 }

References LVRelState::aggressive, LVPagePruneState::all_frozen, LVPagePruneState::all_visible, Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage(), BufferIsValid(), ConditionalLockBufferForCleanup(), LVRelState::consider_bypass_optimization, LVRelState::dead_items, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, elog(), FAILSAFE_EVERY_PAGES, FreeSpaceMapVacuumRange(), LVPagePruneState::has_lpdead_items, LVPagePruneState::hastup, InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, InvalidTransactionId, InvalidXLogRecPtr, lazy_check_wraparound_failsafe(), lazy_cleanup_all_indexes(), lazy_scan_new_or_empty(), lazy_scan_noprune(), lazy_scan_prune(), lazy_scan_skip(), lazy_vacuum(), lazy_vacuum_heap_page(), LVRelState::live_tuples, LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, MarkBufferDirty(), Max, VacDeadItems::max_items, MaxHeapTuplesPerPage, LVRelState::missed_dead_tuples, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::nonempty_pages, VacDeadItems::num_items, PageClearAllVisible(), PageGetHeapFreeSpace(), PageIsAllVisible(), PageSetAllVisible(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLES, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, RBM_NORMAL, ReadBufferExtended(), LVRelState::recently_dead_tuples, RecordPageWithFreeSpace(), LVRelState::rel, LVRelState::rel_pages, ReleaseBuffer(), LVRelState::relname, LVRelState::scanned_pages, UnlockReleaseBuffer(), update_vacuum_error_info(), vac_estimate_reltuples(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_FSM_EVERY_PAGES, LVPagePruneState::visibility_cutoff_xid, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_pin(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, VM_ALL_FROZEN, VM_ALL_VISIBLE, and WARNING.

Referenced by heap_vacuum_rel().
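
One part of the loop that is easy to model in isolation is the dead_items overflow check: the scan pauses for a round of index plus heap vacuuming whenever the TID array might not fit another page's worth of LP_DEAD items (MaxHeapTuplesPerPage). A simplified standalone sketch with made-up capacities and page contents follows; it is not the vacuumlazy.c data structure.

#include <stdio.h>

#define MODEL_MAX_TUPLES_PER_PAGE 291    /* like MaxHeapTuplesPerPage at 8kB */

typedef struct
{
    int max_items;
    int num_items;
} ModelDeadItems;

static void
model_lazy_vacuum(ModelDeadItems *dead)
{
    printf("vacuum indexes + heap for %d TIDs\n", dead->num_items);
    dead->num_items = 0;    /* the TID array is reused afterwards */
}

int
main(void)
{
    ModelDeadItems dead = {100000, 0};

    for (unsigned blkno = 0; blkno < 5000; blkno++)
    {
        /* Pause if the next page could overflow the TID array */
        if (dead.max_items - dead.num_items < MODEL_MAX_TUPLES_PER_PAGE)
            model_lazy_vacuum(&dead);

        dead.num_items += 120;    /* pretend each page adds 120 LP_DEAD TIDs */
    }
    if (dead.num_items > 0)
        model_lazy_vacuum(&dead); /* final round, as at the end of the scan */
    return 0;
}

The same structure explains why a small maintenance_work_mem forces multiple index scans: every pause is a full pass over every index.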

◆ lazy_scan_new_or_empty()

static bool lazy_scan_new_or_empty ( LVRelState *  vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool  sharelock,
Buffer  vmbuffer 
)
static

Definition at line 1420 of file vacuumlazy.c.

1422 {
1423  Size freespace;
1424 
1425  if (PageIsNew(page))
1426  {
1427  /*
1428  * All-zeroes pages can be left over if either a backend extends the
1429  * relation by a single page, but crashes before the newly initialized
1430  * page has been written out, or when bulk-extending the relation
1431  * (which creates a number of empty pages at the tail end of the
1432  * relation), and then enters them into the FSM.
1433  *
1434  * Note we do not enter the page into the visibilitymap. That has the
1435  * downside that we repeatedly visit this page in subsequent vacuums,
1436  * but otherwise we'll never discover the space on a promoted standby.
1437  * The harm of repeated checking ought to normally not be too bad. The
1438  * space usually should be used at some point, otherwise there
1439  * wouldn't be any regular vacuums.
1440  *
1441  * Make sure these pages are in the FSM, to ensure they can be reused.
1442  * Do that by testing if there's any space recorded for the page. If
1443  * not, enter it. We do so after releasing the lock on the heap page,
1444  * the FSM is approximate, after all.
1445  */
1447 
1448  if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
1449  {
1450  freespace = BLCKSZ - SizeOfPageHeaderData;
1451 
1452  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1453  }
1454 
1455  return true;
1456  }
1457 
1458  if (PageIsEmpty(page))
1459  {
1460  /*
1461  * It seems likely that caller will always be able to get a cleanup
1462  * lock on an empty page. But don't take any chances -- escalate to
1463  * an exclusive lock (still don't need a cleanup lock, though).
1464  */
1465  if (sharelock)
1466  {
1469 
1470  if (!PageIsEmpty(page))
1471  {
1472  /* page isn't new or empty -- keep lock and pin for now */
1473  return false;
1474  }
1475  }
1476  else
1477  {
1478  /* Already have a full cleanup lock (which is more than enough) */
1479  }
1480 
1481  /*
1482  * Unlike new pages, empty pages are always set all-visible and
1483  * all-frozen.
1484  */
1485  if (!PageIsAllVisible(page))
1486  {
1487  START_CRIT_SECTION();
1488 
1489  /* mark buffer dirty before writing a WAL record */
1490  MarkBufferDirty(buf);
1491 
1492  /*
1493  * It's possible that another backend has extended the heap,
1494  * initialized the page, and then failed to WAL-log the page due
1495  * to an ERROR. Since heap extension is not WAL-logged, recovery
1496  * might try to replay our record setting the page all-visible and
1497  * find that the page isn't initialized, which will cause a PANIC.
1498  * To prevent that, check whether the page has been previously
1499  * WAL-logged, and if not, do that now.
1500  */
1501  if (RelationNeedsWAL(vacrel->rel) &&
1502  PageGetLSN(page) == InvalidXLogRecPtr)
1503  log_newpage_buffer(buf, true);
1504 
1505  PageSetAllVisible(page);
1506  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1507  vmbuffer, InvalidTransactionId,
1509  END_CRIT_SECTION();
1510  }
1511 
1512  freespace = PageGetHeapFreeSpace(page);
1513  UnlockReleaseBuffer(buf);
1514  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1515  return true;
1516  }
1517 
1518  /* page isn't new or empty -- keep lock and pin */
1519  return false;
1520 }

References buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, END_CRIT_SECTION, GetRecordedFreeSpace(), InvalidTransactionId, InvalidXLogRecPtr, LockBuffer(), log_newpage_buffer(), MarkBufferDirty(), PageGetHeapFreeSpace(), PageGetLSN(), PageIsAllVisible(), PageIsEmpty(), PageIsNew(), PageSetAllVisible(), RecordPageWithFreeSpace(), LVRelState::rel, RelationNeedsWAL, SizeOfPageHeaderData, START_CRIT_SECTION, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_set().

Referenced by lazy_scan_heap().
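
A standalone sketch of the classification above, assuming made-up page kinds instead of PageIsNew()/PageIsEmpty(): all-zero pages only get an FSM entry, while initialized-but-empty pages are also marked all-visible and all-frozen. The constants and printfs are illustrative only.

#include <stdbool.h>
#include <stdio.h>

#define MODEL_BLCKSZ 8192
#define MODEL_PAGE_HEADER 24    /* roughly SizeOfPageHeaderData */

typedef enum {PAGE_NEW, PAGE_EMPTY, PAGE_NORMAL} ModelPageKind;

/* Returns true when the caller is done with the page, like the real function */
static bool
model_scan_new_or_empty(ModelPageKind kind, unsigned blkno)
{
    if (kind == PAGE_NEW)
    {
        /* All-zero page: just make sure the FSM knows it is reusable */
        printf("block %u: record %d bytes free, leave the VM alone\n",
               blkno, MODEL_BLCKSZ - MODEL_PAGE_HEADER);
        return true;
    }
    if (kind == PAGE_EMPTY)
    {
        /* Initialized but empty: also set all-visible and all-frozen */
        printf("block %u: record free space and set both VM bits\n", blkno);
        return true;
    }
    return false;    /* caller proceeds to prune the page */
}

int
main(void)
{
    model_scan_new_or_empty(PAGE_NEW, 1);
    model_scan_new_or_empty(PAGE_EMPTY, 2);
    model_scan_new_or_empty(PAGE_NORMAL, 3);
    return 0;
}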

◆ lazy_scan_noprune()

static bool lazy_scan_noprune ( LVRelState *  vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool *  hastup,
bool *  recordfreespace 
)
static

Definition at line 1915 of file vacuumlazy.c.

1921 {
1922  OffsetNumber offnum,
1923  maxoff;
1924  int lpdead_items,
1925  live_tuples,
1926  recently_dead_tuples,
1927  missed_dead_tuples;
1928  HeapTupleHeader tupleheader;
1929  TransactionId NewRelfrozenXid = vacrel->NewRelfrozenXid;
1930  MultiXactId NewRelminMxid = vacrel->NewRelminMxid;
1931  OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
1932 
1933  Assert(BufferGetBlockNumber(buf) == blkno);
1934 
1935  *hastup = false; /* for now */
1936  *recordfreespace = false; /* for now */
1937 
1938  lpdead_items = 0;
1939  live_tuples = 0;
1940  recently_dead_tuples = 0;
1941  missed_dead_tuples = 0;
1942 
1943  maxoff = PageGetMaxOffsetNumber(page);
1944  for (offnum = FirstOffsetNumber;
1945  offnum <= maxoff;
1946  offnum = OffsetNumberNext(offnum))
1947  {
1948  ItemId itemid;
1949  HeapTupleData tuple;
1950 
1951  vacrel->offnum = offnum;
1952  itemid = PageGetItemId(page, offnum);
1953 
1954  if (!ItemIdIsUsed(itemid))
1955  continue;
1956 
1957  if (ItemIdIsRedirected(itemid))
1958  {
1959  *hastup = true;
1960  continue;
1961  }
1962 
1963  if (ItemIdIsDead(itemid))
1964  {
1965  /*
1966  * Deliberately don't set hastup=true here. See same point in
1967  * lazy_scan_prune for an explanation.
1968  */
1969  deadoffsets[lpdead_items++] = offnum;
1970  continue;
1971  }
1972 
1973  *hastup = true; /* page prevents rel truncation */
1974  tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
1975  if (heap_tuple_would_freeze(tupleheader,
1976  vacrel->FreezeLimit,
1977  vacrel->MultiXactCutoff,
1978  &NewRelfrozenXid, &NewRelminMxid))
1979  {
1980  /* Tuple with XID < FreezeLimit (or MXID < MultiXactCutoff) */
1981  if (vacrel->aggressive)
1982  {
1983  /*
1984  * Aggressive VACUUMs must always be able to advance rel's
1985  * relfrozenxid to a value >= FreezeLimit (and be able to
1986  * advance rel's relminmxid to a value >= MultiXactCutoff).
1987  * The ongoing aggressive VACUUM won't be able to do that
1988  * unless it can freeze an XID (or MXID) from this tuple now.
1989  *
1990  * The only safe option is to have caller perform processing
1991  * of this page using lazy_scan_prune. Caller might have to
1992  * wait a while for a cleanup lock, but it can't be helped.
1993  */
1994  vacrel->offnum = InvalidOffsetNumber;
1995  return false;
1996  }
1997 
1998  /*
1999  * Non-aggressive VACUUMs are under no obligation to advance
2000  * relfrozenxid (even by one XID). We can be much laxer here.
2001  *
2002  * Currently we always just accept an older final relfrozenxid
2003  * and/or relminmxid value. We never make caller wait or work a
2004  * little harder, even when it likely makes sense to do so.
2005  */
2006  }
2007 
2008  ItemPointerSet(&(tuple.t_self), blkno, offnum);
2009  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2010  tuple.t_len = ItemIdGetLength(itemid);
2011  tuple.t_tableOid = RelationGetRelid(vacrel->rel);
2012 
2013  switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf))
2014  {
2015  case HEAPTUPLE_DELETE_IN_PROGRESS:
2016  case HEAPTUPLE_LIVE:
2017 
2018  /*
2019  * Count both cases as live, just like lazy_scan_prune
2020  */
2021  live_tuples++;
2022 
2023  break;
2024  case HEAPTUPLE_DEAD:
2025 
2026  /*
2027  * There is some useful work for pruning to do, that won't be
2028  * done due to failure to get a cleanup lock.
2029  */
2030  missed_dead_tuples++;
2031  break;
2032  case HEAPTUPLE_RECENTLY_DEAD:
2033 
2034  /*
2035  * Count in recently_dead_tuples, just like lazy_scan_prune
2036  */
2037  recently_dead_tuples++;
2038  break;
2039  case HEAPTUPLE_INSERT_IN_PROGRESS:
2040 
2041  /*
2042  * Do not count these rows as live, just like lazy_scan_prune
2043  */
2044  break;
2045  default:
2046  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2047  break;
2048  }
2049  }
2050 
2051  vacrel->offnum = InvalidOffsetNumber;
2052 
2053  /*
2054  * By here we know for sure that caller can put off freezing and pruning
2055  * this particular page until the next VACUUM. Remember its details now.
2056  * (lazy_scan_prune expects a clean slate, so we have to do this last.)
2057  */
2058  vacrel->NewRelfrozenXid = NewRelfrozenXid;
2059  vacrel->NewRelminMxid = NewRelminMxid;
2060 
2061  /* Save any LP_DEAD items found on the page in dead_items array */
2062  if (vacrel->nindexes == 0)
2063  {
2064  /* Using one-pass strategy (since table has no indexes) */
2065  if (lpdead_items > 0)
2066  {
2067  /*
2068  * Perfunctory handling for the corner case where a single pass
2069  * strategy VACUUM cannot get a cleanup lock, and it turns out
2070  * that there is one or more LP_DEAD items: just count the LP_DEAD
2071  * items as missed_dead_tuples instead. (This is a bit dishonest,
2072  * but it beats having to maintain specialized heap vacuuming code
2073  * forever, for vanishingly little benefit.)
2074  */
2075  *hastup = true;
2076  missed_dead_tuples += lpdead_items;
2077  }
2078 
2079  *recordfreespace = true;
2080  }
2081  else if (lpdead_items == 0)
2082  {
2083  /*
2084  * Won't be vacuuming this page later, so record page's freespace in
2085  * the FSM now
2086  */
2087  *recordfreespace = true;
2088  }
2089  else
2090  {
2091  VacDeadItems *dead_items = vacrel->dead_items;
2092  ItemPointerData tmp;
2093 
2094  /*
2095  * Page has LP_DEAD items, and so any references/TIDs that remain in
2096  * indexes will be deleted during index vacuuming (and then marked
2097  * LP_UNUSED in the heap)
2098  */
2099  vacrel->lpdead_item_pages++;
2100 
2101  ItemPointerSetBlockNumber(&tmp, blkno);
2102 
2103  for (int i = 0; i < lpdead_items; i++)
2104  {
2105  ItemPointerSetOffsetNumber(&tmp, deadoffsets[i]);
2106  dead_items->items[dead_items->num_items++] = tmp;
2107  }
2108 
2109  Assert(dead_items->num_items <= dead_items->max_items);
2110  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
2111  dead_items->num_items);
2112 
2113  vacrel->lpdead_items += lpdead_items;
2114 
2115  /*
2116  * Assume that we'll go on to vacuum this heap page during final pass
2117  * over the heap. Don't record free space until then.
2118  */
2119  *recordfreespace = false;
2120  }
2121 
2122  /*
2123  * Finally, add relevant page-local counts to whole-VACUUM counts
2124  */
2125  vacrel->live_tuples += live_tuples;
2126  vacrel->recently_dead_tuples += recently_dead_tuples;
2127  vacrel->missed_dead_tuples += missed_dead_tuples;
2128  if (missed_dead_tuples > 0)
2129  vacrel->missed_dead_pages++;
2130 
2131  /* Caller won't need to call lazy_scan_prune with same page */
2132  return true;
2133 }

References LVRelState::aggressive, Assert(), buf, BufferGetBlockNumber(), LVRelState::dead_items, elog(), ERROR, FirstOffsetNumber, LVRelState::FreezeLimit, heap_tuple_would_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuum(), i, InvalidOffsetNumber, ItemIdGetLength, ItemIdIsDead, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), VacDeadItems::items, LVRelState::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, VacDeadItems::max_items, MaxHeapTuplesPerPage, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, LVRelState::MultiXactCutoff, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, VacDeadItems::num_items, LVRelState::offnum, OffsetNumberNext, LVRelState::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), pgstat_progress_update_param(), PROGRESS_VACUUM_NUM_DEAD_TUPLES, LVRelState::recently_dead_tuples, LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by lazy_scan_heap().
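
The aggressive-VACUUM bail-out can be modeled with a short sketch. Plain integer comparisons stand in for TransactionIdPrecedes() and the FreezeLimit cutoff (real XID comparisons are wraparound-aware), and the struct below is not LVRelState.

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    bool     aggressive;
    unsigned freeze_limit;    /* stand-in for FreezeLimit, an XID cutoff */
} ModelVac;

/* Returns false when the caller must retry the page under a cleanup lock */
static bool
model_scan_noprune(const ModelVac *vac, const unsigned *tuple_xmins, int ntuples)
{
    for (int i = 0; i < ntuples; i++)
    {
        bool would_freeze = tuple_xmins[i] < vac->freeze_limit;

        if (would_freeze && vac->aggressive)
            return false;    /* must advance relfrozenxid: no shortcut allowed */
        /* a non-aggressive VACUUM simply accepts an older final relfrozenxid */
    }
    return true;             /* reduced processing was good enough */
}

int
main(void)
{
    unsigned xmins[] = {900, 1500, 2000};
    ModelVac lax = {false, 1000};
    ModelVac agg = {true, 1000};

    printf("non-aggressive ok: %d\n", model_scan_noprune(&lax, xmins, 3));
    printf("aggressive ok:     %d\n", model_scan_noprune(&agg, xmins, 3));
    return 0;
}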

◆ lazy_scan_prune()

static void lazy_scan_prune ( LVRelState *  vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
LVPagePruneState *  prunestate 
)
static

Definition at line 1543 of file vacuumlazy.c.

1548 {
1549  Relation rel = vacrel->rel;
1550  OffsetNumber offnum,
1551  maxoff;
1552  ItemId itemid;
1553  HeapTupleData tuple;
1554  HTSV_Result res;
1555  int tuples_deleted,
1556  tuples_frozen,
1557  lpdead_items,
1558  live_tuples,
1559  recently_dead_tuples;
1560  int nnewlpdead;
1561  TransactionId NewRelfrozenXid;
1562  MultiXactId NewRelminMxid;
1563  OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
1565 
1566  Assert(BufferGetBlockNumber(buf) == blkno);
1567 
1568  /*
1569  * maxoff might be reduced following line pointer array truncation in
1570  * heap_page_prune. That's safe for us to ignore, since the reclaimed
1571  * space will continue to look like LP_UNUSED items below.
1572  */
1573  maxoff = PageGetMaxOffsetNumber(page);
1574 
1575 retry:
1576 
1577  /* Initialize (or reset) page-level state */
1578  NewRelfrozenXid = vacrel->NewRelfrozenXid;
1579  NewRelminMxid = vacrel->NewRelminMxid;
1580  tuples_deleted = 0;
1581  tuples_frozen = 0;
1582  lpdead_items = 0;
1583  live_tuples = 0;
1584  recently_dead_tuples = 0;
1585 
1586  /*
1587  * Prune all HOT-update chains in this page.
1588  *
1589  * We count tuples removed by the pruning step as tuples_deleted. Its
1590  * final value can be thought of as the number of tuples that have been
1591  * deleted from the table. It should not be confused with lpdead_items;
1592  * lpdead_items's final value can be thought of as the number of tuples
1593  * that were deleted from indexes.
1594  */
1595  tuples_deleted = heap_page_prune(rel, buf, vacrel->vistest,
1596  InvalidTransactionId, 0, &nnewlpdead,
1597  &vacrel->offnum);
1598 
1599  /*
1600  * Now scan the page to collect LP_DEAD items and check for tuples
1601  * requiring freezing among remaining tuples with storage
1602  */
1603  prunestate->hastup = false;
1604  prunestate->has_lpdead_items = false;
1605  prunestate->all_visible = true;
1606  prunestate->all_frozen = true;
1608 
1609  for (offnum = FirstOffsetNumber;
1610  offnum <= maxoff;
1611  offnum = OffsetNumberNext(offnum))
1612  {
1613  bool tuple_totally_frozen;
1614 
1615  /*
1616  * Set the offset number so that we can display it along with any
1617  * error that occurred while processing this tuple.
1618  */
1619  vacrel->offnum = offnum;
1620  itemid = PageGetItemId(page, offnum);
1621 
1622  if (!ItemIdIsUsed(itemid))
1623  continue;
1624 
1625  /* Redirect items mustn't be touched */
1626  if (ItemIdIsRedirected(itemid))
1627  {
1628  prunestate->hastup = true; /* page won't be truncatable */
1629  continue;
1630  }
1631 
1632  /*
1633  * LP_DEAD items are processed outside of the loop.
1634  *
1635  * Note that we deliberately don't set hastup=true in the case of an
1636  * LP_DEAD item here, which is not how count_nondeletable_pages() does
1637  * it -- it only considers pages empty/truncatable when they have no
1638  * items at all (except LP_UNUSED items).
1639  *
1640  * Our assumption is that any LP_DEAD items we encounter here will
1641  * become LP_UNUSED inside lazy_vacuum_heap_page() before we actually
1642  * call count_nondeletable_pages(). In any case our opinion of
1643  * whether or not a page 'hastup' (which is how our caller sets its
1644  * vacrel->nonempty_pages value) is inherently race-prone. It must be
1645  * treated as advisory/unreliable, so we might as well be slightly
1646  * optimistic.
1647  */
1648  if (ItemIdIsDead(itemid))
1649  {
1650  deadoffsets[lpdead_items++] = offnum;
1651  prunestate->all_visible = false;
1652  prunestate->has_lpdead_items = true;
1653  continue;
1654  }
1655 
1656  Assert(ItemIdIsNormal(itemid));
1657 
1658  ItemPointerSet(&(tuple.t_self), blkno, offnum);
1659  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
1660  tuple.t_len = ItemIdGetLength(itemid);
1661  tuple.t_tableOid = RelationGetRelid(rel);
1662 
1663  /*
1664  * DEAD tuples are almost always pruned into LP_DEAD line pointers by
1665  * heap_page_prune(), but it's possible that the tuple state changed
1666  * since heap_page_prune() looked. Handle that here by restarting.
1667  * (See comments at the top of function for a full explanation.)
1668  */
1669  res = HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf);
1670 
1671  if (unlikely(res == HEAPTUPLE_DEAD))
1672  goto retry;
1673 
1674  /*
1675  * The criteria for counting a tuple as live in this block need to
1676  * match what analyze.c's acquire_sample_rows() does, otherwise VACUUM
1677  * and ANALYZE may produce wildly different reltuples values, e.g.
1678  * when there are many recently-dead tuples.
1679  *
1680  * The logic here is a bit simpler than acquire_sample_rows(), as
1681  * VACUUM can't run inside a transaction block, which makes some cases
1682  * impossible (e.g. in-progress insert from the same transaction).
1683  *
1684  * We treat LP_DEAD items (which are the closest thing to DEAD tuples
1685  * that might be seen here) differently, too: we assume that they'll
1686  * become LP_UNUSED before VACUUM finishes. This difference is only
1687  * superficial. VACUUM effectively agrees with ANALYZE about DEAD
1688  * items, in the end. VACUUM won't remember LP_DEAD items, but only
1689  * because they're not supposed to be left behind when it is done.
1690  * (Cases where we bypass index vacuuming will violate this optimistic
1691  * assumption, but the overall impact of that should be negligible.)
1692  */
1693  switch (res)
1694  {
1695  case HEAPTUPLE_LIVE:
1696 
1697  /*
1698  * Count it as live. Not only is this natural, but it's also
1699  * what acquire_sample_rows() does.
1700  */
1701  live_tuples++;
1702 
1703  /*
1704  * Is the tuple definitely visible to all transactions?
1705  *
1706  * NB: Like with per-tuple hint bits, we can't set the
1707  * PD_ALL_VISIBLE flag if the inserter committed
1708  * asynchronously. See SetHintBits for more info. Check that
1709  * the tuple is hinted xmin-committed because of that.
1710  */
1711  if (prunestate->all_visible)
1712  {
1713  TransactionId xmin;
1714 
1716  {
1717  prunestate->all_visible = false;
1718  break;
1719  }
1720 
1721  /*
1722  * The inserter definitely committed. But is it old enough
1723  * that everyone sees it as committed?
1724  */
1725  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
1726  if (!TransactionIdPrecedes(xmin, vacrel->OldestXmin))
1727  {
1728  prunestate->all_visible = false;
1729  break;
1730  }
1731 
1732  /* Track newest xmin on page. */
1733  if (TransactionIdFollows(xmin, prunestate->visibility_cutoff_xid))
1734  prunestate->visibility_cutoff_xid = xmin;
1735  }
1736  break;
1737  case HEAPTUPLE_RECENTLY_DEAD:
1738 
1739  /*
1740  * If tuple is recently dead then we must not remove it from
1741  * the relation. (We only remove items that are LP_DEAD from
1742  * pruning.)
1743  */
1744  recently_dead_tuples++;
1745  prunestate->all_visible = false;
1746  break;
1747  case HEAPTUPLE_INSERT_IN_PROGRESS:
1748 
1749  /*
1750  * We do not count these rows as live, because we expect the
1751  * inserting transaction to update the counters at commit, and
1752  * we assume that will happen only after we report our
1753  * results. This assumption is a bit shaky, but it is what
1754  * acquire_sample_rows() does, so be consistent.
1755  */
1756  prunestate->all_visible = false;
1757  break;
1758  case HEAPTUPLE_DELETE_IN_PROGRESS:
1759  /* This is an expected case during concurrent vacuum */
1760  prunestate->all_visible = false;
1761 
1762  /*
1763  * Count such rows as live. As above, we assume the deleting
1764  * transaction will commit and update the counters after we
1765  * report.
1766  */
1767  live_tuples++;
1768  break;
1769  default:
1770  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1771  break;
1772  }
1773 
1774  prunestate->hastup = true; /* page makes rel truncation unsafe */
1775 
1776  /* Tuple with storage -- consider need to freeze */
1778  vacrel->relfrozenxid,
1779  vacrel->relminmxid,
1780  vacrel->FreezeLimit,
1781  vacrel->MultiXactCutoff,
1782  &frozen[tuples_frozen],
1783  &tuple_totally_frozen,
1784  &NewRelfrozenXid, &NewRelminMxid))
1785  {
1786  /* Save prepared freeze plan for later */
1787  frozen[tuples_frozen++].offset = offnum;
1788  }
1789 
1790  /*
1791  * If tuple is not frozen (and not about to become frozen) then caller
1792  * had better not go on to set this page's VM bit
1793  */
1794  if (!tuple_totally_frozen)
1795  prunestate->all_frozen = false;
1796  }
1797 
1798  vacrel->offnum = InvalidOffsetNumber;
1799 
1800  /*
1801  * We have now divided every item on the page into either an LP_DEAD item
1802  * that will need to be vacuumed in indexes later, or a LP_NORMAL tuple
1803  * that remains and needs to be considered for freezing now (LP_UNUSED and
1804  * LP_REDIRECT items also remain, but are of no further interest to us).
1805  */
1806  vacrel->NewRelfrozenXid = NewRelfrozenXid;
1807  vacrel->NewRelminMxid = NewRelminMxid;
1808 
1809  /*
1810  * Consider the need to freeze any items with tuple storage from the page
1811  * first (arbitrary)
1812  */
1813  if (tuples_frozen > 0)
1814  {
1815  Assert(prunestate->hastup);
1816 
1817  vacrel->frozen_pages++;
1818 
1819  /* Execute all freeze plans for page as a single atomic action */
1820  heap_freeze_execute_prepared(vacrel->rel, buf, vacrel->FreezeLimit,
1821  frozen, tuples_frozen);
1822  }
1823 
1824  /*
1825  * The second pass over the heap can also set visibility map bits, using
1826  * the same approach. This is important when the table frequently has a
1827  * few old LP_DEAD items on each page by the time we get to it (typically
1828  * because past opportunistic pruning operations freed some non-HOT
1829  * tuples).
1830  *
1831  * VACUUM will call heap_page_is_all_visible() during the second pass over
1832  * the heap to determine all_visible and all_frozen for the page -- this
1833  * is a specialized version of the logic from this function. Now that
1834  * we've finished pruning and freezing, make sure that we're in total
1835  * agreement with heap_page_is_all_visible() using an assertion.
1836  */
1837 #ifdef USE_ASSERT_CHECKING
1838  /* Note that all_frozen value does not matter when !all_visible */
1839  if (prunestate->all_visible)
1840  {
1841  TransactionId cutoff;
1842  bool all_frozen;
1843 
1844  if (!heap_page_is_all_visible(vacrel, buf, &cutoff, &all_frozen))
1845  Assert(false);
1846 
1847  Assert(lpdead_items == 0);
1848  Assert(prunestate->all_frozen == all_frozen);
1849 
1850  /*
1851  * It's possible that we froze tuples and made the page's XID cutoff
1852  * (for recovery conflict purposes) FrozenTransactionId. This is okay
1853  * because visibility_cutoff_xid will be logged by our caller in a
1854  * moment.
1855  */
1856  Assert(cutoff == FrozenTransactionId ||
1857  cutoff == prunestate->visibility_cutoff_xid);
1858  }
1859 #endif
1860 
1861  /*
1862  * Now save details of the LP_DEAD items from the page in vacrel
1863  */
1864  if (lpdead_items > 0)
1865  {
1866  VacDeadItems *dead_items = vacrel->dead_items;
1867  ItemPointerData tmp;
1868 
1869  Assert(!prunestate->all_visible);
1870  Assert(prunestate->has_lpdead_items);
1871 
1872  vacrel->lpdead_item_pages++;
1873 
1874  ItemPointerSetBlockNumber(&tmp, blkno);
1875 
1876  for (int i = 0; i < lpdead_items; i++)
1877  {
1878  ItemPointerSetOffsetNumber(&tmp, deadoffsets[i]);
1879  dead_items->items[dead_items->num_items++] = tmp;
1880  }
1881 
1882  Assert(dead_items->num_items <= dead_items->max_items);
1883  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
1884  dead_items->num_items);
1885  }
1886 
1887  /* Finally, add page-local counts to whole-VACUUM counts */
1888  vacrel->tuples_deleted += tuples_deleted;
1889  vacrel->tuples_frozen += tuples_frozen;
1890  vacrel->lpdead_items += lpdead_items;
1891  vacrel->live_tuples += live_tuples;
1892  vacrel->recently_dead_tuples += recently_dead_tuples;
1893 }

References LVPagePruneState::all_frozen, LVPagePruneState::all_visible, Assert(), buf, BufferGetBlockNumber(), LVRelState::dead_items, elog(), ERROR, FirstOffsetNumber, LVRelState::FreezeLimit, LVRelState::frozen_pages, FrozenTransactionId, LVPagePruneState::has_lpdead_items, LVPagePruneState::hastup, heap_freeze_execute_prepared(), heap_page_is_all_visible(), heap_page_prune(), heap_prepare_freeze_tuple(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleSatisfiesVacuum(), i, InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), VacDeadItems::items, LVRelState::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, VacDeadItems::max_items, MaxHeapTuplesPerPage, LVRelState::MultiXactCutoff, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, VacDeadItems::num_items, LVRelState::offnum, HeapTupleFreeze::offset, OffsetNumberNext, LVRelState::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), pgstat_progress_update_param(), PROGRESS_VACUUM_NUM_DEAD_TUPLES, LVRelState::recently_dead_tuples, LVRelState::rel, RelationGetRelid, LVRelState::relfrozenxid, LVRelState::relminmxid, res, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdPrecedes(), LVRelState::tuples_deleted, LVRelState::tuples_frozen, unlikely, LVPagePruneState::visibility_cutoff_xid, and LVRelState::vistest.

Referenced by lazy_scan_heap().
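
The page-level summary that lazy_scan_prune hands back in LVPagePruneState can be sketched in isolation. Plain integers stand in for XIDs, and the hint-bit and OldestXmin checks are reduced to simple flags and comparisons, so this is a model of the bookkeeping rather than the real visibility logic.

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    unsigned xmin;
    bool     committed_hint;    /* xmin known committed (hint bit set) */
    bool     frozen;            /* already frozen, or about to be frozen */
    bool     lp_dead;           /* stub left behind by pruning */
} ModelTuple;

typedef struct
{
    bool     all_visible;
    bool     all_frozen;
    bool     has_lpdead_items;
    unsigned visibility_cutoff_xid;    /* newest xmin among visible tuples */
} ModelPruneState;

static void
model_summarize_page(const ModelTuple *tup, int n, unsigned oldest_xmin,
                     ModelPruneState *ps)
{
    ps->all_visible = true;
    ps->all_frozen = true;
    ps->has_lpdead_items = false;
    ps->visibility_cutoff_xid = 0;

    for (int i = 0; i < n; i++)
    {
        if (tup[i].lp_dead)
        {
            ps->has_lpdead_items = true;
            ps->all_visible = false;
            continue;
        }
        if (!tup[i].committed_hint || tup[i].xmin >= oldest_xmin)
            ps->all_visible = false;
        else if (tup[i].xmin > ps->visibility_cutoff_xid)
            ps->visibility_cutoff_xid = tup[i].xmin;
        if (!tup[i].frozen)
            ps->all_frozen = false;
    }
}

int
main(void)
{
    ModelTuple tuples[] = {
        {100, true, true, false},
        {250, true, false, false},
    };
    ModelPruneState ps;

    model_summarize_page(tuples, 2, 1000, &ps);
    printf("all_visible=%d all_frozen=%d cutoff=%u\n",
           ps.all_visible, ps.all_frozen, ps.visibility_cutoff_xid);
    return 0;
}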

◆ lazy_scan_skip()

static BlockNumber lazy_scan_skip ( LVRelState *  vacrel,
Buffer *  vmbuffer,
BlockNumber  next_block,
bool *  next_unskippable_allvis,
bool *  skipping_current_range 
)
static

Definition at line 1308 of file vacuumlazy.c.

1310 {
1311  BlockNumber rel_pages = vacrel->rel_pages,
1312  next_unskippable_block = next_block,
1313  nskippable_blocks = 0;
1314  bool skipsallvis = false;
1315 
1316  *next_unskippable_allvis = true;
1317  while (next_unskippable_block < rel_pages)
1318  {
1319  uint8 mapbits = visibilitymap_get_status(vacrel->rel,
1320  next_unskippable_block,
1321  vmbuffer);
1322 
1323  if ((mapbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
1324  {
1325  Assert((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0);
1326  *next_unskippable_allvis = false;
1327  break;
1328  }
1329 
1330  /*
1331  * Caller must scan the last page to determine whether it has tuples
1332  * (caller must have the opportunity to set vacrel->nonempty_pages).
1333  * This rule avoids having lazy_truncate_heap() take access-exclusive
1334  * lock on rel to attempt a truncation that fails anyway, just because
1335  * there are tuples on the last page (it is likely that there will be
1336  * tuples on other nearby pages as well, but those can be skipped).
1337  *
1338  * Implement this by always treating the last block as unsafe to skip.
1339  */
1340  if (next_unskippable_block == rel_pages - 1)
1341  break;
1342 
1343  /* DISABLE_PAGE_SKIPPING makes all skipping unsafe */
1344  if (!vacrel->skipwithvm)
1345  break;
1346 
1347  /*
1348  * Aggressive VACUUM caller can't skip pages just because they are
1349  * all-visible. They may still skip all-frozen pages, which can't
1350  * contain XIDs < OldestXmin (XIDs that aren't already frozen by now).
1351  */
1352  if ((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0)
1353  {
1354  if (vacrel->aggressive)
1355  break;
1356 
1357  /*
1358  * All-visible block is safe to skip in non-aggressive case. But
1359  * remember that the final range contains such a block for later.
1360  */
1361  skipsallvis = true;
1362  }
1363 
1365  next_unskippable_block++;
1366  nskippable_blocks++;
1367  }
1368 
1369  /*
1370  * We only skip a range with at least SKIP_PAGES_THRESHOLD consecutive
1371  * pages. Since we're reading sequentially, the OS should be doing
1372  * readahead for us, so there's no gain in skipping a page now and then.
1373  * Skipping such a range might even discourage sequential detection.
1374  *
1375  * This test also enables more frequent relfrozenxid advancement during
1376  * non-aggressive VACUUMs. If the range has any all-visible pages then
1377  * skipping makes updating relfrozenxid unsafe, which is a real downside.
1378  */
1379  if (nskippable_blocks < SKIP_PAGES_THRESHOLD)
1380  *skipping_current_range = false;
1381  else
1382  {
1383  *skipping_current_range = true;
1384  if (skipsallvis)
1385  vacrel->skippedallvis = true;
1386  }
1387 
1388  return next_unskippable_block;
1389 }

References LVRelState::aggressive, Assert(), LVRelState::rel, LVRelState::rel_pages, SKIP_PAGES_THRESHOLD, LVRelState::skippedallvis, LVRelState::skipwithvm, vacuum_delay_point(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_get_status().

Referenced by lazy_scan_heap().
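
The SKIP_PAGES_THRESHOLD decision is easy to demonstrate standalone. The sketch below deliberately ignores the aggressive and DISABLE_PAGE_SKIPPING cases and shows only the two rules that always apply: the last block is never skipped, and a run of all-visible blocks is only skipped when it is at least 32 blocks long. The array of booleans is a stand-in for the visibility map.

#include <stdbool.h>
#include <stdio.h>

#define MODEL_SKIP_PAGES_THRESHOLD 32

/* vm[] holds one flag per block: true = all-visible according to the VM */
static unsigned
model_scan_skip(const bool *vm, unsigned rel_pages, unsigned next_block,
                bool *skipping_current_range)
{
    unsigned next_unskippable = next_block;
    unsigned nskippable = 0;

    while (next_unskippable < rel_pages)
    {
        if (!vm[next_unskippable])
            break;    /* must scan this block */
        if (next_unskippable == rel_pages - 1)
            break;    /* last block is always scanned */
        next_unskippable++;
        nskippable++;
    }
    *skipping_current_range = (nskippable >= MODEL_SKIP_PAGES_THRESHOLD);
    return next_unskippable;
}

int
main(void)
{
    bool vm[100];
    bool skip;

    for (int i = 0; i < 100; i++)
        vm[i] = (i >= 10 && i < 80);    /* blocks 10..79 are all-visible */

    unsigned next = model_scan_skip(vm, 100, 10, &skip);

    printf("next unskippable block %u, skip the range: %d\n", next, skip);
    return 0;
}

With 70 consecutive all-visible blocks the range is skipped; a run shorter than the threshold would be scanned anyway, relying on OS readahead.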

◆ lazy_truncate_heap()

static void lazy_truncate_heap ( LVRelState *  vacrel)
static

Definition at line 2803 of file vacuumlazy.c.

2804 {
2805  BlockNumber orig_rel_pages = vacrel->rel_pages;
2806  BlockNumber new_rel_pages;
2807  bool lock_waiter_detected;
2808  int lock_retry;
2809 
2810  /* Report that we are now truncating */
2813 
2814  /* Update error traceback information one last time */
2817 
2818  /*
2819  * Loop until no more truncating can be done.
2820  */
2821  do
2822  {
2823  /*
2824  * We need full exclusive lock on the relation in order to do
2825  * truncation. If we can't get it, give up rather than waiting --- we
2826  * don't want to block other backends, and we don't want to deadlock
2827  * (which is quite possible considering we already hold a lower-grade
2828  * lock).
2829  */
2830  lock_waiter_detected = false;
2831  lock_retry = 0;
2832  while (true)
2833  {
2835  break;
2836 
2837  /*
2838  * Check for interrupts while trying to (re-)acquire the exclusive
2839  * lock.
2840  */
2842 
2843  if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
2845  {
2846  /*
2847  * We failed to establish the lock in the specified number of
2848  * retries. This means we give up truncating.
2849  */
2850  ereport(vacrel->verbose ? INFO : DEBUG2,
2851  (errmsg("\"%s\": stopping truncate due to conflicting lock request",
2852  vacrel->relname)));
2853  return;
2854  }
2855 
2856  (void) WaitLatch(MyLatch,
2861  }
2862 
2863  /*
2864  * Now that we have exclusive lock, look to see if the rel has grown
2865  * whilst we were vacuuming with non-exclusive lock. If so, give up;
2866  * the newly added pages presumably contain non-deletable tuples.
2867  */
2868  new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
2869  if (new_rel_pages != orig_rel_pages)
2870  {
2871  /*
2872  * Note: we intentionally don't update vacrel->rel_pages with the
2873  * new rel size here. If we did, it would amount to assuming that
2874  * the new pages are empty, which is unlikely. Leaving the numbers
2875  * alone amounts to assuming that the new pages have the same
2876  * tuple density as existing ones, which is less unlikely.
2877  */
2879  return;
2880  }
2881 
2882  /*
2883  * Scan backwards from the end to verify that the end pages actually
2884  * contain no tuples. This is *necessary*, not optional, because
2885  * other backends could have added tuples to these pages whilst we
2886  * were vacuuming.
2887  */
2888  new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
2889  vacrel->blkno = new_rel_pages;
2890 
2891  if (new_rel_pages >= orig_rel_pages)
2892  {
2893  /* can't do anything after all */
2895  return;
2896  }
2897 
2898  /*
2899  * Okay to truncate.
2900  */
2901  RelationTruncate(vacrel->rel, new_rel_pages);
2902 
2903  /*
2904  * We can release the exclusive lock as soon as we have truncated.
2905  * Other backends can't safely access the relation until they have
2906  * processed the smgr invalidation that smgrtruncate sent out ... but
2907  * that should happen as part of standard invalidation processing once
2908  * they acquire lock on the relation.
2909  */
2910  UnlockRelation(vacrel->rel, AccessExclusiveLock);
2911 
2912  /*
2913  * Update statistics. Here, it *is* correct to adjust rel_pages
2914  * without also touching reltuples, since the tuple count wasn't
2915  * changed by the truncation.
2916  */
2917  vacrel->removed_pages += orig_rel_pages - new_rel_pages;
2918  vacrel->rel_pages = new_rel_pages;
2919 
2920  ereport(vacrel->verbose ? INFO : DEBUG2,
2921  (errmsg("table \"%s\": truncated %u to %u pages",
2922  vacrel->relname,
2923  orig_rel_pages, new_rel_pages)));
2924  orig_rel_pages = new_rel_pages;
2925  } while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
2926 }
struct Latch * MyLatch
Definition: globals.c:58
void ResetLatch(Latch *latch)
Definition: latch.c:683
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:476
#define WL_TIMEOUT
Definition: latch.h:128
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:130
#define WL_LATCH_SET
Definition: latch.h:125
void UnlockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:311
bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:276
#define PROGRESS_VACUUM_PHASE_TRUNCATE
Definition: progress.h:34
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:287
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL
Definition: vacuumlazy.c:87
#define VACUUM_TRUNCATE_LOCK_TIMEOUT
Definition: vacuumlazy.c:88
static BlockNumber count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
Definition: vacuumlazy.c:2934
@ WAIT_EVENT_VACUUM_TRUNCATE
Definition: wait_event.h:150

References AccessExclusiveLock, LVRelState::blkno, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), DEBUG2, ereport, errmsg(), INFO, InvalidOffsetNumber, MyLatch, LVRelState::nonempty_pages, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelState::rel, LVRelState::rel_pages, RelationGetNumberOfBlocks, RelationTruncate(), LVRelState::relname, LVRelState::removed_pages, ResetLatch(), UnlockRelation(), update_vacuum_error_info(), VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_TRUNCATE_LOCK_TIMEOUT, VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL, LVRelState::verbose, WAIT_EVENT_VACUUM_TRUNCATE, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

Referenced by heap_vacuum_rel().
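
The retry budget above is worth making concrete: with VACUUM_TRUNCATE_LOCK_TIMEOUT at 5000 ms and VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL at 50 ms, the loop gives up after 5000 / 50 = 100 failed lock attempts, i.e. after roughly five seconds of latch waits. The standalone sketch below is not PostgreSQL code; it only mirrors that arithmetic.

#include <stdio.h>

/* Values copied from vacuumlazy.c (milliseconds) */
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50
#define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000

int
main(void)
{
    int lock_retry = 0;

    /* Mirror the retry accounting: give up once the count exceeds the budget */
    while (++lock_retry <= (VACUUM_TRUNCATE_LOCK_TIMEOUT /
                            VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
        ;                       /* each iteration would WaitLatch() for ~50 ms */

    printf("gave up after %d retries (~%d ms of waiting)\n",
           lock_retry - 1,
           (lock_retry - 1) * VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL);
    return 0;
}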

◆ lazy_vacuum()

static void lazy_vacuum ( LVRelState * vacrel)
static

Definition at line 2149 of file vacuumlazy.c.

2150 {
2151  bool bypass;
2152 
2153  /* Should not end up here with no indexes */
2154  Assert(vacrel->nindexes > 0);
2155  Assert(vacrel->lpdead_item_pages > 0);
2156 
2157  if (!vacrel->do_index_vacuuming)
2158  {
2159  Assert(!vacrel->do_index_cleanup);
2160  vacrel->dead_items->num_items = 0;
2161  return;
2162  }
2163 
2164  /*
2165  * Consider bypassing index vacuuming (and heap vacuuming) entirely.
2166  *
2167  * We currently only do this in cases where the number of LP_DEAD items
2168  * for the entire VACUUM operation is close to zero. This avoids sharp
2169  * discontinuities in the duration and overhead of successive VACUUM
2170  * operations that run against the same table with a fixed workload.
2171  * Ideally, successive VACUUM operations will behave as if there are
2172  * exactly zero LP_DEAD items in cases where there are close to zero.
2173  *
2174  * This is likely to be helpful with a table that is continually affected
2175  * by UPDATEs that can mostly apply the HOT optimization, but occasionally
2176  * have small aberrations that lead to just a few heap pages retaining
2177  * only one or two LP_DEAD items. This is pretty common; even when the
2178  * DBA goes out of their way to make UPDATEs use HOT, it is practically
2179  * impossible to predict whether HOT will be applied in 100% of cases.
2180  * It's far easier to ensure that 99%+ of all UPDATEs against a table use
2181  * HOT through careful tuning.
2182  */
2183  bypass = false;
2184  if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
2185  {
2186  BlockNumber threshold;
2187 
2188  Assert(vacrel->num_index_scans == 0);
2189  Assert(vacrel->lpdead_items == vacrel->dead_items->num_items);
2190  Assert(vacrel->do_index_vacuuming);
2191  Assert(vacrel->do_index_cleanup);
2192 
2193  /*
2194  * This crossover point at which we'll start to do index vacuuming is
2195  * expressed as a percentage of the total number of heap pages in the
2196  * table that are known to have at least one LP_DEAD item. This is
2197  * much more important than the total number of LP_DEAD items, since
2198  * it's a proxy for the number of heap pages whose visibility map bits
2199  * cannot be set on account of bypassing index and heap vacuuming.
2200  *
2201  * We apply one further precautionary test: the space currently used
2202  * to store the TIDs (TIDs that now all point to LP_DEAD items) must
2203  * not exceed 32MB. This limits the risk that we will bypass index
2204  * vacuuming again and again until eventually there is a VACUUM whose
2205  * dead_items space is not CPU cache resident.
2206  *
2207  * We don't take any special steps to remember the LP_DEAD items (such
2208  * as counting them in our final update to the stats system) when the
2209  * optimization is applied. Though the accounting used in analyze.c's
2210  * acquire_sample_rows() will recognize the same LP_DEAD items as dead
2211  * rows in its own stats report, that's okay. The discrepancy should
2212  * be negligible. If this optimization is ever expanded to cover more
2213  * cases then this may need to be reconsidered.
2214  */
2215  threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
2216  bypass = (vacrel->lpdead_item_pages < threshold &&
2217  vacrel->lpdead_items < MAXDEADITEMS(32L * 1024L * 1024L));
2218  }
2219 
2220  if (bypass)
2221  {
2222  /*
2223  * There are almost zero TIDs. Behave as if there were precisely
2224  * zero: bypass index vacuuming, but do index cleanup.
2225  *
2226  * We expect that the ongoing VACUUM operation will finish very
2227  * quickly, so there is no point in considering speeding up as a
2228  * failsafe against wraparound failure. (Index cleanup is expected to
2229  * finish very quickly in cases where there were no ambulkdelete()
2230  * calls.)
2231  */
2232  vacrel->do_index_vacuuming = false;
2233  }
2234  else if (lazy_vacuum_all_indexes(vacrel))
2235  {
2236  /*
2237  * We successfully completed a round of index vacuuming. Do related
2238  * heap vacuuming now.
2239  */
2240  lazy_vacuum_heap_rel(vacrel);
2241  }
2242  else
2243  {
2244  /*
2245  * Failsafe case.
2246  *
2247  * We attempted index vacuuming, but didn't finish a full round/full
2248  * index scan. This happens when relfrozenxid or relminmxid is too
2249  * far in the past.
2250  *
2251  * From this point on the VACUUM operation will do no further index
2252  * vacuuming or heap vacuuming. This VACUUM operation won't end up
2253  * back here again.
2254  */
2255  Assert(vacrel->failsafe_active);
2256  }
2257 
2258  /*
2259  * Forget the LP_DEAD items that we just vacuumed (or just decided to not
2260  * vacuum)
2261  */
2262  vacrel->dead_items->num_items = 0;
2263 }
#define BYPASS_THRESHOLD_PAGES
Definition: vacuumlazy.c:94
static bool lazy_vacuum_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2274
static void lazy_vacuum_heap_rel(LVRelState *vacrel)
Definition: vacuumlazy.c:2369

References Assert(), BYPASS_THRESHOLD_PAGES, LVRelState::consider_bypass_optimization, LVRelState::dead_items, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::failsafe_active, lazy_vacuum_all_indexes(), lazy_vacuum_heap_rel(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAXDEADITEMS, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItems::num_items, and LVRelState::rel_pages.

Referenced by lazy_scan_heap().
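
To put BYPASS_THRESHOLD_PAGES into numbers: for a hypothetical 100,000-page table, index vacuuming is bypassed only when fewer than 2,000 pages (2%) contain LP_DEAD items and the dead TIDs fit under the 32 MB cap. The standalone sketch below only reproduces that arithmetic; the MAXDEADITEMS figure is approximated as 32 MB divided by the 6-byte ItemPointerData, ignoring the small VacDeadItems header.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define BYPASS_THRESHOLD_PAGES 0.02    /* same value as vacuumlazy.c */

int
main(void)
{
    uint32_t rel_pages = 100000;    /* hypothetical 100,000-page (~781 MB) table */
    uint32_t threshold = (double) rel_pages * BYPASS_THRESHOLD_PAGES;

    /*
     * Rough stand-in for MAXDEADITEMS(32MB): 32 MB divided by the 6-byte
     * ItemPointerData, ignoring the VacDeadItems header.
     */
    uint64_t max_dead_tids = (32ULL * 1024 * 1024) / 6;

    printf("bypass only if lpdead_item_pages < %" PRIu32
           " and lpdead_items < ~%" PRIu64 "\n",
           threshold, max_dead_tids);
    return 0;
}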

◆ lazy_vacuum_all_indexes()

static bool lazy_vacuum_all_indexes ( LVRelState * vacrel)
static

Definition at line 2274 of file vacuumlazy.c.

2275 {
2276  bool allindexes = true;
2277 
2278  Assert(vacrel->nindexes > 0);
2279  Assert(vacrel->do_index_vacuuming);
2280  Assert(vacrel->do_index_cleanup);
2281 
2282  /* Precheck for XID wraparound emergencies */
2283  if (lazy_check_wraparound_failsafe(vacrel))
2284  {
2285  /* Wraparound emergency -- don't even start an index scan */
2286  return false;
2287  }
2288 
2289  /* Report that we are now vacuuming indexes */
2290  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2291  PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
2292 
2293  if (!ParallelVacuumIsActive(vacrel))
2294  {
2295  for (int idx = 0; idx < vacrel->nindexes; idx++)
2296  {
2297  Relation indrel = vacrel->indrels[idx];
2298  IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2299 
2300  vacrel->indstats[idx] =
2301  lazy_vacuum_one_index(indrel, istat, vacrel->old_live_tuples,
2302  vacrel);
2303 
2304  if (lazy_check_wraparound_failsafe(vacrel))
2305  {
2306  /* Wraparound emergency -- end current index scan */
2307  allindexes = false;
2308  break;
2309  }
2310  }
2311  }
2312  else
2313  {
2314  /* Outsource everything to parallel variant */
2315  parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, vacrel->old_live_tuples,
2316  vacrel->num_index_scans);
2317 
2318  /*
2319  * Do a postcheck to consider applying wraparound failsafe now. Note
2320  * that parallel VACUUM only gets the precheck and this postcheck.
2321  */
2322  if (lazy_check_wraparound_failsafe(vacrel))
2323  allindexes = false;
2324  }
2325 
2326  /*
2327  * We delete all LP_DEAD items from the first heap pass in all indexes on
2328  * each call here (except calls where we choose to do the failsafe). This
2329  * makes the next call to lazy_vacuum_heap_rel() safe (except in the event
2330  * of the failsafe triggering, which prevents the next call from taking
2331  * place).
2332  */
2333  Assert(vacrel->num_index_scans > 0 ||
2334  vacrel->dead_items->num_items == vacrel->lpdead_items);
2335  Assert(allindexes || vacrel->failsafe_active);
2336 
2337  /*
2338  * Increase and report the number of index scans.
2339  *
2340  * We deliberately include the case where we started a round of bulk
2341  * deletes that we weren't able to finish due to the failsafe triggering.
2342  */
2343  vacrel->num_index_scans++;
2344  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_INDEX_VACUUMS,
2345  vacrel->num_index_scans);
2346 
2347  return allindexes;
2348 }
#define PROGRESS_VACUUM_NUM_INDEX_VACUUMS
Definition: progress.h:25
#define PROGRESS_VACUUM_PHASE_VACUUM_INDEX
Definition: progress.h:31
static IndexBulkDeleteResult * lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
Definition: vacuumlazy.c:2672
void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans)

References Assert(), LVRelState::dead_items, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::failsafe_active, idx(), LVRelState::indrels, LVRelState::indstats, lazy_check_wraparound_failsafe(), lazy_vacuum_one_index(), LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItems::num_items, LVRelState::old_live_tuples, parallel_vacuum_bulkdel_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_param(), PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, and LVRelState::pvs.

Referenced by lazy_vacuum().

◆ lazy_vacuum_heap_page()

static int lazy_vacuum_heap_page ( LVRelState * vacrel,
BlockNumber  blkno,
Buffer  buffer,
int  index,
Buffer * vmbuffer 
)
static

Definition at line 2455 of file vacuumlazy.c.

2457 {
2458  VacDeadItems *dead_items = vacrel->dead_items;
2459  Page page = BufferGetPage(buffer);
2460  OffsetNumber unused[MaxHeapTuplesPerPage];
2461  int uncnt = 0;
2462  TransactionId visibility_cutoff_xid;
2463  bool all_frozen;
2464  LVSavedErrInfo saved_err_info;
2465 
2466  Assert(vacrel->nindexes == 0 || vacrel->do_index_vacuuming);
2467 
2468  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
2469 
2470  /* Update error traceback information */
2471  update_vacuum_error_info(vacrel, &saved_err_info,
2472  VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
2473  InvalidOffsetNumber);
2474 
2475  START_CRIT_SECTION();
2476 
2477  for (; index < dead_items->num_items; index++)
2478  {
2479  BlockNumber tblk;
2480  OffsetNumber toff;
2481  ItemId itemid;
2482 
2483  tblk = ItemPointerGetBlockNumber(&dead_items->items[index]);
2484  if (tblk != blkno)
2485  break; /* past end of tuples for this block */
2486  toff = ItemPointerGetOffsetNumber(&dead_items->items[index]);
2487  itemid = PageGetItemId(page, toff);
2488 
2489  Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
2490  ItemIdSetUnused(itemid);
2491  unused[uncnt++] = toff;
2492  }
2493 
2494  Assert(uncnt > 0);
2495 
2496  /* Attempt to truncate line pointer array now */
2497  PageTruncateLinePointerArray(page);
2498 
2499  /*
2500  * Mark buffer dirty before we write WAL.
2501  */
2502  MarkBufferDirty(buffer);
2503 
2504  /* XLOG stuff */
2505  if (RelationNeedsWAL(vacrel->rel))
2506  {
2507  xl_heap_vacuum xlrec;
2508  XLogRecPtr recptr;
2509 
2510  xlrec.nunused = uncnt;
2511 
2512  XLogBeginInsert();
2513  XLogRegisterData((char *) &xlrec, SizeOfHeapVacuum);
2514 
2515  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
2516  XLogRegisterBufData(0, (char *) unused, uncnt * sizeof(OffsetNumber));
2517 
2518  recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VACUUM);
2519 
2520  PageSetLSN(page, recptr);
2521  }
2522 
2523  /*
2524  * End critical section, so we safely can do visibility tests (which
2525  * possibly need to perform IO and allocate memory!). If we crash now the
2526  * page (including the corresponding vm bit) might not be marked all
2527  * visible, but that's fine. A later vacuum will fix that.
2528  */
2529  END_CRIT_SECTION();
2530 
2531  /*
2532  * Now that we have removed the LP_DEAD items from the page, once again
2533  * check if the page has become all-visible. The page is already marked
2534  * dirty, exclusively locked, and, if needed, a full page image has been
2535  * emitted.
2536  */
2537  if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
2538  &all_frozen))
2539  PageSetAllVisible(page);
2540 
2541  /*
2542  * All the changes to the heap page have been done. If the all-visible
2543  * flag is now set, also set the VM all-visible bit (and, if possible, the
2544  * all-frozen bit) unless this has already been done previously.
2545  */
2546  if (PageIsAllVisible(page))
2547  {
2548  uint8 flags = 0;
2549  uint8 vm_status = visibilitymap_get_status(vacrel->rel,
2550  blkno, vmbuffer);
2551 
2552  /* Set the VM all-frozen bit to flag, if needed */
2553  if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
2554  flags |= VISIBILITYMAP_ALL_VISIBLE;
2555  if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0 && all_frozen)
2556  flags |= VISIBILITYMAP_ALL_FROZEN;
2557 
2558  Assert(BufferIsValid(*vmbuffer));
2559  if (flags != 0)
2560  visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr,
2561  *vmbuffer, visibility_cutoff_xid, flags);
2562  }
2563 
2564  /* Revert to the previous phase information for error traceback */
2565  restore_vacuum_error_info(vacrel, &saved_err_info);
2566  return index;
2567 }
void PageTruncateLinePointerArray(Page page)
Definition: bufpage.c:835
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:388
#define XLOG_HEAP2_VACUUM
Definition: heapam_xlog.h:55
#define SizeOfHeapVacuum
Definition: heapam_xlog.h:265
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
Definition: type.h:95
uint64 XLogRecPtr
Definition: xlogdefs.h:21
void XLogRegisterData(char *data, uint32 len)
Definition: xloginsert.c:351
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:451
void XLogRegisterBufData(uint8 block_id, char *data, uint32 len)
Definition: xloginsert.c:389
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:243
void XLogBeginInsert(void)
Definition: xloginsert.c:150
#define REGBUF_STANDARD
Definition: xloginsert.h:34

References Assert(), BufferGetPage(), BufferIsValid(), LVRelState::dead_items, LVRelState::do_index_vacuuming, END_CRIT_SECTION, heap_page_is_all_visible(), InvalidOffsetNumber, InvalidXLogRecPtr, ItemIdHasStorage, ItemIdIsDead, ItemIdSetUnused, ItemPointerGetBlockNumber(), ItemPointerGetOffsetNumber(), VacDeadItems::items, MarkBufferDirty(), MaxHeapTuplesPerPage, LVRelState::nindexes, VacDeadItems::num_items, xl_heap_vacuum::nunused, PageGetItemId(), PageIsAllVisible(), PageSetAllVisible(), PageSetLSN(), PageTruncateLinePointerArray(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, REGBUF_STANDARD, LVRelState::rel, RelationNeedsWAL, restore_vacuum_error_info(), SizeOfHeapVacuum, START_CRIT_SECTION, update_vacuum_error_info(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_get_status(), visibilitymap_set(), XLOG_HEAP2_VACUUM, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by lazy_scan_heap(), and lazy_vacuum_heap_rel().
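
The visibility-map update at the end only sets bits that are not already set. The standalone sketch below mirrors that flag computation, with the bit values copied from visibilitymap.h; it is an illustration, not backend code.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Bit values as defined in visibilitymap.h */
#define VISIBILITYMAP_ALL_VISIBLE 0x01
#define VISIBILITYMAP_ALL_FROZEN  0x02

/* Only set bits that the VM does not already have */
static uint8_t
vm_bits_to_set(uint8_t vm_status, bool all_frozen)
{
    uint8_t flags = 0;

    if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
        flags |= VISIBILITYMAP_ALL_VISIBLE;
    if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0 && all_frozen)
        flags |= VISIBILITYMAP_ALL_FROZEN;
    return flags;
}

int
main(void)
{
    /* page not yet in the VM, every remaining tuple frozen: set both bits */
    printf("0x%02x\n", vm_bits_to_set(0, true));
    /* already all-visible, now also all-frozen: set only the frozen bit */
    printf("0x%02x\n", vm_bits_to_set(VISIBILITYMAP_ALL_VISIBLE, true));
    /* already all-visible, not frozen: nothing left to do */
    printf("0x%02x\n", vm_bits_to_set(VISIBILITYMAP_ALL_VISIBLE, false));
    return 0;
}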

◆ lazy_vacuum_heap_rel()

static void lazy_vacuum_heap_rel ( LVRelState * vacrel)
static

Definition at line 2369 of file vacuumlazy.c.

2370 {
2371  int index;
2372  BlockNumber vacuumed_pages;
2373  Buffer vmbuffer = InvalidBuffer;
2374  LVSavedErrInfo saved_err_info;
2375 
2376  Assert(vacrel->do_index_vacuuming);
2377  Assert(vacrel->do_index_cleanup);
2378  Assert(vacrel->num_index_scans > 0);
2379 
2380  /* Report that we are now vacuuming the heap */
2381  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2382  PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
2383 
2384  /* Update error traceback information */
2385  update_vacuum_error_info(vacrel, &saved_err_info,
2386  VACUUM_ERRCB_PHASE_VACUUM_HEAP, InvalidBlockNumber,
2387  InvalidOffsetNumber);
2388 
2389  vacuumed_pages = 0;
2390 
2391  index = 0;
2392  while (index < vacrel->dead_items->num_items)
2393  {
2394  BlockNumber tblk;
2395  Buffer buf;
2396  Page page;
2397  Size freespace;
2398 
2399  vacuum_delay_point();
2400 
2401  tblk = ItemPointerGetBlockNumber(&vacrel->dead_items->items[index]);
2402  vacrel->blkno = tblk;
2403  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, tblk, RBM_NORMAL,
2404  vacrel->bstrategy);
2405  LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2406  index = lazy_vacuum_heap_page(vacrel, tblk, buf, index, &vmbuffer);
2407 
2408  /* Now that we've vacuumed the page, record its available space */
2409  page = BufferGetPage(buf);
2410  freespace = PageGetHeapFreeSpace(page);
2411 
2412  UnlockReleaseBuffer(buf);
2413  RecordPageWithFreeSpace(vacrel->rel, tblk, freespace);
2414  vacuumed_pages++;
2415  }
2416 
2417  /* Clear the block number information */
2418  vacrel->blkno = InvalidBlockNumber;
2419 
2420  if (BufferIsValid(vmbuffer))
2421  {
2422  ReleaseBuffer(vmbuffer);
2423  vmbuffer = InvalidBuffer;
2424  }
2425 
2426  /*
2427  * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
2428  * the second heap pass. No more, no less.
2429  */
2430  Assert(index > 0);
2431  Assert(vacrel->num_index_scans > 1 ||
2432  (index == vacrel->lpdead_items &&
2433  vacuumed_pages == vacrel->lpdead_item_pages));
2434 
2435  ereport(DEBUG2,
2436  (errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
2437  vacrel->relname, (long long) index, vacuumed_pages)));
2438 
2439  /* Revert to the previous phase information for error traceback */
2440  restore_vacuum_error_info(vacrel, &saved_err_info);
2441 }
#define PROGRESS_VACUUM_PHASE_VACUUM_HEAP
Definition: progress.h:32

References Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_EXCLUSIVE, BufferGetPage(), BufferIsValid(), LVRelState::dead_items, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, ereport, errmsg(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, ItemPointerGetBlockNumber(), VacDeadItems::items, lazy_vacuum_heap_page(), LockBuffer(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAIN_FORKNUM, LVRelState::num_index_scans, PageGetHeapFreeSpace(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, RBM_NORMAL, ReadBufferExtended(), RecordPageWithFreeSpace(), LVRelState::rel, ReleaseBuffer(), LVRelState::relname, restore_vacuum_error_info(), UnlockReleaseBuffer(), update_vacuum_error_info(), vacuum_delay_point(), and VACUUM_ERRCB_PHASE_VACUUM_HEAP.

Referenced by lazy_vacuum().
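
Because dead_items is kept in TID order, the outer loop naturally processes one heap block at a time: lazy_vacuum_heap_page() consumes every entry for the current block and returns the index of the first entry belonging to the next block. The standalone sketch below (hypothetical DeadItem struct, made-up data) mirrors that grouping.

#include <stdio.h>

/* A dead item is identified by (block, offset), kept in TID order */
typedef struct
{
    unsigned    block;
    unsigned    offset;
} DeadItem;

int
main(void)
{
    /* hypothetical dead_items array, already sorted by block then offset */
    DeadItem    items[] = {{3, 2}, {3, 7}, {5, 1}, {9, 4}, {9, 6}, {9, 8}};
    int         nitems = sizeof(items) / sizeof(items[0]);
    int         index = 0;

    while (index < nitems)
    {
        unsigned    blk = items[index].block;
        int         n = 0;

        /* consume every entry for this block, as lazy_vacuum_heap_page() does */
        while (index < nitems && items[index].block == blk)
        {
            index++;
            n++;
        }
        printf("block %u: %d item(s) set LP_UNUSED\n", blk, n);
    }
    return 0;
}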

◆ lazy_vacuum_one_index()

static IndexBulkDeleteResult * lazy_vacuum_one_index ( Relation  indrel,
IndexBulkDeleteResult * istat,
double  reltuples,
LVRelState * vacrel 
)
static

Definition at line 2672 of file vacuumlazy.c.

2674 {
2675  IndexVacuumInfo ivinfo;
2676  LVSavedErrInfo saved_err_info;
2677 
2678  ivinfo.index = indrel;
2679  ivinfo.analyze_only = false;
2680  ivinfo.report_progress = false;
2681  ivinfo.estimated_count = true;
2682  ivinfo.message_level = DEBUG2;
2683  ivinfo.num_heap_tuples = reltuples;
2684  ivinfo.strategy = vacrel->bstrategy;
2685 
2686  /*
2687  * Update error traceback information.
2688  *
2689  * The index name is saved during this phase and restored immediately
2690  * after this phase. See vacuum_error_callback.
2691  */
2692  Assert(vacrel->indname == NULL);
2693  vacrel->indname = pstrdup(RelationGetRelationName(indrel));
2694  update_vacuum_error_info(vacrel, &saved_err_info,
2695  VACUUM_ERRCB_PHASE_VACUUM_INDEX,
2696  InvalidBlockNumber, InvalidOffsetNumber);
2697 
2698  /* Do bulk deletion */
2699  istat = vac_bulkdel_one_index(&ivinfo, istat, (void *) vacrel->dead_items);
2700 
2701  /* Revert to the previous phase information for error traceback */
2702  restore_vacuum_error_info(vacrel, &saved_err_info);
2703  pfree(vacrel->indname);
2704  vacrel->indname = NULL;
2705 
2706  return istat;
2707 }
IndexBulkDeleteResult * vac_bulkdel_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat, VacDeadItems *dead_items)
Definition: vacuum.c:2285

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, LVRelState::dead_items, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_bulkdel_one_index(), and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by lazy_vacuum_all_indexes().

◆ restore_vacuum_error_info()

static void restore_vacuum_error_info ( LVRelState * vacrel,
const LVSavedErrInfo * saved_vacrel 
)
static

Definition at line 3429 of file vacuumlazy.c.

3431 {
3432  vacrel->blkno = saved_vacrel->blkno;
3433  vacrel->offnum = saved_vacrel->offnum;
3434  vacrel->phase = saved_vacrel->phase;
3435 }
BlockNumber blkno
Definition: vacuumlazy.c:244
VacErrPhase phase
Definition: vacuumlazy.c:246
OffsetNumber offnum
Definition: vacuumlazy.c:245

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().

◆ should_attempt_truncation()

static bool should_attempt_truncation ( LVRelState * vacrel)
static

Definition at line 2782 of file vacuumlazy.c.

2783 {
2784  BlockNumber possibly_freeable;
2785 
2786  if (!vacrel->do_rel_truncate || vacrel->failsafe_active ||
2787  old_snapshot_threshold >= 0)
2788  return false;
2789 
2790  possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
2791  if (possibly_freeable > 0 &&
2792  (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
2793  possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
2794  return true;
2795 
2796  return false;
2797 }
int old_snapshot_threshold
Definition: snapmgr.c:79
#define REL_TRUNCATE_MINIMUM
Definition: vacuumlazy.c:76
#define REL_TRUNCATE_FRACTION
Definition: vacuumlazy.c:77

References LVRelState::do_rel_truncate, LVRelState::failsafe_active, LVRelState::nonempty_pages, old_snapshot_threshold, LVRelState::rel_pages, REL_TRUNCATE_FRACTION, and REL_TRUNCATE_MINIMUM.

Referenced by heap_vacuum_rel().
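
Concretely, truncation is only attempted when at least REL_TRUNCATE_MINIMUM (1,000) trailing pages, or at least 1/REL_TRUNCATE_FRACTION (one sixteenth) of the table, could be given back. The standalone sketch below reproduces just that size test; the do_rel_truncate, failsafe, and old_snapshot_threshold checks are left out.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define REL_TRUNCATE_MINIMUM 1000   /* same values as vacuumlazy.c */
#define REL_TRUNCATE_FRACTION 16

/* Mirror only the size test from should_attempt_truncation() */
static bool
worth_truncating(uint32_t rel_pages, uint32_t nonempty_pages)
{
    uint32_t possibly_freeable = rel_pages - nonempty_pages;

    return possibly_freeable > 0 &&
        (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
         possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);
}

int
main(void)
{
    /* 10,000-page table, 500 empty trailing pages: 500 < 1000 and 500 < 625 */
    printf("%d\n", worth_truncating(10000, 9500));  /* 0: not worth the lock */
    /* same table, 700 empty trailing pages: 700 >= 10000/16 = 625 */
    printf("%d\n", worth_truncating(10000, 9300));  /* 1 */
    return 0;
}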

◆ update_relstats_all_indexes()

static void update_relstats_all_indexes ( LVRelState * vacrel)
static

Definition at line 3311 of file vacuumlazy.c.

3312 {
3313  Relation *indrels = vacrel->indrels;
3314  int nindexes = vacrel->nindexes;
3315  IndexBulkDeleteResult **indstats = vacrel->indstats;
3316 
3317  Assert(vacrel->do_index_cleanup);
3318 
3319  for (int idx = 0; idx < nindexes; idx++)
3320  {
3321  Relation indrel = indrels[idx];
3322  IndexBulkDeleteResult *istat = indstats[idx];
3323 
3324  if (istat == NULL || istat->estimated_count)
3325  continue;
3326 
3327  /* Update index statistics */
3328  vac_update_relstats(indrel,
3329  istat->num_pages,
3330  istat->num_index_tuples,
3331  0,
3332  false,
3333  InvalidTransactionId,
3334  InvalidMultiXactId,
3335  NULL, NULL, false);
3336  }
3337 }
bool estimated_count
Definition: genam.h:77
double num_index_tuples
Definition: genam.h:78

References Assert(), LVRelState::do_index_cleanup, IndexBulkDeleteResult::estimated_count, idx(), LVRelState::indrels, LVRelState::indstats, InvalidMultiXactId, InvalidTransactionId, LVRelState::nindexes, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, and vac_update_relstats().

Referenced by heap_vacuum_rel().

◆ update_vacuum_error_info()

static void update_vacuum_error_info ( LVRelState * vacrel,
LVSavedErrInfo * saved_vacrel,
int  phase,
BlockNumber  blkno,
OffsetNumber  offnum 
)
static

Definition at line 3410 of file vacuumlazy.c.

3412 {
3413  if (saved_vacrel)
3414  {
3415  saved_vacrel->offnum = vacrel->offnum;
3416  saved_vacrel->blkno = vacrel->blkno;
3417  saved_vacrel->phase = vacrel->phase;
3418  }
3419 
3420  vacrel->blkno = blkno;
3421  vacrel->offnum = offnum;
3422  vacrel->phase = phase;
3423 }

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_scan_heap(), lazy_truncate_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().
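
A typical caller saves the current error-context state, switches to the phase it is entering, and restores it on exit, so that a later ERROR is again reported against the caller's own phase and block. The fragment below sketches that bracket as used by the per-page and per-index routines above; it assumes it lives inside vacuumlazy.c (it needs the file's static LVRelState, a valid vacrel, and a blkno in scope) and is not standalone code.

LVSavedErrInfo saved_err_info;

/* save current state, then report that we are vacuuming block blkno */
update_vacuum_error_info(vacrel, &saved_err_info,
                         VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
                         InvalidOffsetNumber);

/* ... work that may ERROR; reported as "while vacuuming block ..." ... */

/* put back whatever phase/block the caller was reporting before */
restore_vacuum_error_info(vacrel, &saved_err_info);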

◆ vacuum_error_callback()

static void vacuum_error_callback ( void *  arg)
static

Definition at line 3346 of file vacuumlazy.c.

3347 {
3348  LVRelState *errinfo = arg;
3349 
3350  switch (errinfo->phase)
3351  {
3352  case VACUUM_ERRCB_PHASE_SCAN_HEAP:
3353  if (BlockNumberIsValid(errinfo->blkno))
3354  {
3355  if (OffsetNumberIsValid(errinfo->offnum))
3356  errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
3357  errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3358  else
3359  errcontext("while scanning block %u of relation \"%s.%s\"",
3360  errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3361  }
3362  else
3363  errcontext("while scanning relation \"%s.%s\"",
3364  errinfo->relnamespace, errinfo->relname);
3365  break;
3366 
3367  case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
3368  if (BlockNumberIsValid(errinfo->blkno))
3369  {
3370  if (OffsetNumberIsValid(errinfo->offnum))
3371  errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
3372  errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3373  else
3374  errcontext("while vacuuming block %u of relation \"%s.%s\"",
3375  errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3376  }
3377  else
3378  errcontext("while vacuuming relation \"%s.%s\"",
3379  errinfo->relnamespace, errinfo->relname);
3380  break;
3381 
3382  case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
3383  errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
3384  errinfo->indname, errinfo->relnamespace, errinfo->relname);
3385  break;
3386 
3387  case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
3388  errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
3389  errinfo->indname, errinfo->relnamespace, errinfo->relname);
3390  break;
3391 
3392  case VACUUM_ERRCB_PHASE_TRUNCATE:
3393  if (BlockNumberIsValid(errinfo->blkno))
3394  errcontext("while truncating relation \"%s.%s\" to %u blocks",
3395  errinfo->relnamespace, errinfo->relname, errinfo->blkno);
3396  break;
3397 
3398  case VACUUM_ERRCB_PHASE_UNKNOWN:
3399  default:
3400  return; /* do nothing; the errinfo may not be
3401  * initialized */
3402  }
3403 }
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
#define errcontext
Definition: elog.h:192
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
void * arg

References arg, LVRelState::blkno, BlockNumberIsValid(), errcontext, LVRelState::indname, LVRelState::offnum, OffsetNumberIsValid, LVRelState::phase, LVRelState::relname, LVRelState::relnamespace, VACUUM_ERRCB_PHASE_INDEX_CLEANUP, VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_ERRCB_PHASE_UNKNOWN, VACUUM_ERRCB_PHASE_VACUUM_HEAP, and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by heap_vacuum_rel().
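
For context, this callback only takes effect once it has been pushed onto the error-context stack. The fragment below sketches the registration pattern used by heap_vacuum_rel(): fill in an ErrorContextCallback, link it in front of error_context_stack for the duration of the vacuum work, and unlink it afterwards. It assumes it lives inside vacuumlazy.c (vacuum_error_callback and LVRelState are static to that file) and is a sketch rather than a copy of the backend code.

ErrorContextCallback errcallback;

/* push: errors raised below get the "while scanning/vacuuming ..." context */
errcallback.callback = vacuum_error_callback;
errcallback.arg = vacrel;
errcallback.previous = error_context_stack;
error_context_stack = &errcallback;

/* ... lazy_scan_heap(vacrel) and the rest of the vacuum work ... */

/* pop: restore the previous error-context chain */
error_context_stack = errcallback.previous;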