PostgreSQL Source Code (git master)
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/amapi.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/parallel.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "optimizer/paths.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "tcop/tcopprot.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"

Data Structures

struct  LVDeadTuples
 
struct  LVShared
 
struct  LVSharedIndStats
 
struct  LVParallelState
 
struct  LVRelStats
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define VACUUM_FSM_EVERY_PAGES   ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define LAZY_ALLOC_TUPLES   MaxHeapTuplesPerPage
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define PARALLEL_VACUUM_KEY_SHARED   1
 
#define PARALLEL_VACUUM_KEY_DEAD_TUPLES   2
 
#define PARALLEL_VACUUM_KEY_QUERY_TEXT   3
 
#define ParallelVacuumIsActive(lps)   PointerIsValid(lps)
 
#define SizeOfDeadTuples(cnt)
 
#define MAXDEADTUPLES(max_size)   (((max_size) - offsetof(LVDeadTuples, itemptrs)) / sizeof(ItemPointerData))
 
#define SizeOfLVShared   (offsetof(LVShared, bitmap) + sizeof(bits8))
 
#define GetSharedIndStats(s)   ((LVSharedIndStats *)((char *)(s) + ((LVShared *)(s))->offset))
 
#define IndStatsIsNull(s, i)   (!(((LVShared *)(s))->bitmap[(i) >> 3] & (1 << ((i) & 0x07))))
 
#define FORCE_CHECK_PAGE()   (blkno == nblocks - 1 && should_attempt_truncation(params, vacrelstats))
 

Typedefs

typedef struct LVDeadTuples LVDeadTuples
 
typedef struct LVShared LVShared
 
typedef struct LVSharedIndStats LVSharedIndStats
 
typedef struct LVParallelState LVParallelState
 
typedef struct LVRelStats LVRelStats
 

Functions

static void lazy_scan_heap (Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, Relation *Irel, int nindexes, bool aggressive)
 
static void lazy_vacuum_heap (Relation onerel, LVRelStats *vacrelstats)
 
static bool lazy_check_needs_freeze (Buffer buf, bool *hastup)
 
static void lazy_vacuum_all_indexes (Relation onerel, Relation *Irel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats, LVParallelState *lps, int nindexes)
 
static void lazy_vacuum_index (Relation indrel, IndexBulkDeleteResult **stats, LVDeadTuples *dead_tuples, double reltuples)
 
static void lazy_cleanup_index (Relation indrel, IndexBulkDeleteResult **stats, double reltuples, bool estimated_count)
 
static int lazy_vacuum_page (Relation onerel, BlockNumber blkno, Buffer buffer, int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
 
static bool should_attempt_truncation (VacuumParams *params, LVRelStats *vacrelstats)
 
static void lazy_truncate_heap (Relation onerel, LVRelStats *vacrelstats)
 
static BlockNumber count_nondeletable_pages (Relation onerel, LVRelStats *vacrelstats)
 
static void lazy_space_alloc (LVRelStats *vacrelstats, BlockNumber relblocks)
 
static void lazy_record_dead_tuple (LVDeadTuples *dead_tuples, ItemPointer itemptr)
 
static bool lazy_tid_reaped (ItemPointer itemptr, void *state)
 
static int vac_cmp_itemptr (const void *left, const void *right)
 
static bool heap_page_is_all_visible (Relation rel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
 
static void lazy_parallel_vacuum_indexes (Relation *Irel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats, LVParallelState *lps, int nindexes)
 
static void parallel_vacuum_index (Relation *Irel, IndexBulkDeleteResult **stats, LVShared *lvshared, LVDeadTuples *dead_tuples, int nindexes)
 
static void vacuum_indexes_leader (Relation *Irel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats, LVParallelState *lps, int nindexes)
 
static void vacuum_one_index (Relation indrel, IndexBulkDeleteResult **stats, LVShared *lvshared, LVSharedIndStats *shared_indstats, LVDeadTuples *dead_tuples)
 
static void lazy_cleanup_all_indexes (Relation *Irel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats, LVParallelState *lps, int nindexes)
 
static long compute_max_dead_tuples (BlockNumber relblocks, bool hasindex)
 
static int compute_parallel_vacuum_workers (Relation *Irel, int nindexes, int nrequested, bool *can_parallel_vacuum)
 
static void prepare_index_statistics (LVShared *lvshared, bool *can_parallel_vacuum, int nindexes)
 
static void update_index_statistics (Relation *Irel, IndexBulkDeleteResult **stats, int nindexes)
 
static LVParallelState * begin_parallel_vacuum (Oid relid, Relation *Irel, LVRelStats *vacrelstats, BlockNumber nblocks, int nindexes, int nrequested)
 
static void end_parallel_vacuum (Relation *Irel, IndexBulkDeleteResult **stats, LVParallelState *lps, int nindexes)
 
static LVSharedIndStats * get_indstats (LVShared *lvshared, int n)
 
static bool skip_parallel_vacuum_index (Relation indrel, LVShared *lvshared)
 
void heap_vacuum_rel (Relation onerel, VacuumParams *params, BufferAccessStrategy bstrategy)
 
static void vacuum_log_cleanup_info (Relation rel, LVRelStats *vacrelstats)
 
void parallel_vacuum_main (dsm_segment *seg, shm_toc *toc)
 

Variables

static int elevel = -1
 
static TransactionId OldestXmin
 
static TransactionId FreezeLimit
 
static MultiXactId MultiXactCutoff
 
static BufferAccessStrategy vac_strategy
 

Macro Definition Documentation

◆ FORCE_CHECK_PAGE

#define FORCE_CHECK_PAGE ( )    (blkno == nblocks - 1 && should_attempt_truncation(params, vacrelstats))

Referenced by lazy_scan_heap().

◆ GetSharedIndStats

#define GetSharedIndStats (   s)    ((LVSharedIndStats *)((char *)(s) + ((LVShared *)(s))->offset))

Definition at line 239 of file vacuumlazy.c.

Referenced by get_indstats().

◆ IndStatsIsNull

#define IndStatsIsNull (   s,
  i 
)    (!(((LVShared *)(s))->bitmap[(i) >> 3] & (1 << ((i) & 0x07))))

Definition at line 241 of file vacuumlazy.c.

Referenced by get_indstats().
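To make the bit arithmetic concrete, the stand-alone sketch below mirrors the same test; the helper name ind_stats_is_null and the bare unsigned char array are illustrative stand-ins for LVShared's flexible bitmap member, not code from vacuumlazy.c.

    #include <stdbool.h>

    /* Sketch of the IndStatsIsNull() test: the flag for index i lives in byte
     * (i >> 3) of the bitmap, at bit position (i & 0x07); the shared stats
     * slot is considered "null" when that bit is clear. */
    bool
    ind_stats_is_null(const unsigned char *bitmap, int i)
    {
        return (bitmap[i >> 3] & (1 << (i & 0x07))) == 0;
    }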

◆ LAZY_ALLOC_TUPLES

#define LAZY_ALLOC_TUPLES   MaxHeapTuplesPerPage

Definition at line 118 of file vacuumlazy.c.

Referenced by compute_max_dead_tuples().

◆ MAXDEADTUPLES

#define MAXDEADTUPLES (   max_size)    (((max_size) - offsetof(LVDeadTuples, itemptrs)) / sizeof(ItemPointerData))

Definition at line 166 of file vacuumlazy.c.

Referenced by compute_max_dead_tuples().
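For a sense of scale (assumed numbers, not values from this file): with maintenance_work_mem set to 64 MB, MAXDEADTUPLES(64 * 1024 * 1024) subtracts only the small LVDeadTuples header before dividing by sizeof(ItemPointerData) (6 bytes), so a single vacuum pass can remember a little over 11 million dead-tuple TIDs before an index-vacuum cycle is forced.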

◆ PARALLEL_VACUUM_KEY_DEAD_TUPLES

#define PARALLEL_VACUUM_KEY_DEAD_TUPLES   2

Definition at line 138 of file vacuumlazy.c.

Referenced by begin_parallel_vacuum(), and parallel_vacuum_main().

◆ PARALLEL_VACUUM_KEY_QUERY_TEXT

#define PARALLEL_VACUUM_KEY_QUERY_TEXT   3

Definition at line 139 of file vacuumlazy.c.

Referenced by begin_parallel_vacuum(), and parallel_vacuum_main().

◆ PARALLEL_VACUUM_KEY_SHARED

#define PARALLEL_VACUUM_KEY_SHARED   1

Definition at line 137 of file vacuumlazy.c.

Referenced by begin_parallel_vacuum(), and parallel_vacuum_main().

◆ ParallelVacuumIsActive

#define ParallelVacuumIsActive (   lps)    PointerIsValid(lps)

Definition at line 145 of file vacuumlazy.c.

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 130 of file vacuumlazy.c.

Referenced by count_nondeletable_pages().

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 91 of file vacuumlazy.c.

Referenced by should_attempt_truncation().

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 90 of file vacuumlazy.c.

Referenced by should_attempt_truncation().

◆ SizeOfDeadTuples

#define SizeOfDeadTuples (   cnt)
Value:
add_size(offsetof(LVDeadTuples, itemptrs), \
mul_size(sizeof(ItemPointerData), cnt))
Size mul_size(Size s1, Size s2)
Definition: shmem.c:515
Size add_size(Size s1, Size s2)
Definition: shmem.c:498
#define offsetof(type, field)
Definition: c.h:661

Definition at line 163 of file vacuumlazy.c.

Referenced by begin_parallel_vacuum(), and lazy_space_alloc().

◆ SizeOfLVShared

#define SizeOfLVShared   (offsetof(LVShared, bitmap) + sizeof(bits8))

Definition at line 238 of file vacuumlazy.c.

Referenced by begin_parallel_vacuum().

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 124 of file vacuumlazy.c.

Referenced by lazy_scan_heap().

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES   ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 110 of file vacuumlazy.c.

Referenced by lazy_scan_heap().
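With the default 8 kB BLCKSZ this works out to (8 * 1024^3) / 8192 = 1,048,576 blocks, i.e. in the no-index path lazy_scan_heap() vacuums the free space map after roughly every 8 GB of heap processed.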

◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 100 of file vacuumlazy.c.

Referenced by count_nondeletable_pages().

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 102 of file vacuumlazy.c.

Referenced by lazy_truncate_heap().

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 101 of file vacuumlazy.c.

Referenced by lazy_truncate_heap().

Typedef Documentation

◆ LVDeadTuples

typedef struct LVDeadTuples LVDeadTuples

◆ LVParallelState

typedef struct LVParallelState LVParallelState

◆ LVRelStats

typedef struct LVRelStats LVRelStats

◆ LVShared

typedef struct LVShared LVShared

◆ LVSharedIndStats

typedef struct LVSharedIndStats LVSharedIndStats

Function Documentation

◆ begin_parallel_vacuum()

static LVParallelState * begin_parallel_vacuum ( Oid  relid,
Relation Irel,
LVRelStats vacrelstats,
BlockNumber  nblocks,
int  nindexes,
int  nrequested 
)
static

Definition at line 3067 of file vacuumlazy.c.

References LVShared::active_nworkers, add_size(), IndexAmRoutine::amparallelvacuumoptions, Assert, BITMAPLEN, compute_max_dead_tuples(), compute_parallel_vacuum_workers(), LVShared::cost_balance, CreateParallelContext(), LVRelStats::dead_tuples, debug_query_string, LVShared::elevel, elevel, EnterParallelMode(), ParallelContext::estimator, i, LVShared::idx, InitializeParallelDSM(), LVDeadTuples::itemptrs, LVParallelState::lvshared, maintenance_work_mem, LVShared::maintenance_work_mem_worker, LVDeadTuples::max_tuples, MAXALIGN, MemSet, Min, LVParallelState::nindexes_parallel_bulkdel, LVParallelState::nindexes_parallel_cleanup, LVParallelState::nindexes_parallel_condcleanup, LVDeadTuples::num_tuples, ParallelContext::nworkers, LVShared::offset, palloc0(), PARALLEL_VACUUM_KEY_DEAD_TUPLES, PARALLEL_VACUUM_KEY_QUERY_TEXT, PARALLEL_VACUUM_KEY_SHARED, LVParallelState::pcxt, pfree(), pg_atomic_init_u32(), prepare_index_statistics(), RelationData::rd_indam, LVShared::relid, shm_toc_allocate(), shm_toc_estimate_chunk, shm_toc_estimate_keys, shm_toc_insert(), SizeOfDeadTuples, SizeOfLVShared, ParallelContext::toc, VACUUM_OPTION_MAX_VALID_VALUE, VACUUM_OPTION_PARALLEL_BULKDEL, VACUUM_OPTION_PARALLEL_CLEANUP, and VACUUM_OPTION_PARALLEL_COND_CLEANUP.

Referenced by lazy_scan_heap().

3069 {
3070  LVParallelState *lps = NULL;
3071  ParallelContext *pcxt;
3072  LVShared *shared;
3073  LVDeadTuples *dead_tuples;
3074  bool *can_parallel_vacuum;
3075  long maxtuples;
3076  char *sharedquery;
3077  Size est_shared;
3078  Size est_deadtuples;
3079  int nindexes_mwm = 0;
3080  int parallel_workers = 0;
3081  int querylen;
3082  int i;
3083 
3084  /*
3085  * A parallel vacuum must be requested and there must be indexes on the
3086  * relation
3087  */
3088  Assert(nrequested >= 0);
3089  Assert(nindexes > 0);
3090 
3091  /*
3092  * Compute the number of parallel vacuum workers to launch
3093  */
3094  can_parallel_vacuum = (bool *) palloc0(sizeof(bool) * nindexes);
3095  parallel_workers = compute_parallel_vacuum_workers(Irel, nindexes,
3096  nrequested,
3097  can_parallel_vacuum);
3098 
3099  /* Can't perform vacuum in parallel */
3100  if (parallel_workers <= 0)
3101  {
3102  pfree(can_parallel_vacuum);
3103  return lps;
3104  }
3105 
3106  lps = (LVParallelState *) palloc0(sizeof(LVParallelState));
3107 
3109  pcxt = CreateParallelContext("postgres", "parallel_vacuum_main",
3110  parallel_workers);
3111  Assert(pcxt->nworkers > 0);
3112  lps->pcxt = pcxt;
3113 
3114  /* Estimate size for shared information -- PARALLEL_VACUUM_KEY_SHARED */
3115  est_shared = MAXALIGN(add_size(SizeOfLVShared, BITMAPLEN(nindexes)));
3116  for (i = 0; i < nindexes; i++)
3117  {
3118  uint8 vacoptions = Irel[i]->rd_indam->amparallelvacuumoptions;
3119 
3120  /*
3121  * Cleanup option should be either disabled, always performing in
3122  * parallel or conditionally performing in parallel.
3123  */
3124  Assert(((vacoptions & VACUUM_OPTION_PARALLEL_CLEANUP) == 0) ||
3125  ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) == 0));
3126  Assert(vacoptions <= VACUUM_OPTION_MAX_VALID_VALUE);
3127 
3128  /* Skip indexes that don't participate in parallel vacuum */
3129  if (!can_parallel_vacuum[i])
3130  continue;
3131 
3132  if (Irel[i]->rd_indam->amusemaintenanceworkmem)
3133  nindexes_mwm++;
3134 
3135  est_shared = add_size(est_shared, sizeof(LVSharedIndStats));
3136 
3137  /*
3138  * Remember the number of indexes that support parallel operation for
3139  * each phase.
3140  */
3141  if ((vacoptions & VACUUM_OPTION_PARALLEL_BULKDEL) != 0)
3142  lps->nindexes_parallel_bulkdel++;
3143  if ((vacoptions & VACUUM_OPTION_PARALLEL_CLEANUP) != 0)
3144  lps->nindexes_parallel_cleanup++;
3145  if ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) != 0)
3146  lps->nindexes_parallel_condcleanup++;
3147  }
3148  shm_toc_estimate_chunk(&pcxt->estimator, est_shared);
3149  shm_toc_estimate_keys(&pcxt->estimator, 1);
3150 
3151  /* Estimate size for dead tuples -- PARALLEL_VACUUM_KEY_DEAD_TUPLES */
3152  maxtuples = compute_max_dead_tuples(nblocks, true);
3153  est_deadtuples = MAXALIGN(SizeOfDeadTuples(maxtuples));
3154  shm_toc_estimate_chunk(&pcxt->estimator, est_deadtuples);
3155  shm_toc_estimate_keys(&pcxt->estimator, 1);
3156 
3157  /* Finally, estimate PARALLEL_VACUUM_KEY_QUERY_TEXT space */
3158  querylen = strlen(debug_query_string);
3159  shm_toc_estimate_chunk(&pcxt->estimator, querylen + 1);
3160  shm_toc_estimate_keys(&pcxt->estimator, 1);
3161 
3162  InitializeParallelDSM(pcxt);
3163 
3164  /* Prepare shared information */
3165  shared = (LVShared *) shm_toc_allocate(pcxt->toc, est_shared);
3166  MemSet(shared, 0, est_shared);
3167  shared->relid = relid;
3168  shared->elevel = elevel;
3169  shared->maintenance_work_mem_worker =
3170  (nindexes_mwm > 0) ?
3171  maintenance_work_mem / Min(parallel_workers, nindexes_mwm) :
3172  maintenance_work_mem;
3173 
3174  pg_atomic_init_u32(&(shared->cost_balance), 0);
3175  pg_atomic_init_u32(&(shared->active_nworkers), 0);
3176  pg_atomic_init_u32(&(shared->idx), 0);
3177  shared->offset = MAXALIGN(add_size(SizeOfLVShared, BITMAPLEN(nindexes)));
3178  prepare_index_statistics(shared, can_parallel_vacuum, nindexes);
3179 
3180  shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_SHARED, shared);
3181  lps->lvshared = shared;
3182 
3183  /* Prepare the dead tuple space */
3184  dead_tuples = (LVDeadTuples *) shm_toc_allocate(pcxt->toc, est_deadtuples);
3185  dead_tuples->max_tuples = maxtuples;
3186  dead_tuples->num_tuples = 0;
3187  MemSet(dead_tuples->itemptrs, 0, sizeof(ItemPointerData) * maxtuples);
3188  shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_DEAD_TUPLES, dead_tuples);
3189  vacrelstats->dead_tuples = dead_tuples;
3190 
3191  /* Store query string for workers */
3192  sharedquery = (char *) shm_toc_allocate(pcxt->toc, querylen + 1);
3193  memcpy(sharedquery, debug_query_string, querylen + 1);
3194  sharedquery[querylen] = '\0';
3195  shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_QUERY_TEXT, sharedquery);
3196 
3197  pfree(can_parallel_vacuum);
3198  return lps;
3199 }
uint8 amparallelvacuumoptions
Definition: amapi.h:203
struct IndexAmRoutine * rd_indam
Definition: rel.h:168
static int compute_parallel_vacuum_workers(Relation *Irel, int nindexes, int nrequested, bool *can_parallel_vacuum)
Definition: vacuumlazy.c:2950
ItemPointerData itemptrs[FLEXIBLE_ARRAY_MEMBER]
Definition: vacuumlazy.c:158
ParallelContext * CreateParallelContext(const char *library_name, const char *function_name, int nworkers)
Definition: parallel.c:162
LVDeadTuples * dead_tuples
Definition: vacuumlazy.c:289
#define PARALLEL_VACUUM_KEY_DEAD_TUPLES
Definition: vacuumlazy.c:138
Oid relid
Definition: vacuumlazy.c:179
#define SizeOfDeadTuples(cnt)
Definition: vacuumlazy.c:163
shm_toc_estimator estimator
Definition: parallel.h:42
#define Min(x, y)
Definition: c.h:920
unsigned char uint8
Definition: c.h:365
#define VACUUM_OPTION_MAX_VALID_VALUE
Definition: vacuum.h:63
#define MemSet(start, val, len)
Definition: c.h:971
static long compute_max_dead_tuples(BlockNumber relblocks, bool hasindex)
Definition: vacuumlazy.c:2706
#define BITMAPLEN(NATTS)
Definition: htup_details.h:547
int nindexes_parallel_bulkdel
Definition: vacuumlazy.c:266
int maintenance_work_mem_worker
Definition: vacuumlazy.c:210
#define shm_toc_estimate_chunk(e, sz)
Definition: shm_toc.h:51
int nindexes_parallel_condcleanup
Definition: vacuumlazy.c:268
#define PARALLEL_VACUUM_KEY_QUERY_TEXT
Definition: vacuumlazy.c:139
ParallelContext * pcxt
Definition: vacuumlazy.c:257
#define SizeOfLVShared
Definition: vacuumlazy.c:238
pg_atomic_uint32 cost_balance
Definition: vacuumlazy.c:217
void pfree(void *pointer)
Definition: mcxt.c:1056
int elevel
Definition: vacuumlazy.c:180
pg_atomic_uint32 idx
Definition: vacuumlazy.c:231
const char * debug_query_string
Definition: postgres.c:88
static void prepare_index_statistics(LVShared *lvshared, bool *can_parallel_vacuum, int nindexes)
Definition: vacuumlazy.c:3011
void InitializeParallelDSM(ParallelContext *pcxt)
Definition: parallel.c:200
static int elevel
Definition: vacuumlazy.c:297
int nindexes_parallel_cleanup
Definition: vacuumlazy.c:267
void * palloc0(Size size)
Definition: mcxt.c:980
Size add_size(Size s1, Size s2)
Definition: shmem.c:498
#define PARALLEL_VACUUM_KEY_SHARED
Definition: vacuumlazy.c:137
pg_atomic_uint32 active_nworkers
Definition: vacuumlazy.c:224
int maintenance_work_mem
Definition: globals.c:122
#define Assert(condition)
Definition: c.h:738
#define VACUUM_OPTION_PARALLEL_COND_CLEANUP
Definition: vacuum.h:52
size_t Size
Definition: c.h:466
#define shm_toc_estimate_keys(e, cnt)
Definition: shm_toc.h:53
#define MAXALIGN(LEN)
Definition: c.h:691
void EnterParallelMode(void)
Definition: xact.c:963
LVShared * lvshared
Definition: vacuumlazy.c:260
#define VACUUM_OPTION_PARALLEL_BULKDEL
Definition: vacuum.h:45
void * shm_toc_allocate(shm_toc *toc, Size nbytes)
Definition: shm_toc.c:88
void shm_toc_insert(shm_toc *toc, uint64 key, void *address)
Definition: shm_toc.c:171
int i
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:223
uint32 offset
Definition: vacuumlazy.c:232
#define VACUUM_OPTION_PARALLEL_CLEANUP
Definition: vacuum.h:60
shm_toc * toc
Definition: parallel.h:45
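To summarize the shared-memory layout set up above, the function reserves three shm_toc entries: PARALLEL_VACUUM_KEY_SHARED (the LVShared header, its per-index bitmap and one LVSharedIndStats slot per participating index), PARALLEL_VACUUM_KEY_DEAD_TUPLES (the LVDeadTuples array) and PARALLEL_VACUUM_KEY_QUERY_TEXT (a copy of debug_query_string). The condensed sketch below is hypothetical, not the actual parallel_vacuum_main(), and assumes it lives inside vacuumlazy.c so the file-local types are visible; it only shows how a worker attached to this DSM segment would look the entries up again.

    /* Hypothetical worker-side lookup of the TOC entries created above. */
    static void
    worker_attach_sketch(shm_toc *toc)
    {
        LVShared     *lvshared;
        LVDeadTuples *dead_tuples;
        char         *sharedquery;

        lvshared = (LVShared *) shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_SHARED, false);
        dead_tuples = (LVDeadTuples *) shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_DEAD_TUPLES, false);
        sharedquery = (char *) shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_QUERY_TEXT, false);

        /* ... set elevel from lvshared, restore the query string, then vacuum
         * or clean up the indexes assigned to this worker ... */
        (void) lvshared;
        (void) dead_tuples;
        (void) sharedquery;
    }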

◆ compute_max_dead_tuples()

static long compute_max_dead_tuples ( BlockNumber  relblocks,
bool  hasindex 
)
static

Definition at line 2706 of file vacuumlazy.c.

References autovacuum_work_mem, IsAutoVacuumWorkerProcess(), LAZY_ALLOC_TUPLES, maintenance_work_mem, Max, MaxAllocSize, MAXDEADTUPLES, MaxHeapTuplesPerPage, and Min.

Referenced by begin_parallel_vacuum(), and lazy_space_alloc().

2707 {
2708  long maxtuples;
2709  int vac_work_mem = IsAutoVacuumWorkerProcess() &&
2710  autovacuum_work_mem != -1 ?
2711  autovacuum_work_mem : maintenance_work_mem;
2712 
2713  if (useindex)
2714  {
2715  maxtuples = MAXDEADTUPLES(vac_work_mem * 1024L);
2716  maxtuples = Min(maxtuples, INT_MAX);
2717  maxtuples = Min(maxtuples, MAXDEADTUPLES(MaxAllocSize));
2718 
2719  /* curious coding here to ensure the multiplication can't overflow */
2720  if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
2721  maxtuples = relblocks * LAZY_ALLOC_TUPLES;
2722 
2723  /* stay sane if small maintenance_work_mem */
2724  maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
2725  }
2726  else
2727  maxtuples = MaxHeapTuplesPerPage;
2728 
2729  return maxtuples;
2730 }
int autovacuum_work_mem
Definition: autovacuum.c:116
#define Min(x, y)
Definition: c.h:920
#define MaxHeapTuplesPerPage
Definition: htup_details.h:574
uint32 BlockNumber
Definition: block.h:31
bool IsAutoVacuumWorkerProcess(void)
Definition: autovacuum.c:3302
#define MaxAllocSize
Definition: memutils.h:40
int maintenance_work_mem
Definition: globals.c:122
#define Max(x, y)
Definition: c.h:914
#define MAXDEADTUPLES(max_size)
Definition: vacuumlazy.c:166
#define LAZY_ALLOC_TUPLES
Definition: vacuumlazy.c:118
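As a worked example with assumed values: on 8 kB blocks MaxHeapTuplesPerPage (and hence LAZY_ALLOC_TUPLES) is 291, so for a 1,000-block table with indexes and maintenance_work_mem = 64 MB the memory-derived limit (roughly 11 million TIDs) is far above relblocks * LAZY_ALLOC_TUPLES = 1,000 * 291 = 291,000, and maxtuples is clamped down to 291,000. Without indexes the function simply returns MaxHeapTuplesPerPage, since dead line pointers are then reclaimed a page at a time and never accumulated.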

◆ compute_parallel_vacuum_workers()

static int compute_parallel_vacuum_workers ( Relation Irel,
int  nindexes,
int  nrequested,
bool can_parallel_vacuum 
)
static

Definition at line 2950 of file vacuumlazy.c.

References IndexAmRoutine::amparallelvacuumoptions, i, IsUnderPostmaster, Max, max_parallel_maintenance_workers, Min, min_parallel_index_scan_size, RelationData::rd_indam, RelationGetNumberOfBlocks, VACUUM_OPTION_NO_PARALLEL, VACUUM_OPTION_PARALLEL_BULKDEL, VACUUM_OPTION_PARALLEL_CLEANUP, and VACUUM_OPTION_PARALLEL_COND_CLEANUP.

Referenced by begin_parallel_vacuum().

2952 {
2953  int nindexes_parallel = 0;
2954  int nindexes_parallel_bulkdel = 0;
2955  int nindexes_parallel_cleanup = 0;
2956  int parallel_workers;
2957  int i;
2958 
2959  /*
2960  * We don't allow to perform parallel operation in standalone backend or
2961  * when parallelism is disabled.
2962  */
2963  if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
2964  return 0;
2965 
2966  /*
2967  * Compute the number of indexes that can participate in parallel vacuum.
2968  */
2969  for (i = 0; i < nindexes; i++)
2970  {
2971  uint8 vacoptions = Irel[i]->rd_indam->amparallelvacuumoptions;
2972 
2973  if (vacoptions == VACUUM_OPTION_NO_PARALLEL ||
2974  RelationGetNumberOfBlocks(Irel[i]) < min_parallel_index_scan_size)
2975  continue;
2976 
2977  can_parallel_vacuum[i] = true;
2978 
2979  if ((vacoptions & VACUUM_OPTION_PARALLEL_BULKDEL) != 0)
2980  nindexes_parallel_bulkdel++;
2981  if (((vacoptions & VACUUM_OPTION_PARALLEL_CLEANUP) != 0) ||
2982  ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) != 0))
2983  nindexes_parallel_cleanup++;
2984  }
2985 
2986  nindexes_parallel = Max(nindexes_parallel_bulkdel,
2987  nindexes_parallel_cleanup);
2988 
2989  /* The leader process takes one index */
2990  nindexes_parallel--;
2991 
2992  /* No index supports parallel vacuum */
2993  if (nindexes_parallel <= 0)
2994  return 0;
2995 
2996  /* Compute the parallel degree */
2997  parallel_workers = (nrequested > 0) ?
2998  Min(nrequested, nindexes_parallel) : nindexes_parallel;
2999 
3000  /* Cap by max_parallel_maintenance_workers */
3001  parallel_workers = Min(parallel_workers, max_parallel_maintenance_workers);
3002 
3003  return parallel_workers;
3004 }
uint8 amparallelvacuumoptions
Definition: amapi.h:203
struct IndexAmRoutine * rd_indam
Definition: rel.h:168
#define Min(x, y)
Definition: c.h:920
unsigned char uint8
Definition: c.h:365
bool IsUnderPostmaster
Definition: globals.c:109
int min_parallel_index_scan_size
Definition: allpaths.c:65
#define VACUUM_OPTION_NO_PARALLEL
Definition: vacuum.h:39
int max_parallel_maintenance_workers
Definition: globals.c:123
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:195
#define Max(x, y)
Definition: c.h:914
#define VACUUM_OPTION_PARALLEL_COND_CLEANUP
Definition: vacuum.h:52
#define VACUUM_OPTION_PARALLEL_BULKDEL
Definition: vacuum.h:45
int i
#define VACUUM_OPTION_PARALLEL_CLEANUP
Definition: vacuum.h:60
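A concrete illustration with assumed numbers: a table has four indexes, of which three are at least min_parallel_index_scan_size and advertise parallel bulk-delete and cleanup support, so nindexes_parallel is 3; one index is reserved for the leader, leaving 2; a VACUUM (PARALLEL 8) request is therefore clamped to 2, and the result is finally capped by max_parallel_maintenance_workers.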

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages ( Relation  onerel,
LVRelStats vacrelstats 
)
static

Definition at line 2567 of file vacuumlazy.c.

References AccessExclusiveLock, buf, BUFFER_LOCK_SHARE, BufferGetPage, CHECK_FOR_INTERRUPTS, elevel, ereport, errmsg(), FirstOffsetNumber, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LVRelStats::lock_waiter_detected, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelStats::nonempty_pages, OffsetNumberNext, PageGetItemId, PageGetMaxOffsetNumber, PageIsEmpty, PageIsNew, PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelStats::rel_pages, RelationGetRelationName, StaticAssertStmt, UnlockReleaseBuffer(), and VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL.

Referenced by lazy_truncate_heap().

2568 {
2569  BlockNumber blkno;
2570  BlockNumber prefetchedUntil;
2571  instr_time starttime;
2572 
2573  /* Initialize the starttime if we check for conflicting lock requests */
2574  INSTR_TIME_SET_CURRENT(starttime);
2575 
2576  /*
2577  * Start checking blocks at what we believe relation end to be and move
2578  * backwards. (Strange coding of loop control is needed because blkno is
2579  * unsigned.) To make the scan faster, we prefetch a few blocks at a time
2580  * in forward direction, so that OS-level readahead can kick in.
2581  */
2582  blkno = vacrelstats->rel_pages;
2583  StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
2584  "prefetch size must be power of 2");
2585  prefetchedUntil = InvalidBlockNumber;
2586  while (blkno > vacrelstats->nonempty_pages)
2587  {
2588  Buffer buf;
2589  Page page;
2590  OffsetNumber offnum,
2591  maxoff;
2592  bool hastup;
2593 
2594  /*
2595  * Check if another process requests a lock on our relation. We are
2596  * holding an AccessExclusiveLock here, so they will be waiting. We
2597  * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
2598  * only check if that interval has elapsed once every 32 blocks to
2599  * keep the number of system calls and actual shared lock table
2600  * lookups to a minimum.
2601  */
2602  if ((blkno % 32) == 0)
2603  {
2604  instr_time currenttime;
2605  instr_time elapsed;
2606 
2607  INSTR_TIME_SET_CURRENT(currenttime);
2608  elapsed = currenttime;
2609  INSTR_TIME_SUBTRACT(elapsed, starttime);
2610  if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
2611  >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
2612  {
2613  if (LockHasWaitersRelation(onerel, AccessExclusiveLock))
2614  {
2615  ereport(elevel,
2616  (errmsg("\"%s\": suspending truncate due to conflicting lock request",
2617  RelationGetRelationName(onerel))));
2618 
2619  vacrelstats->lock_waiter_detected = true;
2620  return blkno;
2621  }
2622  starttime = currenttime;
2623  }
2624  }
2625 
2626  /*
2627  * We don't insert a vacuum delay point here, because we have an
2628  * exclusive lock on the table which we want to hold for as short a
2629  * time as possible. We still need to check for interrupts however.
2630  */
2631  CHECK_FOR_INTERRUPTS();
2632 
2633  blkno--;
2634 
2635  /* If we haven't prefetched this lot yet, do so now. */
2636  if (prefetchedUntil > blkno)
2637  {
2638  BlockNumber prefetchStart;
2639  BlockNumber pblkno;
2640 
2641  prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
2642  for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
2643  {
2644  PrefetchBuffer(onerel, MAIN_FORKNUM, pblkno);
2645  CHECK_FOR_INTERRUPTS();
2646  }
2647  prefetchedUntil = prefetchStart;
2648  }
2649 
2650  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
2651  RBM_NORMAL, vac_strategy);
2652 
2653  /* In this phase we only need shared access to the buffer */
2654  LockBuffer(buf, BUFFER_LOCK_SHARE);
2655 
2656  page = BufferGetPage(buf);
2657 
2658  if (PageIsNew(page) || PageIsEmpty(page))
2659  {
2660  UnlockReleaseBuffer(buf);
2661  continue;
2662  }
2663 
2664  hastup = false;
2665  maxoff = PageGetMaxOffsetNumber(page);
2666  for (offnum = FirstOffsetNumber;
2667  offnum <= maxoff;
2668  offnum = OffsetNumberNext(offnum))
2669  {
2670  ItemId itemid;
2671 
2672  itemid = PageGetItemId(page, offnum);
2673 
2674  /*
2675  * Note: any non-unused item should be taken as a reason to keep
2676  * this page. We formerly thought that DEAD tuples could be
2677  * thrown away, but that's not so, because we'd not have cleaned
2678  * out their index entries.
2679  */
2680  if (ItemIdIsUsed(itemid))
2681  {
2682  hastup = true;
2683  break; /* can stop scanning */
2684  }
2685  } /* scan along page */
2686 
2687  UnlockReleaseBuffer(buf);
2688 
2689  /* Done scanning if we found a tuple here */
2690  if (hastup)
2691  return blkno + 1;
2692  }
2693 
2694  /*
2695  * If we fall out of the loop, all the previously-thought-to-be-empty
2696  * pages still are; we need not bother to look at the last known-nonempty
2697  * page.
2698  */
2699  return vacrelstats->nonempty_pages;
2700 }
#define PageIsEmpty(page)
Definition: bufpage.h:222
BlockNumber rel_pages
Definition: vacuumlazy.c:277
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:589
struct timeval instr_time
Definition: instr_time.h:150
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
uint32 BlockNumber
Definition: block.h:31
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:852
#define PREFETCH_SIZE
Definition: vacuumlazy.c:130
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3345
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:170
static char * buf
Definition: pg_test_fsync.c:67
#define FirstOffsetNumber
Definition: off.h:27
#define RelationGetRelationName(relation)
Definition: rel.h:469
#define BufferGetPage(buffer)
Definition: bufmgr.h:157
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
static int elevel
Definition: vacuumlazy.c:297
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL
Definition: vacuumlazy.c:100
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3559
#define ereport(elevel,...)
Definition: elog.h:144
void PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:478
bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:345
static BufferAccessStrategy vac_strategy
Definition: vacuumlazy.c:303
#define INSTR_TIME_GET_MICROSEC(t)
Definition: instr_time.h:205
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
#define InvalidBlockNumber
Definition: block.h:33
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:156
#define AccessExclusiveLock
Definition: lockdefs.h:45
BlockNumber nonempty_pages
Definition: vacuumlazy.c:288
#define PageIsNew(page)
Definition: bufpage.h:229
int errmsg(const char *fmt,...)
Definition: elog.c:824
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:85
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99
int Buffer
Definition: buf.h:23
Pointer Page
Definition: bufpage.h:78
bool lock_waiter_detected
Definition: vacuumlazy.c:292
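A note on the prefetch arithmetic above: because PREFETCH_SIZE is a power of two, prefetchStart = blkno & ~(PREFETCH_SIZE - 1) rounds blkno down to the nearest multiple of 32, so even though the truncation scan walks the table backwards, each aligned 32-block group is prefetched in forward order and OS-level readahead can still help.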

◆ end_parallel_vacuum()

static void end_parallel_vacuum ( Relation Irel,
IndexBulkDeleteResult **  stats,
LVParallelState lps,
int  nindexes 
)
static

Definition at line 3211 of file vacuumlazy.c.

References Assert, DestroyParallelContext(), ExitParallelMode(), get_indstats(), i, IsParallelWorker, LVParallelState::lvshared, palloc0(), LVParallelState::pcxt, pfree(), LVSharedIndStats::stats, and LVSharedIndStats::updated.

Referenced by lazy_scan_heap().

3213 {
3214  int i;
3215 
3216  Assert(!IsParallelWorker());
3217 
3218  /* Copy the updated statistics */
3219  for (i = 0; i < nindexes; i++)
3220  {
3221  LVSharedIndStats *indstats = get_indstats(lps->lvshared, i);
3222 
3223  /*
3224  * Skip unused slot. The statistics of this index are already stored
3225  * in local memory.
3226  */
3227  if (indstats == NULL)
3228  continue;
3229 
3230  if (indstats->updated)
3231  {
3232  stats[i] = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
3233  memcpy(stats[i], &(indstats->stats), sizeof(IndexBulkDeleteResult));
3234  }
3235  else
3236  stats[i] = NULL;
3237  }
3238 
3239  DestroyParallelContext(lps->pcxt);
3240  ExitParallelMode();
3241 
3242  /* Deactivate parallel vacuum */
3243  pfree(lps);
3244  lps = NULL;
3245 }
static LVSharedIndStats * get_indstats(LVShared *lvshared, int n)
Definition: vacuumlazy.c:3249
ParallelContext * pcxt
Definition: vacuumlazy.c:257
void DestroyParallelContext(ParallelContext *pcxt)
Definition: parallel.c:892
void pfree(void *pointer)
Definition: mcxt.c:1056
void ExitParallelMode(void)
Definition: xact.c:976
IndexBulkDeleteResult stats
Definition: vacuumlazy.c:251
#define IsParallelWorker()
Definition: parallel.h:61
void * palloc0(Size size)
Definition: mcxt.c:980
#define Assert(condition)
Definition: c.h:738
LVShared * lvshared
Definition: vacuumlazy.c:260
int i

◆ get_indstats()

static LVSharedIndStats * get_indstats ( LVShared lvshared,
int  n 
)
static

Definition at line 3249 of file vacuumlazy.c.

References GetSharedIndStats, i, and IndStatsIsNull.

Referenced by end_parallel_vacuum(), parallel_vacuum_index(), and vacuum_indexes_leader().

3250 {
3251  int i;
3252  char *p;
3253 
3254  if (IndStatsIsNull(lvshared, n))
3255  return NULL;
3256 
3257  p = (char *) GetSharedIndStats(lvshared);
3258  for (i = 0; i < n; i++)
3259  {
3260  if (IndStatsIsNull(lvshared, i))
3261  continue;
3262 
3263  p += sizeof(LVSharedIndStats);
3264  }
3265 
3266  return (LVSharedIndStats *) p;
3267 }
#define GetSharedIndStats(s)
Definition: vacuumlazy.c:239
#define IndStatsIsNull(s, i)
Definition: vacuumlazy.c:241
struct LVSharedIndStats LVSharedIndStats
int i

◆ heap_page_is_all_visible()

static bool heap_page_is_all_visible ( Relation  rel,
Buffer  buf,
TransactionId visibility_cutoff_xid,
bool all_frozen 
)
static

Definition at line 2831 of file vacuumlazy.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, elog, ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleSatisfiesVacuum(), InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet, OffsetNumberNext, OldestXmin, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), and TransactionIdPrecedes().

Referenced by lazy_vacuum_page().

2834 {
2835  Page page = BufferGetPage(buf);
2836  BlockNumber blockno = BufferGetBlockNumber(buf);
2837  OffsetNumber offnum,
2838  maxoff;
2839  bool all_visible = true;
2840 
2841  *visibility_cutoff_xid = InvalidTransactionId;
2842  *all_frozen = true;
2843 
2844  /*
2845  * This is a stripped down version of the line pointer scan in
2846  * lazy_scan_heap(). So if you change anything here, also check that code.
2847  */
2848  maxoff = PageGetMaxOffsetNumber(page);
2849  for (offnum = FirstOffsetNumber;
2850  offnum <= maxoff && all_visible;
2851  offnum = OffsetNumberNext(offnum))
2852  {
2853  ItemId itemid;
2854  HeapTupleData tuple;
2855 
2856  itemid = PageGetItemId(page, offnum);
2857 
2858  /* Unused or redirect line pointers are of no interest */
2859  if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
2860  continue;
2861 
2862  ItemPointerSet(&(tuple.t_self), blockno, offnum);
2863 
2864  /*
2865  * Dead line pointers can have index pointers pointing to them. So
2866  * they can't be treated as visible
2867  */
2868  if (ItemIdIsDead(itemid))
2869  {
2870  all_visible = false;
2871  *all_frozen = false;
2872  break;
2873  }
2874 
2875  Assert(ItemIdIsNormal(itemid));
2876 
2877  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2878  tuple.t_len = ItemIdGetLength(itemid);
2879  tuple.t_tableOid = RelationGetRelid(rel);
2880 
2881  switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
2882  {
2883  case HEAPTUPLE_LIVE:
2884  {
2885  TransactionId xmin;
2886 
2887  /* Check comments in lazy_scan_heap. */
2888  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
2889  {
2890  all_visible = false;
2891  *all_frozen = false;
2892  break;
2893  }
2894 
2895  /*
2896  * The inserter definitely committed. But is it old enough
2897  * that everyone sees it as committed?
2898  */
2899  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
2900  if (!TransactionIdPrecedes(xmin, OldestXmin))
2901  {
2902  all_visible = false;
2903  *all_frozen = false;
2904  break;
2905  }
2906 
2907  /* Track newest xmin on page. */
2908  if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
2909  *visibility_cutoff_xid = xmin;
2910 
2911  /* Check whether this tuple is already frozen or not */
2912  if (all_visible && *all_frozen &&
2913  heap_tuple_needs_eventual_freeze(tuple.t_data))
2914  *all_frozen = false;
2915  }
2916  break;
2917 
2918  case HEAPTUPLE_DEAD:
2919  case HEAPTUPLE_RECENTLY_DEAD:
2920  case HEAPTUPLE_INSERT_IN_PROGRESS:
2921  case HEAPTUPLE_DELETE_IN_PROGRESS:
2922  {
2923  all_visible = false;
2924  *all_frozen = false;
2925  break;
2926  }
2927  default:
2928  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2929  break;
2930  }
2931  } /* scan along page */
2932 
2933  return all_visible;
2934 }
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition: transam.c:334
uint32 TransactionId
Definition: c.h:513
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
uint32 BlockNumber
Definition: block.h:31
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
Definition: heapam.c:6773
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ERROR
Definition: elog.h:43
ItemPointerData t_self
Definition: htup.h:65
#define HeapTupleHeaderXminCommitted(tup)
Definition: htup_details.h:324
uint32 t_len
Definition: htup.h:64
static char * buf
Definition: pg_test_fsync.c:67
#define FirstOffsetNumber
Definition: off.h:27
#define InvalidTransactionId
Definition: transam.h:31
static TransactionId OldestXmin
Definition: vacuumlazy.c:299
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:157
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
#define Assert(condition)
Definition: c.h:738
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2570
#define elog(elevel,...)
Definition: elog.h:214
#define RelationGetRelid(relation)
Definition: rel.h:435
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127

◆ heap_vacuum_rel()

void heap_vacuum_rel ( Relation  onerel,
VacuumParams params,
BufferAccessStrategy  bstrategy 
)

Definition at line 376 of file vacuumlazy.c.

References _, appendStringInfo(), Assert, buf, StringInfoData::data, DEBUG1, DEBUG2, elevel, ereport, errmsg(), errmsg_internal(), VacuumParams::freeze_min_age, VacuumParams::freeze_table_age, FreezeLimit, LVRelStats::frozenskipped_pages, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), VacuumParams::index_cleanup, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, VacuumParams::is_wraparound, IsAutoVacuumWorkerProcess(), lazy_scan_heap(), lazy_truncate_heap(), LVRelStats::lock_waiter_detected, LOG, VacuumParams::log_min_duration, VacuumParams::multixact_freeze_min_age, VacuumParams::multixact_freeze_table_age, MultiXactCutoff, MultiXactIdIsValid, MultiXactIdPrecedesOrEquals(), MyDatabaseId, LVRelStats::new_dead_tuples, LVRelStats::new_live_tuples, LVRelStats::new_rel_tuples, NoLock, LVRelStats::num_index_scans, LVRelStats::old_live_tuples, LVRelStats::old_rel_pages, OldestXmin, VacuumParams::options, LVRelStats::pages_removed, palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), LVRelStats::pinskipped_pages, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, RelationData::rd_rel, LVRelStats::rel_pages, RelationGetNamespace, RelationGetRelationName, RelationGetRelid, RowExclusiveLock, LVRelStats::scanned_pages, should_attempt_truncation(), TimestampDifference(), TimestampDifferenceExceeds(), TransactionIdIsNormal, TransactionIdPrecedesOrEquals(), VacuumParams::truncate, LVRelStats::tupcount_pages, LVRelStats::tuples_deleted, LVRelStats::useindex, vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_TERNARY_DEFAULT, VACOPT_TERNARY_ENABLED, VACOPT_VERBOSE, vacuum_set_xid_limits(), VacuumPageDirty, VacuumPageHit, VacuumPageMiss, and visibilitymap_count().

Referenced by SampleHeapTupleVisible().

378 {
379  LVRelStats *vacrelstats;
380  Relation *Irel;
381  int nindexes;
382  PGRUsage ru0;
383  TimestampTz starttime = 0;
384  long secs;
385  int usecs;
386  double read_rate,
387  write_rate;
388  bool aggressive; /* should we scan all unfrozen pages? */
389  bool scanned_all_unfrozen; /* actually scanned all such pages? */
390  TransactionId xidFullScanLimit;
391  MultiXactId mxactFullScanLimit;
392  BlockNumber new_rel_pages;
393  BlockNumber new_rel_allvisible;
394  double new_live_tuples;
395  TransactionId new_frozen_xid;
396  MultiXactId new_min_multi;
397 
398  Assert(params != NULL);
399  Assert(params->index_cleanup != VACOPT_TERNARY_DEFAULT);
400  Assert(params->truncate != VACOPT_TERNARY_DEFAULT);
401 
402  /* not every AM requires these to be valid, but heap does */
403  Assert(TransactionIdIsNormal(onerel->rd_rel->relfrozenxid));
404  Assert(MultiXactIdIsValid(onerel->rd_rel->relminmxid));
405 
406  /* measure elapsed time iff autovacuum logging requires it */
407  if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
408  {
409  pg_rusage_init(&ru0);
410  starttime = GetCurrentTimestamp();
411  }
412 
413  if (params->options & VACOPT_VERBOSE)
414  elevel = INFO;
415  else
416  elevel = DEBUG2;
417 
418  pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
419  RelationGetRelid(onerel));
420 
421  vac_strategy = bstrategy;
422 
423  vacuum_set_xid_limits(onerel,
424  params->freeze_min_age,
425  params->freeze_table_age,
426  params->multixact_freeze_min_age,
427  params->multixact_freeze_table_age,
428  &OldestXmin, &FreezeLimit, &xidFullScanLimit,
429  &MultiXactCutoff, &mxactFullScanLimit);
430 
431  /*
432  * We request an aggressive scan if the table's frozen Xid is now older
433  * than or equal to the requested Xid full-table scan limit; or if the
434  * table's minimum MultiXactId is older than or equal to the requested
435  * mxid full-table scan limit; or if DISABLE_PAGE_SKIPPING was specified.
436  */
437  aggressive = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
438  xidFullScanLimit);
439  aggressive |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
440  mxactFullScanLimit);
441  if (params->options & VACOPT_DISABLE_PAGE_SKIPPING)
442  aggressive = true;
443 
444  /*
445  * Normally the relfrozenxid for an anti-wraparound vacuum will be old
446  * enough to force an aggressive vacuum. However, a concurrent vacuum
447  * might have already done this work that the relfrozenxid in relcache has
448  * been updated. If that happens this vacuum is redundant, so skip it.
449  */
450  if (params->is_wraparound && !aggressive)
451  {
452  ereport(DEBUG1,
453  (errmsg("skipping redundant vacuum to prevent wraparound of table \"%s.%s.%s\"",
454  get_database_name(MyDatabaseId),
455  get_namespace_name(RelationGetNamespace(onerel)),
456  RelationGetRelationName(onerel))));
457  pgstat_progress_end_command();
458  return;
459  }
460 
461  vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
462 
463  vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
464  vacrelstats->old_live_tuples = onerel->rd_rel->reltuples;
465  vacrelstats->num_index_scans = 0;
466  vacrelstats->pages_removed = 0;
467  vacrelstats->lock_waiter_detected = false;
468 
469  /* Open all indexes of the relation */
470  vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
471  vacrelstats->useindex = (nindexes > 0 &&
472  params->index_cleanup == VACOPT_TERNARY_ENABLED);
473 
474  /* Do the vacuuming */
475  lazy_scan_heap(onerel, params, vacrelstats, Irel, nindexes, aggressive);
476 
477  /* Done with indexes */
478  vac_close_indexes(nindexes, Irel, NoLock);
479 
480  /*
481  * Compute whether we actually scanned the all unfrozen pages. If we did,
482  * we can adjust relfrozenxid and relminmxid.
483  *
484  * NB: We need to check this before truncating the relation, because that
485  * will change ->rel_pages.
486  */
487  if ((vacrelstats->scanned_pages + vacrelstats->frozenskipped_pages)
488  < vacrelstats->rel_pages)
489  {
490  Assert(!aggressive);
491  scanned_all_unfrozen = false;
492  }
493  else
494  scanned_all_unfrozen = true;
495 
496  /*
497  * Optionally truncate the relation.
498  */
499  if (should_attempt_truncation(params, vacrelstats))
500  lazy_truncate_heap(onerel, vacrelstats);
501 
502  /* Report that we are now doing final cleanup */
503  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
504  PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
505 
506  /*
507  * Update statistics in pg_class.
508  *
509  * A corner case here is that if we scanned no pages at all because every
510  * page is all-visible, we should not update relpages/reltuples, because
511  * we have no new information to contribute. In particular this keeps us
512  * from replacing relpages=reltuples=0 (which means "unknown tuple
513  * density") with nonzero relpages and reltuples=0 (which means "zero
514  * tuple density") unless there's some actual evidence for the latter.
515  *
516  * It's important that we use tupcount_pages and not scanned_pages for the
517  * check described above; scanned_pages counts pages where we could not
518  * get cleanup lock, and which were processed only for frozenxid purposes.
519  *
520  * We do update relallvisible even in the corner case, since if the table
521  * is all-visible we'd definitely like to know that. But clamp the value
522  * to be not more than what we're setting relpages to.
523  *
524  * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
525  * since then we don't know for certain that all tuples have a newer xmin.
526  */
527  new_rel_pages = vacrelstats->rel_pages;
528  new_live_tuples = vacrelstats->new_live_tuples;
529  if (vacrelstats->tupcount_pages == 0 && new_rel_pages > 0)
530  {
531  new_rel_pages = vacrelstats->old_rel_pages;
532  new_live_tuples = vacrelstats->old_live_tuples;
533  }
534 
535  visibilitymap_count(onerel, &new_rel_allvisible, NULL);
536  if (new_rel_allvisible > new_rel_pages)
537  new_rel_allvisible = new_rel_pages;
538 
539  new_frozen_xid = scanned_all_unfrozen ? FreezeLimit : InvalidTransactionId;
540  new_min_multi = scanned_all_unfrozen ? MultiXactCutoff : InvalidMultiXactId;
541 
542  vac_update_relstats(onerel,
543  new_rel_pages,
544  new_live_tuples,
545  new_rel_allvisible,
546  nindexes > 0,
547  new_frozen_xid,
548  new_min_multi,
549  false);
550 
551  /* report results to the stats collector, too */
552  pgstat_report_vacuum(RelationGetRelid(onerel),
553  onerel->rd_rel->relisshared,
554  new_live_tuples,
555  vacrelstats->new_dead_tuples);
556  pgstat_progress_end_command();
557 
558  /* and log the action if appropriate */
559  if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
560  {
561  TimestampTz endtime = GetCurrentTimestamp();
562 
563  if (params->log_min_duration == 0 ||
564  TimestampDifferenceExceeds(starttime, endtime,
565  params->log_min_duration))
566  {
567  StringInfoData buf;
568  char *msgfmt;
569 
570  TimestampDifference(starttime, endtime, &secs, &usecs);
571 
572  read_rate = 0;
573  write_rate = 0;
574  if ((secs > 0) || (usecs > 0))
575  {
576  read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
577  (secs + usecs / 1000000.0);
578  write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
579  (secs + usecs / 1000000.0);
580  }
581 
582  /*
583  * This is pretty messy, but we split it up so that we can skip
584  * emitting individual parts of the message when not applicable.
585  */
586  initStringInfo(&buf);
587  if (params->is_wraparound)
588  {
589  /* an anti-wraparound vacuum has to be aggressive */
590  Assert(aggressive);
591  msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
592  }
593  else
594  {
595  if (aggressive)
596  msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
597  else
598  msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
599  }
600  appendStringInfo(&buf, msgfmt,
601  get_database_name(MyDatabaseId),
602  get_namespace_name(RelationGetNamespace(onerel)),
603  RelationGetRelationName(onerel),
604  vacrelstats->num_index_scans);
605  appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
606  vacrelstats->pages_removed,
607  vacrelstats->rel_pages,
608  vacrelstats->pinskipped_pages,
609  vacrelstats->frozenskipped_pages);
610  appendStringInfo(&buf,
611  _("tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, oldest xmin: %u\n"),
612  vacrelstats->tuples_deleted,
613  vacrelstats->new_rel_tuples,
614  vacrelstats->new_dead_tuples,
615  OldestXmin);
616  appendStringInfo(&buf,
617  _("buffer usage: %lld hits, %lld misses, %lld dirtied\n"),
618  (long long) VacuumPageHit,
619  (long long) VacuumPageMiss,
620  (long long) VacuumPageDirty);
621  appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
622  read_rate, write_rate);
623  appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
624 
625  ereport(LOG,
626  (errmsg_internal("%s", buf.data)));
627  pfree(buf.data);
628  }
629  }
630 }
double new_rel_tuples
Definition: vacuumlazy.c:283
int multixact_freeze_table_age
Definition: vacuum.h:215
void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
Definition: vacuum.c:1976
int64 VacuumPageMiss
Definition: globals.c:144
#define DEBUG1
Definition: elog.h:25
BlockNumber rel_pages
Definition: vacuumlazy.c:277
uint32 TransactionId
Definition: c.h:513
void pgstat_progress_start_command(ProgressCommandType cmdtype, Oid relid)
Definition: pgstat.c:3140
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1583
double tuples_deleted
Definition: vacuumlazy.c:287
int64 TimestampTz
Definition: timestamp.h:39
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3161
bool useindex
Definition: vacuumlazy.c:274
int64 VacuumPageHit
Definition: globals.c:143
BlockNumber tupcount_pages
Definition: vacuumlazy.c:281
BlockNumber scanned_pages
Definition: vacuumlazy.c:278
#define INFO
Definition: elog.h:33
void vacuum_set_xid_limits(Relation rel, int freeze_min_age, int freeze_table_age, int multixact_freeze_min_age, int multixact_freeze_table_age, TransactionId *oldestXmin, TransactionId *freezeLimit, TransactionId *xidFullScanLimit, MultiXactId *multiXactCutoff, MultiXactId *mxactFullScanLimit)
Definition: vacuum.c:931
int64 VacuumPageDirty
Definition: globals.c:145
uint32 BlockNumber
Definition: block.h:31
BlockNumber pinskipped_pages
Definition: vacuumlazy.c:279
void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
#define LOG
Definition: elog.h:26
Form_pg_class rd_rel
Definition: rel.h:89
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1682
int freeze_table_age
Definition: vacuum.h:212
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
BlockNumber old_rel_pages
Definition: vacuumlazy.c:276
void pfree(void *pointer)
Definition: mcxt.c:1056
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:91
bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
Definition: transam.c:319
int freeze_min_age
Definition: vacuum.h:211
bool is_wraparound
Definition: vacuum.h:217
char * get_database_name(Oid dbid)
Definition: dbcommands.c:2155
#define DEBUG2
Definition: elog.h:24
static TransactionId FreezeLimit
Definition: vacuumlazy.c:300
char * get_namespace_name(Oid nspid)
Definition: lsyscache.c:3120
#define NoLock
Definition: lockdefs.h:34
static char * buf
Definition: pg_test_fsync.c:67
#define RowExclusiveLock
Definition: lockdefs.h:38
static MultiXactId MultiXactCutoff
Definition: vacuumlazy.c:301
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
#define InvalidTransactionId
Definition: transam.h:31
#define RelationGetRelationName(relation)
Definition: rel.h:469
static TransactionId OldestXmin
Definition: vacuumlazy.c:299
#define MultiXactIdIsValid(multi)
Definition: multixact.h:27
bool IsAutoVacuumWorkerProcess(void)
Definition: autovacuum.c:3302
double new_live_tuples
Definition: vacuumlazy.c:284
VacOptTernaryValue index_cleanup
Definition: vacuum.h:221
void initStringInfo(StringInfo str)
Definition: stringinfo.c:59
void vac_open_indexes(Relation relation, LOCKMODE lockmode, int *nindexes, Relation **Irel)
Definition: vacuum.c:1933
static int elevel
Definition: vacuumlazy.c:297
void * palloc0(Size size)
Definition: mcxt.c:980
void pgstat_progress_end_command(void)
Definition: pgstat.c:3212
Oid MyDatabaseId
Definition: globals.c:85
#define InvalidMultiXactId
Definition: multixact.h:23
static bool should_attempt_truncation(VacuumParams *params, LVRelStats *vacrelstats)
Definition: vacuumlazy.c:2417
VacOptTernaryValue truncate
Definition: vacuum.h:223
#define PROGRESS_VACUUM_PHASE_FINAL_CLEANUP
Definition: progress.h:35
#define ereport(elevel,...)
Definition: elog.h:144
int num_index_scans
Definition: vacuumlazy.c:290
double old_live_tuples
Definition: vacuumlazy.c:282
TransactionId MultiXactId
Definition: c.h:523
int errmsg_internal(const char *fmt,...)
Definition: elog.c:911
static BufferAccessStrategy vac_strategy
Definition: vacuumlazy.c:303
#define Assert(condition)
Definition: c.h:738
double new_dead_tuples
Definition: vacuumlazy.c:285
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
int log_min_duration
Definition: vacuum.h:218
void pgstat_report_vacuum(Oid tableoid, bool shared, PgStat_Counter livetuples, PgStat_Counter deadtuples)
Definition: pgstat.c:1406
BlockNumber pages_removed
Definition: vacuumlazy.c:286
int errmsg(const char *fmt,...)
Definition: elog.c:824
static void lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, Relation *Irel, int nindexes, bool aggressive)
Definition: vacuumlazy.c:694
BlockNumber frozenskipped_pages
Definition: vacuumlazy.c:280
bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3156
int options
Definition: vacuum.h:210
void TimestampDifference(TimestampTz start_time, TimestampTz stop_time, long *secs, int *microsecs)
Definition: timestamp.c:1657
#define TransactionIdIsNormal(xid)
Definition: transam.h:42
void vac_update_relstats(Relation relation, BlockNumber num_pages, double num_tuples, BlockNumber num_all_visible_pages, bool hasindex, TransactionId frozenxid, MultiXactId minmulti, bool in_outer_xact)
Definition: vacuum.c:1208
#define _(x)
Definition: elog.c:88
#define RelationGetRelid(relation)
Definition: rel.h:435
int multixact_freeze_min_age
Definition: vacuum.h:213
static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
Definition: vacuumlazy.c:2438
#define RelationGetNamespace(relation)
Definition: rel.h:476
bool lock_waiter_detected
Definition: vacuumlazy.c:292
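Regarding the logged I/O rates: read_rate is BLCKSZ * VacuumPageMiss converted to MB and divided by the elapsed time, and write_rate is the same computation with VacuumPageDirty. With assumed numbers, 10,000 page misses on 8 kB blocks over 4 seconds gives 10,000 * 8192 / (1024 * 1024) / 4 ≈ 19.5 MB/s.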

◆ lazy_check_needs_freeze()

static bool lazy_check_needs_freeze ( Buffer  buf,
bool hastup 
)
static

Definition at line 1937 of file vacuumlazy.c.

References BufferGetPage, FirstOffsetNumber, FreezeLimit, heap_tuple_needs_freeze(), ItemIdIsNormal, ItemIdIsUsed, MultiXactCutoff, OffsetNumberNext, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageIsEmpty, and PageIsNew.

Referenced by lazy_scan_heap().

1938 {
1939  Page page = BufferGetPage(buf);
1940  OffsetNumber offnum,
1941  maxoff;
1942  HeapTupleHeader tupleheader;
1943 
1944  *hastup = false;
1945 
1946  /*
1947  * New and empty pages, obviously, don't contain tuples. We could make
1948  * sure that the page is registered in the FSM, but it doesn't seem worth
1949  * waiting for a cleanup lock just for that, especially because it's
1950  * likely that the pin holder will do so.
1951  */
1952  if (PageIsNew(page) || PageIsEmpty(page))
1953  return false;
1954 
1955  maxoff = PageGetMaxOffsetNumber(page);
1956  for (offnum = FirstOffsetNumber;
1957  offnum <= maxoff;
1958  offnum = OffsetNumberNext(offnum))
1959  {
1960  ItemId itemid;
1961 
1962  itemid = PageGetItemId(page, offnum);
1963 
1964  /* this should match hastup test in count_nondeletable_pages() */
1965  if (ItemIdIsUsed(itemid))
1966  *hastup = true;
1967 
1968  /* dead and redirect items never need freezing */
1969  if (!ItemIdIsNormal(itemid))
1970  continue;
1971 
1972  tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
1973 
1974  if (heap_tuple_needs_freeze(tupleheader, FreezeLimit,
1975  MultiXactCutoff, buf))
1976  return true;
1977  } /* scan along page */
1978 
1979  return false;
1980 }
#define PageIsEmpty(page)
Definition: bufpage.h:222
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
static TransactionId FreezeLimit
Definition: vacuumlazy.c:300
static char * buf
Definition: pg_test_fsync.c:67
#define FirstOffsetNumber
Definition: off.h:27
static MultiXactId MultiXactCutoff
Definition: vacuumlazy.c:301
#define BufferGetPage(buffer)
Definition: bufmgr.h:157
bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf)
Definition: heapam.c:6826
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
#define PageIsNew(page)
Definition: bufpage.h:229
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
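
A condensed sketch of how lazy_scan_heap() uses this function (illustration only; simplified from the per-block loop shown later on this page, with statistics and progress reporting omitted): when a cleanup lock cannot be obtained immediately, the page is first inspected under a share lock, and the cleanup lock is only waited for if some tuple actually needs freezing.

 if (!ConditionalLockBufferForCleanup(buf))
 {
     bool        hastup;

     /* Peek at the page under a share lock first. */
     LockBuffer(buf, BUFFER_LOCK_SHARE);
     if (!lazy_check_needs_freeze(buf, &hastup))
     {
         /* Nothing on the page needs freezing: count it as pin-skipped. */
         UnlockReleaseBuffer(buf);
         vacrelstats->pinskipped_pages++;
     }
     else
     {
         /* Some XID/MXID must be frozen: trade up to a cleanup lock. */
         LockBuffer(buf, BUFFER_LOCK_UNLOCK);
         LockBufferForCleanup(buf);
     }
 }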

◆ lazy_cleanup_all_indexes()

static void lazy_cleanup_all_indexes ( Relation *  Irel,
IndexBulkDeleteResult **  stats,
LVRelStats *  vacrelstats,
LVParallelState *  lps,
int  nindexes 
)
static

Definition at line 2261 of file vacuumlazy.c.

References Assert, LVShared::estimated_count, LVShared::first_time, LVShared::for_cleanup, idx(), IsParallelWorker, lazy_cleanup_index(), lazy_parallel_vacuum_indexes(), LVParallelState::lvshared, LVRelStats::new_rel_tuples, LVRelStats::num_index_scans, ParallelVacuumIsActive, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, LVRelStats::rel_pages, LVShared::reltuples, and LVRelStats::tupcount_pages.

Referenced by lazy_scan_heap().

2264 {
2265  int idx;
2266 
2267  Assert(!IsParallelWorker());
2268  Assert(nindexes > 0);
2269 
2270  /* Report that we are now cleaning up indexes */
2271  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2272  PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);
2273 
2274  /*
2275  * If parallel vacuum is active we perform index cleanup with parallel
2276  * workers.
2277  */
2278  if (ParallelVacuumIsActive(lps))
2279  {
2280  /* Tell parallel workers to do index cleanup */
2281  lps->lvshared->for_cleanup = true;
2282  lps->lvshared->first_time =
2283  (vacrelstats->num_index_scans == 0);
2284 
2285  /*
2286  * Now we can provide a better estimate of total number of surviving
2287  * tuples (we assume indexes are more interested in that than in the
2288  * number of nominally live tuples).
2289  */
2290  lps->lvshared->reltuples = vacrelstats->new_rel_tuples;
2291  lps->lvshared->estimated_count =
2292  (vacrelstats->tupcount_pages < vacrelstats->rel_pages);
2293 
2294  lazy_parallel_vacuum_indexes(Irel, stats, vacrelstats, lps, nindexes);
2295  }
2296  else
2297  {
2298  for (idx = 0; idx < nindexes; idx++)
2299  lazy_cleanup_index(Irel[idx], &stats[idx],
2300  vacrelstats->new_rel_tuples,
2301  vacrelstats->tupcount_pages < vacrelstats->rel_pages);
2302  }
2303 }
double new_rel_tuples
Definition: vacuumlazy.c:283
static void lazy_cleanup_index(Relation indrel, IndexBulkDeleteResult **stats, double reltuples, bool estimated_count)
Definition: vacuumlazy.c:2355
BlockNumber rel_pages
Definition: vacuumlazy.c:277
bool estimated_count
Definition: vacuumlazy.c:200
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3161
BlockNumber tupcount_pages
Definition: vacuumlazy.c:281
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
#define IsParallelWorker()
Definition: parallel.h:61
bool first_time
Definition: vacuumlazy.c:188
double reltuples
Definition: vacuumlazy.c:199
static void lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats, LVParallelState *lps, int nindexes)
Definition: vacuumlazy.c:1989
#define ParallelVacuumIsActive(lps)
Definition: vacuumlazy.c:145
int num_index_scans
Definition: vacuumlazy.c:290
#define Assert(condition)
Definition: c.h:738
#define PROGRESS_VACUUM_PHASE_INDEX_CLEANUP
Definition: progress.h:33
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
LVShared * lvshared
Definition: vacuumlazy.c:260
bool for_cleanup
Definition: vacuumlazy.c:187
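
In both branches each index ends up receiving the same two values: the updated tuple count and a flag saying whether it is only an estimate (true when some heap pages were skipped, i.e. tupcount_pages < rel_pages). In the parallel branch these are simply published in the shared LVShared area so that whichever process handles an index can pass them to lazy_cleanup_index(); a minimal sketch of the consuming side (illustration only, assuming a process that has attached to the shared state referenced above):

 /* values published by the leader in lazy_cleanup_all_indexes() */
 double  reltuples = lvshared->reltuples;         /* vacrelstats->new_rel_tuples */
 bool    estimated = lvshared->estimated_count;   /* tupcount_pages < rel_pages */

 lazy_cleanup_index(Irel[i], &stats[i], reltuples, estimated);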

◆ lazy_cleanup_index()

static void lazy_cleanup_index ( Relation  indrel,
IndexBulkDeleteResult **  stats,
double  reltuples,
bool  estimated_count 
)
static

Definition at line 2355 of file vacuumlazy.c.

References IndexVacuumInfo::analyze_only, elevel, ereport, errdetail(), errmsg(), IndexVacuumInfo::estimated_count, gettext_noop, IndexVacuumInfo::index, index_vacuum_cleanup(), IsParallelWorker, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pg_rusage_init(), pg_rusage_show(), RelationGetRelationName, IndexVacuumInfo::report_progress, IndexVacuumInfo::strategy, and vac_strategy.

Referenced by lazy_cleanup_all_indexes(), and vacuum_one_index().

2358 {
2359  IndexVacuumInfo ivinfo;
2360  const char *msg;
2361  PGRUsage ru0;
2362 
2363  pg_rusage_init(&ru0);
2364 
2365  ivinfo.index = indrel;
2366  ivinfo.analyze_only = false;
2367  ivinfo.report_progress = false;
2368  ivinfo.estimated_count = estimated_count;
2369  ivinfo.message_level = elevel;
2370 
2371  ivinfo.num_heap_tuples = reltuples;
2372  ivinfo.strategy = vac_strategy;
2373 
2374  *stats = index_vacuum_cleanup(&ivinfo, *stats);
2375 
2376  if (!(*stats))
2377  return;
2378 
2379  if (IsParallelWorker())
2380  msg = gettext_noop("index \"%s\" now contains %.0f row versions in %u pages as reported by parallel vacuum worker");
2381  else
2382  msg = gettext_noop("index \"%s\" now contains %.0f row versions in %u pages");
2383 
2384  ereport(elevel,
2385  (errmsg(msg,
2386  RelationGetRelationName(indrel),
2387  (*stats)->num_index_tuples,
2388  (*stats)->num_pages),
2389  errdetail("%.0f index row versions were removed.\n"
2390  "%u index pages have been deleted, %u are currently reusable.\n"
2391  "%s.",
2392  (*stats)->tuples_removed,
2393  (*stats)->pages_deleted, (*stats)->pages_free,
2394  pg_rusage_show(&ru0))));
2395 }
bool analyze_only
Definition: genam.h:47
bool report_progress
Definition: genam.h:48
BufferAccessStrategy strategy
Definition: genam.h:52
#define gettext_noop(x)
Definition: c.h:1160
Relation index
Definition: genam.h:46
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
int errdetail(const char *fmt,...)
Definition: elog.c:957
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
#define RelationGetRelationName(relation)
Definition: rel.h:469
#define IsParallelWorker()
Definition: parallel.h:61
static int elevel
Definition: vacuumlazy.c:297
#define ereport(elevel,...)
Definition: elog.h:144
int message_level
Definition: genam.h:50
double num_heap_tuples
Definition: genam.h:51
static BufferAccessStrategy vac_strategy
Definition: vacuumlazy.c:303
IndexBulkDeleteResult * index_vacuum_cleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
Definition: indexam.c:703
int errmsg(const char *fmt,...)
Definition: elog.c:824
bool estimated_count
Definition: genam.h:49
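
For reference, the IndexVacuumInfo fields filled in here are what an index access method's amvacuumcleanup callback sees through index_vacuum_cleanup(). The skeleton below is a hypothetical callback (the name myam_vacuumcleanup and its behavior are made up for illustration; real AMs do considerably more):

 IndexBulkDeleteResult *
 myam_vacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 {
     /* lazy_cleanup_index() always passes analyze_only = false */
     if (info->analyze_only)
         return stats;

     /* No bulk-delete pass ran, so no stats have been allocated yet. */
     if (stats == NULL)
         stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));

     /*
      * info->num_heap_tuples is vacrelstats->new_rel_tuples; it is only an
      * estimate when info->estimated_count is true.
      */
     stats->num_index_tuples = info->num_heap_tuples;

     return stats;
 }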

◆ lazy_parallel_vacuum_indexes()

static void lazy_parallel_vacuum_indexes ( Relation *  Irel,
IndexBulkDeleteResult **  stats,
LVRelStats *  vacrelstats,
LVParallelState *  lps,
int  nindexes 
)
static

Definition at line 1989 of file vacuumlazy.c.

References LVShared::active_nworkers, Assert, LVShared::cost_balance, LVRelStats::dead_tuples, elevel, ereport, errmsg(), LVShared::first_time, LVShared::for_cleanup, LVShared::idx, IsParallelWorker, LaunchParallelWorkers(), LVParallelState::lvshared, Min, ngettext, LVParallelState::nindexes_parallel_bulkdel, LVParallelState::nindexes_parallel_cleanup, LVParallelState::nindexes_parallel_condcleanup, LVRelStats::num_index_scans, ParallelContext::nworkers, ParallelContext::nworkers_launched, parallel_vacuum_index(), ParallelVacuumIsActive, LVParallelState::pcxt, pg_atomic_read_u32(), pg_atomic_write_u32(), ReinitializeParallelDSM(), ReinitializeParallelWorkers(), vacuum_indexes_leader(), VacuumActiveNWorkers, VacuumCostBalance, VacuumCostBalanceLocal, VacuumSharedCostBalance, and WaitForParallelWorkersToFinish().

Referenced by lazy_cleanup_all_indexes(), and lazy_vacuum_all_indexes().

1992 {
1993  int nworkers;
1994 
1995  Assert(!IsParallelWorker());
1996  Assert(ParallelVacuumIsActive(lps));
1997  Assert(nindexes > 0);
1998 
1999  /* Determine the number of parallel workers to launch */
2000  if (lps->lvshared->for_cleanup)
2001  {
2002  if (lps->lvshared->first_time)
2003  nworkers = lps->nindexes_parallel_cleanup +
2004  lps->nindexes_parallel_condcleanup;
2005  else
2006  nworkers = lps->nindexes_parallel_cleanup;
2007  }
2008  else
2009  nworkers = lps->nindexes_parallel_bulkdel;
2010 
2011  /* The leader process will participate */
2012  nworkers--;
2013 
2014  /*
2015  * It is possible that parallel context is initialized with fewer workers
2016  * than the number of indexes that need a separate worker in the current
2017  * phase, so we need to consider it. See compute_parallel_vacuum_workers.
2018  */
2019  nworkers = Min(nworkers, lps->pcxt->nworkers);
2020 
2021  /* Setup the shared cost-based vacuum delay and launch workers */
2022  if (nworkers > 0)
2023  {
2024  if (vacrelstats->num_index_scans > 0)
2025  {
2026  /* Reset the parallel index processing counter */
2027  pg_atomic_write_u32(&(lps->lvshared->idx), 0);
2028 
2029  /* Reinitialize the parallel context to relaunch parallel workers */
2030  ReinitializeParallelDSM(lps->pcxt);
2031  }
2032 
2033  /*
2034  * Set up shared cost balance and the number of active workers for
2035  * vacuum delay. We need to do this before launching workers as
2036  * otherwise, they might not see the updated values for these
2037  * parameters.
2038  */
2039  pg_atomic_write_u32(&(lps->lvshared->cost_balance), VacuumCostBalance);
2040  pg_atomic_write_u32(&(lps->lvshared->active_nworkers), 0);
2041 
2042  /*
2043  * The number of workers can vary between bulkdelete and cleanup
2044  * phase.
2045  */
2046  ReinitializeParallelWorkers(lps->pcxt, nworkers);
2047 
2048  LaunchParallelWorkers(lps->pcxt);
2049 
2050  if (lps->pcxt->nworkers_launched > 0)
2051  {
2052  /*
2053  * Reset the local cost values for leader backend as we have
2054  * already accumulated the remaining balance of heap.
2055  */
2056  VacuumCostBalance = 0;
2057  VacuumCostBalanceLocal = 0;
2058 
2059  /* Enable shared cost balance for leader backend */
2060  VacuumSharedCostBalance = &(lps->lvshared->cost_balance);
2061  VacuumActiveNWorkers = &(lps->lvshared->active_nworkers);
2062  }
2063 
2064  if (lps->lvshared->for_cleanup)
2065  ereport(elevel,
2066  (errmsg(ngettext("launched %d parallel vacuum worker for index cleanup (planned: %d)",
2067  "launched %d parallel vacuum workers for index cleanup (planned: %d)",
2068  lps->pcxt->nworkers_launched),
2069  lps->pcxt->nworkers_launched, nworkers)));
2070  else
2071  ereport(elevel,
2072  (errmsg(ngettext("launched %d parallel vacuum worker for index vacuuming (planned: %d)",
2073  "launched %d parallel vacuum workers for index vacuuming (planned: %d)",
2074  lps->pcxt->nworkers_launched),
2075  lps->pcxt->nworkers_launched, nworkers)));
2076  }
2077 
2078  /* Process the indexes that can be processed by only leader process */
2079  vacuum_indexes_leader(Irel, stats, vacrelstats, lps, nindexes);
2080 
2081  /*
2082  * Join as a parallel worker. The leader process alone processes all the
2083  * indexes in the case where no workers are launched.
2084  */
2085  parallel_vacuum_index(Irel, stats, lps->lvshared,
2086  vacrelstats->dead_tuples, nindexes);
2087 
2088  /* Wait for all vacuum workers to finish */
2089  WaitForParallelWorkersToFinish(lps->pcxt);
2090 
2091  /*
2092  * Carry the shared balance value to heap scan and disable shared costing
2093  */
2094  if (VacuumSharedCostBalance)
2095  {
2096  VacuumCostBalance = pg_atomic_read_u32(VacuumSharedCostBalance);
2097  VacuumSharedCostBalance = NULL;
2098  VacuumActiveNWorkers = NULL;
2099  }
2100 }
pg_atomic_uint32 * VacuumActiveNWorkers
Definition: vacuum.c:77
int VacuumCostBalance
Definition: globals.c:147
LVDeadTuples * dead_tuples
Definition: vacuumlazy.c:289
#define Min(x, y)
Definition: c.h:920
int nindexes_parallel_bulkdel
Definition: vacuumlazy.c:266
int nindexes_parallel_condcleanup
Definition: vacuumlazy.c:268
void ReinitializeParallelWorkers(ParallelContext *pcxt, int nworkers_to_launch)
Definition: parallel.c:501
ParallelContext * pcxt
Definition: vacuumlazy.c:257
pg_atomic_uint32 cost_balance
Definition: vacuumlazy.c:217
void WaitForParallelWorkersToFinish(ParallelContext *pcxt)
Definition: parallel.c:738
pg_atomic_uint32 idx
Definition: vacuumlazy.c:231
int nworkers_launched
Definition: parallel.h:38
void LaunchParallelWorkers(ParallelContext *pcxt)
Definition: parallel.c:515
static void vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats, LVParallelState *lps, int nindexes)
Definition: vacuumlazy.c:2159
#define IsParallelWorker()
Definition: parallel.h:61
bool first_time
Definition: vacuumlazy.c:188
int VacuumCostBalanceLocal
Definition: vacuum.c:78
pg_atomic_uint32 * VacuumSharedCostBalance
Definition: vacuum.c:76
static int elevel
Definition: vacuumlazy.c:297
#define ngettext(s, p, n)
Definition: c.h:1146
int nindexes_parallel_cleanup
Definition: vacuumlazy.c:267
#define ParallelVacuumIsActive(lps)
Definition: vacuumlazy.c:145
void ReinitializeParallelDSM(ParallelContext *pcxt)
Definition: parallel.c:451
pg_atomic_uint32 active_nworkers
Definition: vacuumlazy.c:224
#define ereport(elevel,...)
Definition: elog.h:144
int num_index_scans
Definition: vacuumlazy.c:290
static void parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats, LVShared *lvshared, LVDeadTuples *dead_tuples, int nindexes)
Definition: vacuumlazy.c:2107
#define Assert(condition)
Definition: c.h:738
LVShared * lvshared
Definition: vacuumlazy.c:260
bool for_cleanup
Definition: vacuumlazy.c:187
int errmsg(const char *fmt,...)
Definition: elog.c:824
static void pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:258
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241
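
A worked example of the worker-count computation above (illustration only): suppose two of the table's indexes support parallel bulk-deletion and the parallel context was created with pcxt->nworkers == 8.

 /* bulkdelete phase, i.e. lps->lvshared->for_cleanup == false */
 nworkers = lps->nindexes_parallel_bulkdel;      /* 2 */
 nworkers--;                                     /* leader participates: 1 */
 nworkers = Min(nworkers, lps->pcxt->nworkers);  /* Min(1, 8) = 1 */

 /*
  * One background worker is launched; the leader processes the other
  * parallel-safe index, plus any indexes that only it may process, via
  * vacuum_indexes_leader() and parallel_vacuum_index().
  */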

◆ lazy_record_dead_tuple()

static void lazy_record_dead_tuple ( LVDeadTuples *  dead_tuples,
ItemPointer  itemptr 
)
static

Definition at line 2756 of file vacuumlazy.c.

References LVDeadTuples::itemptrs, LVDeadTuples::max_tuples, LVDeadTuples::num_tuples, pgstat_progress_update_param(), and PROGRESS_VACUUM_NUM_DEAD_TUPLES.

Referenced by lazy_scan_heap().

2757 {
2758  /*
2759  * The array shouldn't overflow under normal behavior, but perhaps it
2760  * could if we are given a really small maintenance_work_mem. In that
2761  * case, just forget the last few tuples (we'll get 'em next time).
2762  */
2763  if (dead_tuples->num_tuples < dead_tuples->max_tuples)
2764  {
2765  dead_tuples->itemptrs[dead_tuples->num_tuples] = *itemptr;
2766  dead_tuples->num_tuples++;
2767  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
2768  dead_tuples->num_tuples);
2769  }
2770 }
ItemPointerData itemptrs[FLEXIBLE_ARRAY_MEMBER]
Definition: vacuumlazy.c:158
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3161
#define PROGRESS_VACUUM_NUM_DEAD_TUPLES
Definition: progress.h:27
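
A minimal sketch of the surrounding usage in lazy_scan_heap() (illustration only): a dead item's TID is recorded during the heap scan, and the array is reset once an index-vacuum/heap-vacuum cycle has consumed it.

 ItemPointerData tid;

 ItemPointerSet(&tid, blkno, offnum);
 lazy_record_dead_tuple(dead_tuples, &tid);

 /* ... later, after lazy_vacuum_all_indexes() and lazy_vacuum_heap() ... */
 dead_tuples->num_tuples = 0;    /* forget the now-vacuumed tuples */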

◆ lazy_scan_heap()

static void lazy_scan_heap ( Relation  onerel,
VacuumParams *  params,
LVRelStats *  vacrelstats,
Relation *  Irel,
int  nindexes,
bool  aggressive 
)
static

Definition at line 694 of file vacuumlazy.c.

References _, appendStringInfo(), Assert, begin_parallel_vacuum(), buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferGetPageSize, BufferIsValid, ConditionalLockBufferForCleanup(), StringInfoData::data, LVRelStats::dead_tuples, elevel, elog, END_CRIT_SECTION, end_parallel_vacuum(), ereport, errdetail_internal(), errmsg(), ERROR, FirstOffsetNumber, FORCE_CHECK_PAGE, FreeSpaceMapVacuumRange(), FreezeLimit, LVRelStats::frozenskipped_pages, get_namespace_name(), GetRecordedFreeSpace(), heap_execute_freeze_tuple(), heap_page_prune(), heap_prepare_freeze_tuple(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderAdvanceLatestRemovedXid(), HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleSatisfiesVacuum(), i, VacuumParams::index_cleanup, initStringInfo(), InvalidBuffer, InvalidTransactionId, InvalidXLogRecPtr, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet, LVRelStats::latestRemovedXid, lazy_check_needs_freeze(), lazy_cleanup_all_indexes(), lazy_record_dead_tuple(), lazy_space_alloc(), lazy_vacuum_all_indexes(), lazy_vacuum_heap(), lazy_vacuum_page(), LockBuffer(), LockBufferForCleanup(), log_heap_freeze(), log_newpage_buffer(), MAIN_FORKNUM, MarkBufferDirty(), LVDeadTuples::max_tuples, MaxHeapTuplesPerPage, MultiXactCutoff, LVRelStats::new_dead_tuples, LVRelStats::new_live_tuples, LVRelStats::new_rel_tuples, ngettext, LVRelStats::nonempty_pages, LVDeadTuples::num_tuples, VacuumParams::nworkers, xl_heap_freeze_tuple::offset, OffsetNumberNext, OldestXmin, VacuumParams::options, PageClearAllVisible, PageGetHeapFreeSpace(), PageGetItem, PageGetItemId, PageGetLSN, PageGetMaxOffsetNumber, PageIsAllVisible, PageIsEmpty, PageIsNew, PageSetAllVisible, PageSetLSN, palloc(), palloc0(), ParallelVacuumIsActive, pfree(), pg_rusage_init(), pg_rusage_show(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), LVRelStats::pinskipped_pages, PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLES, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, RBM_NORMAL, RelationData::rd_rel, ReadBufferExtended(), RecordPageWithFreeSpace(), LVRelStats::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, RelationNeedsWAL, RelationUsesLocalBuffers, ReleaseBuffer(), relname, LVRelStats::scanned_pages, SizeOfPageHeaderData, SKIP_PAGES_THRESHOLD, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdPrecedes(), LVRelStats::tupcount_pages, LVRelStats::tuples_deleted, UnlockReleaseBuffer(), update_index_statistics(), LVRelStats::useindex, vac_estimate_reltuples(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_TERNARY_DISABLED, vacuum_delay_point(), VACUUM_FSM_EVERY_PAGES, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, VM_ALL_FROZEN, VM_ALL_VISIBLE, and WARNING.

Referenced by heap_vacuum_rel().

696 {
697  LVParallelState *lps = NULL;
698  LVDeadTuples *dead_tuples;
699  BlockNumber nblocks,
700  blkno;
701  HeapTupleData tuple;
702  char *relname;
703  TransactionId relfrozenxid = onerel->rd_rel->relfrozenxid;
704  TransactionId relminmxid = onerel->rd_rel->relminmxid;
705  BlockNumber empty_pages,
706  vacuumed_pages,
707  next_fsm_block_to_vacuum;
708  double num_tuples, /* total number of nonremovable tuples */
709  live_tuples, /* live tuples (reltuples estimate) */
710  tups_vacuumed, /* tuples cleaned up by vacuum */
711  nkeep, /* dead-but-not-removable tuples */
712  nunused; /* unused line pointers */
713  IndexBulkDeleteResult **indstats;
714  int i;
715  PGRUsage ru0;
716  Buffer vmbuffer = InvalidBuffer;
717  BlockNumber next_unskippable_block;
718  bool skipping_blocks;
719  xl_heap_freeze_tuple *frozen;
720  StringInfoData buf;
721  const int initprog_index[] = {
722  PROGRESS_VACUUM_PHASE,
723  PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
724  PROGRESS_VACUUM_MAX_DEAD_TUPLES
725  };
726  int64 initprog_val[3];
727 
728  pg_rusage_init(&ru0);
729 
730  relname = RelationGetRelationName(onerel);
731  if (aggressive)
732  ereport(elevel,
733  (errmsg("aggressively vacuuming \"%s.%s\"",
734  get_namespace_name(RelationGetNamespace(onerel)),
735  relname)));
736  else
737  ereport(elevel,
738  (errmsg("vacuuming \"%s.%s\"",
739  get_namespace_name(RelationGetNamespace(onerel)),
740  relname)));
741 
742  empty_pages = vacuumed_pages = 0;
743  next_fsm_block_to_vacuum = (BlockNumber) 0;
744  num_tuples = live_tuples = tups_vacuumed = nkeep = nunused = 0;
745 
746  indstats = (IndexBulkDeleteResult **)
747  palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
748 
749  nblocks = RelationGetNumberOfBlocks(onerel);
750  vacrelstats->rel_pages = nblocks;
751  vacrelstats->scanned_pages = 0;
752  vacrelstats->tupcount_pages = 0;
753  vacrelstats->nonempty_pages = 0;
754  vacrelstats->latestRemovedXid = InvalidTransactionId;
755 
756  /*
757  * Initialize the state for a parallel vacuum. As of now, only one worker
758  * can be used for an index, so we invoke parallelism only if there are at
759  * least two indexes on a table.
760  */
761  if (params->nworkers >= 0 && vacrelstats->useindex && nindexes > 1)
762  {
763  /*
764  * Since parallel workers cannot access data in temporary tables, we
765  * can't perform parallel vacuum on them.
766  */
767  if (RelationUsesLocalBuffers(onerel))
768  {
769  /*
770  * Give warning only if the user explicitly tries to perform a
771  * parallel vacuum on the temporary table.
772  */
773  if (params->nworkers > 0)
775  (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
776  RelationGetRelationName(onerel))));
777  }
778  else
779  lps = begin_parallel_vacuum(RelationGetRelid(onerel), Irel,
780  vacrelstats, nblocks, nindexes,
781  params->nworkers);
782  }
783 
784  /*
785  * Allocate the space for dead tuples in case the parallel vacuum is not
786  * initialized.
787  */
788  if (!ParallelVacuumIsActive(lps))
789  lazy_space_alloc(vacrelstats, nblocks);
790 
791  dead_tuples = vacrelstats->dead_tuples;
792  frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
793 
794  /* Report that we're scanning the heap, advertising total # of blocks */
795  initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
796  initprog_val[1] = nblocks;
797  initprog_val[2] = dead_tuples->max_tuples;
798  pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
799 
800  /*
801  * Except when aggressive is set, we want to skip pages that are
802  * all-visible according to the visibility map, but only when we can skip
803  * at least SKIP_PAGES_THRESHOLD consecutive pages. Since we're reading
804  * sequentially, the OS should be doing readahead for us, so there's no
805  * gain in skipping a page now and then; that's likely to disable
806  * readahead and so be counterproductive. Also, skipping even a single
807  * page means that we can't update relfrozenxid, so we only want to do it
808  * if we can skip a goodly number of pages.
809  *
810  * When aggressive is set, we can't skip pages just because they are
811  * all-visible, but we can still skip pages that are all-frozen, since
812  * such pages do not need freezing and do not affect the value that we can
813  * safely set for relfrozenxid or relminmxid.
814  *
815  * Before entering the main loop, establish the invariant that
816  * next_unskippable_block is the next block number >= blkno that we can't
817  * skip based on the visibility map, either all-visible for a regular scan
818  * or all-frozen for an aggressive scan. We set it to nblocks if there's
819  * no such block. We also set up the skipping_blocks flag correctly at
820  * this stage.
821  *
822  * Note: The value returned by visibilitymap_get_status could be slightly
823  * out-of-date, since we make this test before reading the corresponding
824  * heap page or locking the buffer. This is OK. If we mistakenly think
825  * that the page is all-visible or all-frozen when in fact the flag's just
826  * been cleared, we might fail to vacuum the page. It's easy to see that
827  * skipping a page when aggressive is not set is not a very big deal; we
828  * might leave some dead tuples lying around, but the next vacuum will
829  * find them. But even when aggressive *is* set, it's still OK if we miss
830  * a page whose all-frozen marking has just been cleared. Any new XIDs
831  * just added to that page are necessarily newer than the GlobalXmin we
832  * computed, so they'll have no effect on the value to which we can safely
833  * set relfrozenxid. A similar argument applies for MXIDs and relminmxid.
834  *
835  * We will scan the table's last page, at least to the extent of
836  * determining whether it has tuples or not, even if it should be skipped
837  * according to the above rules; except when we've already determined that
838  * it's not worth trying to truncate the table. This avoids having
839  * lazy_truncate_heap() take access-exclusive lock on the table to attempt
840  * a truncation that just fails immediately because there are tuples in
841  * the last page. This is worth avoiding mainly because such a lock must
842  * be replayed on any hot standby, where it can be disruptive.
843  */
844  next_unskippable_block = 0;
845  if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
846  {
847  while (next_unskippable_block < nblocks)
848  {
849  uint8 vmstatus;
850 
851  vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
852  &vmbuffer);
853  if (aggressive)
854  {
855  if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
856  break;
857  }
858  else
859  {
860  if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
861  break;
862  }
863  vacuum_delay_point();
864  next_unskippable_block++;
865  }
866  }
867 
868  if (next_unskippable_block >= SKIP_PAGES_THRESHOLD)
869  skipping_blocks = true;
870  else
871  skipping_blocks = false;
872 
873  for (blkno = 0; blkno < nblocks; blkno++)
874  {
875  Buffer buf;
876  Page page;
877  OffsetNumber offnum,
878  maxoff;
879  bool tupgone,
880  hastup;
881  int prev_dead_count;
882  int nfrozen;
883  Size freespace;
884  bool all_visible_according_to_vm = false;
885  bool all_visible;
886  bool all_frozen = true; /* provided all_visible is also true */
887  bool has_dead_tuples;
888  TransactionId visibility_cutoff_xid = InvalidTransactionId;
889 
890  /* see note above about forcing scanning of last page */
891 #define FORCE_CHECK_PAGE() \
892  (blkno == nblocks - 1 && should_attempt_truncation(params, vacrelstats))
893 
894  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
895 
896  if (blkno == next_unskippable_block)
897  {
898  /* Time to advance next_unskippable_block */
899  next_unskippable_block++;
900  if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
901  {
902  while (next_unskippable_block < nblocks)
903  {
904  uint8 vmskipflags;
905 
906  vmskipflags = visibilitymap_get_status(onerel,
907  next_unskippable_block,
908  &vmbuffer);
909  if (aggressive)
910  {
911  if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
912  break;
913  }
914  else
915  {
916  if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
917  break;
918  }
919  vacuum_delay_point();
920  next_unskippable_block++;
921  }
922  }
923 
924  /*
925  * We know we can't skip the current block. But set up
926  * skipping_blocks to do the right thing at the following blocks.
927  */
928  if (next_unskippable_block - blkno > SKIP_PAGES_THRESHOLD)
929  skipping_blocks = true;
930  else
931  skipping_blocks = false;
932 
933  /*
934  * Normally, the fact that we can't skip this block must mean that
935  * it's not all-visible. But in an aggressive vacuum we know only
936  * that it's not all-frozen, so it might still be all-visible.
937  */
938  if (aggressive && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
939  all_visible_according_to_vm = true;
940  }
941  else
942  {
943  /*
944  * The current block is potentially skippable; if we've seen a
945  * long enough run of skippable blocks to justify skipping it, and
946  * we're not forced to check it, then go ahead and skip.
947  * Otherwise, the page must be at least all-visible if not
948  * all-frozen, so we can set all_visible_according_to_vm = true.
949  */
950  if (skipping_blocks && !FORCE_CHECK_PAGE())
951  {
952  /*
953  * Tricky, tricky. If this is in aggressive vacuum, the page
954  * must have been all-frozen at the time we checked whether it
955  * was skippable, but it might not be any more. We must be
956  * careful to count it as a skipped all-frozen page in that
957  * case, or else we'll think we can't update relfrozenxid and
958  * relminmxid. If it's not an aggressive vacuum, we don't
959  * know whether it was all-frozen, so we have to recheck; but
960  * in this case an approximate answer is OK.
961  */
962  if (aggressive || VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
963  vacrelstats->frozenskipped_pages++;
964  continue;
965  }
966  all_visible_according_to_vm = true;
967  }
968 
969  vacuum_delay_point();
970 
971  /*
972  * If we are close to overrunning the available space for dead-tuple
973  * TIDs, pause and do a cycle of vacuuming before we tackle this page.
974  */
975  if ((dead_tuples->max_tuples - dead_tuples->num_tuples) < MaxHeapTuplesPerPage &&
976  dead_tuples->num_tuples > 0)
977  {
978  /*
979  * Before beginning index vacuuming, we release any pin we may
980  * hold on the visibility map page. This isn't necessary for
981  * correctness, but we do it anyway to avoid holding the pin
982  * across a lengthy, unrelated operation.
983  */
984  if (BufferIsValid(vmbuffer))
985  {
986  ReleaseBuffer(vmbuffer);
987  vmbuffer = InvalidBuffer;
988  }
989 
990  /* Work on all the indexes, then the heap */
991  lazy_vacuum_all_indexes(onerel, Irel, indstats,
992  vacrelstats, lps, nindexes);
993 
994  /* Remove tuples from heap */
995  lazy_vacuum_heap(onerel, vacrelstats);
996 
997  /*
998  * Forget the now-vacuumed tuples, and press on, but be careful
999  * not to reset latestRemovedXid since we want that value to be
1000  * valid.
1001  */
1002  dead_tuples->num_tuples = 0;
1003 
1004  /*
1005  * Vacuum the Free Space Map to make newly-freed space visible on
1006  * upper-level FSM pages. Note we have not yet processed blkno.
1007  */
1008  FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum, blkno);
1009  next_fsm_block_to_vacuum = blkno;
1010 
1011  /* Report that we are once again scanning the heap */
1012  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1013  PROGRESS_VACUUM_PHASE_SCAN_HEAP);
1014  }
1015 
1016  /*
1017  * Pin the visibility map page in case we need to mark the page
1018  * all-visible. In most cases this will be very cheap, because we'll
1019  * already have the correct page pinned anyway. However, it's
1020  * possible that (a) next_unskippable_block is covered by a different
1021  * VM page than the current block or (b) we released our pin and did a
1022  * cycle of index vacuuming.
1023  *
1024  */
1025  visibilitymap_pin(onerel, blkno, &vmbuffer);
1026 
1027  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
1028  RBM_NORMAL, vac_strategy);
1029 
1030  /* We need buffer cleanup lock so that we can prune HOT chains. */
1031  if (!ConditionalLockBufferForCleanup(buf))
1032  {
1033  /*
1034  * If we're not performing an aggressive scan to guard against XID
1035  * wraparound, and we don't want to forcibly check the page, then
1036  * it's OK to skip vacuuming pages we get a lock conflict on. They
1037  * will be dealt with in some future vacuum.
1038  */
1039  if (!aggressive && !FORCE_CHECK_PAGE())
1040  {
1041  ReleaseBuffer(buf);
1042  vacrelstats->pinskipped_pages++;
1043  continue;
1044  }
1045 
1046  /*
1047  * Read the page with share lock to see if any xids on it need to
1048  * be frozen. If not we just skip the page, after updating our
1049  * scan statistics. If there are some, we wait for cleanup lock.
1050  *
1051  * We could defer the lock request further by remembering the page
1052  * and coming back to it later, or we could even register
1053  * ourselves for multiple buffers and then service whichever one
1054  * is received first. For now, this seems good enough.
1055  *
1056  * If we get here with aggressive false, then we're just forcibly
1057  * checking the page, and so we don't want to insist on getting
1058  * the lock; we only need to know if the page contains tuples, so
1059  * that we can update nonempty_pages correctly. It's convenient
1060  * to use lazy_check_needs_freeze() for both situations, though.
1061  */
1062  LockBuffer(buf, BUFFER_LOCK_SHARE);
1063  if (!lazy_check_needs_freeze(buf, &hastup))
1064  {
1065  UnlockReleaseBuffer(buf);
1066  vacrelstats->scanned_pages++;
1067  vacrelstats->pinskipped_pages++;
1068  if (hastup)
1069  vacrelstats->nonempty_pages = blkno + 1;
1070  continue;
1071  }
1072  if (!aggressive)
1073  {
1074  /*
1075  * Here, we must not advance scanned_pages; that would amount
1076  * to claiming that the page contains no freezable tuples.
1077  */
1078  UnlockReleaseBuffer(buf);
1079  vacrelstats->pinskipped_pages++;
1080  if (hastup)
1081  vacrelstats->nonempty_pages = blkno + 1;
1082  continue;
1083  }
1084  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1085  LockBufferForCleanup(buf);
1086  /* drop through to normal processing */
1087  }
1088 
1089  vacrelstats->scanned_pages++;
1090  vacrelstats->tupcount_pages++;
1091 
1092  page = BufferGetPage(buf);
1093 
1094  if (PageIsNew(page))
1095  {
1096  bool still_new;
1097 
1098  /*
1099  * All-zeroes pages can be left over if either a backend extends
1100  * the relation by a single page, but crashes before the newly
1101  * initialized page has been written out, or when bulk-extending
1102  * the relation (which creates a number of empty pages at the tail
1103  * end of the relation, but enters them into the FSM).
1104  *
1105  * Make sure these pages are in the FSM, to ensure they can be
1106  * reused. Do that by testing if there's any space recorded for
1107  * the page. If not, enter it.
1108  *
1109  * Note we do not enter the page into the visibilitymap. That has
1110  * the downside that we repeatedly visit this page in subsequent
1111  * vacuums, but otherwise we'll never not discover the space on a
1112  * promoted standby. The harm of repeated checking ought to
1113  * normally not be too bad - the space usually should be used at
1114  * some point, otherwise there wouldn't be any regular vacuums.
1115  */
1116 
1117  /*
1118  * Perform checking of FSM after releasing lock, the fsm is
1119  * approximate, after all.
1120  */
1121  still_new = PageIsNew(page);
1122  UnlockReleaseBuffer(buf);
1123 
1124  if (still_new)
1125  {
1126  empty_pages++;
1127 
1128  if (GetRecordedFreeSpace(onerel, blkno) == 0)
1129  {
1130  Size freespace;
1131 
1132  freespace = BufferGetPageSize(buf) - SizeOfPageHeaderData;
1133  RecordPageWithFreeSpace(onerel, blkno, freespace);
1134  }
1135  }
1136  continue;
1137  }
1138 
1139  if (PageIsEmpty(page))
1140  {
1141  empty_pages++;
1142  freespace = PageGetHeapFreeSpace(page);
1143 
1144  /*
1145  * Empty pages are always all-visible and all-frozen (note that
1146  * the same is currently not true for new pages, see above).
1147  */
1148  if (!PageIsAllVisible(page))
1149  {
1150  START_CRIT_SECTION();
1151 
1152  /* mark buffer dirty before writing a WAL record */
1153  MarkBufferDirty(buf);
1154 
1155  /*
1156  * It's possible that another backend has extended the heap,
1157  * initialized the page, and then failed to WAL-log the page
1158  * due to an ERROR. Since heap extension is not WAL-logged,
1159  * recovery might try to replay our record setting the page
1160  * all-visible and find that the page isn't initialized, which
1161  * will cause a PANIC. To prevent that, check whether the
1162  * page has been previously WAL-logged, and if not, do that
1163  * now.
1164  */
1165  if (RelationNeedsWAL(onerel) &&
1166  PageGetLSN(page) == InvalidXLogRecPtr)
1167  log_newpage_buffer(buf, true);
1168 
1169  PageSetAllVisible(page);
1170  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1171  vmbuffer, InvalidTransactionId,
1172  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
1173  END_CRIT_SECTION();
1174  }
1175 
1176  UnlockReleaseBuffer(buf);
1177  RecordPageWithFreeSpace(onerel, blkno, freespace);
1178  continue;
1179  }
1180 
1181  /*
1182  * Prune all HOT-update chains in this page.
1183  *
1184  * We count tuples removed by the pruning step as removed by VACUUM.
1185  */
1186  tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
1187  &vacrelstats->latestRemovedXid);
1188 
1189  /*
1190  * Now scan the page to collect vacuumable items and check for tuples
1191  * requiring freezing.
1192  */
1193  all_visible = true;
1194  has_dead_tuples = false;
1195  nfrozen = 0;
1196  hastup = false;
1197  prev_dead_count = dead_tuples->num_tuples;
1198  maxoff = PageGetMaxOffsetNumber(page);
1199 
1200  /*
1201  * Note: If you change anything in the loop below, also look at
1202  * heap_page_is_all_visible to see if that needs to be changed.
1203  */
1204  for (offnum = FirstOffsetNumber;
1205  offnum <= maxoff;
1206  offnum = OffsetNumberNext(offnum))
1207  {
1208  ItemId itemid;
1209 
1210  itemid = PageGetItemId(page, offnum);
1211 
1212  /* Unused items require no processing, but we count 'em */
1213  if (!ItemIdIsUsed(itemid))
1214  {
1215  nunused += 1;
1216  continue;
1217  }
1218 
1219  /* Redirect items mustn't be touched */
1220  if (ItemIdIsRedirected(itemid))
1221  {
1222  hastup = true; /* this page won't be truncatable */
1223  continue;
1224  }
1225 
1226  ItemPointerSet(&(tuple.t_self), blkno, offnum);
1227 
1228  /*
1229  * DEAD line pointers are to be vacuumed normally; but we don't
1230  * count them in tups_vacuumed, else we'd be double-counting (at
1231  * least in the common case where heap_page_prune() just freed up
1232  * a non-HOT tuple).
1233  */
1234  if (ItemIdIsDead(itemid))
1235  {
1236  lazy_record_dead_tuple(dead_tuples, &(tuple.t_self));
1237  all_visible = false;
1238  continue;
1239  }
1240 
1241  Assert(ItemIdIsNormal(itemid));
1242 
1243  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
1244  tuple.t_len = ItemIdGetLength(itemid);
1245  tuple.t_tableOid = RelationGetRelid(onerel);
1246 
1247  tupgone = false;
1248 
1249  /*
1250  * The criteria for counting a tuple as live in this block need to
1251  * match what analyze.c's acquire_sample_rows() does, otherwise
1252  * VACUUM and ANALYZE may produce wildly different reltuples
1253  * values, e.g. when there are many recently-dead tuples.
1254  *
1255  * The logic here is a bit simpler than acquire_sample_rows(), as
1256  * VACUUM can't run inside a transaction block, which makes some
1257  * cases impossible (e.g. in-progress insert from the same
1258  * transaction).
1259  */
1260  switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
1261  {
1262  case HEAPTUPLE_DEAD:
1263 
1264  /*
1265  * Ordinarily, DEAD tuples would have been removed by
1266  * heap_page_prune(), but it's possible that the tuple
1267  * state changed since heap_page_prune() looked. In
1268  * particular an INSERT_IN_PROGRESS tuple could have
1269  * changed to DEAD if the inserter aborted. So this
1270  * cannot be considered an error condition.
1271  *
1272  * If the tuple is HOT-updated then it must only be
1273  * removed by a prune operation; so we keep it just as if
1274  * it were RECENTLY_DEAD. Also, if it's a heap-only
1275  * tuple, we choose to keep it, because it'll be a lot
1276  * cheaper to get rid of it in the next pruning pass than
1277  * to treat it like an indexed tuple. Finally, if index
1278  * cleanup is disabled, the second heap pass will not
1279  * execute, and the tuple will not get removed, so we must
1280  * treat it like any other dead tuple that we choose to
1281  * keep.
1282  *
1283  * If this were to happen for a tuple that actually needed
1284  * to be deleted, we'd be in trouble, because it'd
1285  * possibly leave a tuple below the relation's xmin
1286  * horizon alive. heap_prepare_freeze_tuple() is prepared
1287  * to detect that case and abort the transaction,
1288  * preventing corruption.
1289  */
1290  if (HeapTupleIsHotUpdated(&tuple) ||
1291  HeapTupleIsHeapOnly(&tuple) ||
1292  params->index_cleanup == VACOPT_TERNARY_DISABLED)
1293  nkeep += 1;
1294  else
1295  tupgone = true; /* we can delete the tuple */
1296  all_visible = false;
1297  break;
1298  case HEAPTUPLE_LIVE:
1299 
1300  /*
1301  * Count it as live. Not only is this natural, but it's
1302  * also what acquire_sample_rows() does.
1303  */
1304  live_tuples += 1;
1305 
1306  /*
1307  * Is the tuple definitely visible to all transactions?
1308  *
1309  * NB: Like with per-tuple hint bits, we can't set the
1310  * PD_ALL_VISIBLE flag if the inserter committed
1311  * asynchronously. See SetHintBits for more info. Check
1312  * that the tuple is hinted xmin-committed because of
1313  * that.
1314  */
1315  if (all_visible)
1316  {
1317  TransactionId xmin;
1318 
1319  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
1320  {
1321  all_visible = false;
1322  break;
1323  }
1324 
1325  /*
1326  * The inserter definitely committed. But is it old
1327  * enough that everyone sees it as committed?
1328  */
1329  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
1330  if (!TransactionIdPrecedes(xmin, OldestXmin))
1331  {
1332  all_visible = false;
1333  break;
1334  }
1335 
1336  /* Track newest xmin on page. */
1337  if (TransactionIdFollows(xmin, visibility_cutoff_xid))
1338  visibility_cutoff_xid = xmin;
1339  }
1340  break;
1341  case HEAPTUPLE_RECENTLY_DEAD:
1342 
1343  /*
1344  * If tuple is recently deleted then we must not remove it
1345  * from relation.
1346  */
1347  nkeep += 1;
1348  all_visible = false;
1349  break;
1350  case HEAPTUPLE_INSERT_IN_PROGRESS:
1351 
1352  /*
1353  * This is an expected case during concurrent vacuum.
1354  *
1355  * We do not count these rows as live, because we expect
1356  * the inserting transaction to update the counters at
1357  * commit, and we assume that will happen only after we
1358  * report our results. This assumption is a bit shaky,
1359  * but it is what acquire_sample_rows() does, so be
1360  * consistent.
1361  */
1362  all_visible = false;
1363  break;
1364  case HEAPTUPLE_DELETE_IN_PROGRESS:
1365  /* This is an expected case during concurrent vacuum */
1366  all_visible = false;
1367 
1368  /*
1369  * Count such rows as live. As above, we assume the
1370  * deleting transaction will commit and update the
1371  * counters after we report.
1372  */
1373  live_tuples += 1;
1374  break;
1375  default:
1376  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1377  break;
1378  }
1379 
1380  if (tupgone)
1381  {
1382  lazy_record_dead_tuple(dead_tuples, &(tuple.t_self));
1383  HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
1384  &vacrelstats->latestRemovedXid);
1385  tups_vacuumed += 1;
1386  has_dead_tuples = true;
1387  }
1388  else
1389  {
1390  bool tuple_totally_frozen;
1391 
1392  num_tuples += 1;
1393  hastup = true;
1394 
1395  /*
1396  * Each non-removable tuple must be checked to see if it needs
1397  * freezing. Note we already have exclusive buffer lock.
1398  */
1399  if (heap_prepare_freeze_tuple(tuple.t_data,
1400  relfrozenxid, relminmxid,
1401  FreezeLimit, MultiXactCutoff,
1402  &frozen[nfrozen],
1403  &tuple_totally_frozen))
1404  frozen[nfrozen++].offset = offnum;
1405 
1406  if (!tuple_totally_frozen)
1407  all_frozen = false;
1408  }
1409  } /* scan along page */
1410 
1411  /*
1412  * If we froze any tuples, mark the buffer dirty, and write a WAL
1413  * record recording the changes. We must log the changes to be
1414  * crash-safe against future truncation of CLOG.
1415  */
1416  if (nfrozen > 0)
1417  {
1418  START_CRIT_SECTION();
1419 
1420  MarkBufferDirty(buf);
1421 
1422  /* execute collected freezes */
1423  for (i = 0; i < nfrozen; i++)
1424  {
1425  ItemId itemid;
1426  HeapTupleHeader htup;
1427 
1428  itemid = PageGetItemId(page, frozen[i].offset);
1429  htup = (HeapTupleHeader) PageGetItem(page, itemid);
1430 
1431  heap_execute_freeze_tuple(htup, &frozen[i]);
1432  }
1433 
1434  /* Now WAL-log freezing if necessary */
1435  if (RelationNeedsWAL(onerel))
1436  {
1437  XLogRecPtr recptr;
1438 
1439  recptr = log_heap_freeze(onerel, buf, FreezeLimit,
1440  frozen, nfrozen);
1441  PageSetLSN(page, recptr);
1442  }
1443 
1444  END_CRIT_SECTION();
1445  }
1446 
1447  /*
1448  * If there are no indexes we can vacuum the page right now instead of
1449  * doing a second scan. Also we don't do that but forget dead tuples
1450  * when index cleanup is disabled.
1451  */
1452  if (!vacrelstats->useindex && dead_tuples->num_tuples > 0)
1453  {
1454  if (nindexes == 0)
1455  {
1456  /* Remove tuples from heap if the table has no index */
1457  lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
1458  vacuumed_pages++;
1459  has_dead_tuples = false;
1460  }
1461  else
1462  {
1463  /*
1464  * Here, we have indexes but index cleanup is disabled.
1465  * Instead of vacuuming the dead tuples on the heap, we just
1466  * forget them.
1467  *
1468  * Note that vacrelstats->dead_tuples could have tuples which
1469  * became dead after HOT-pruning but are not marked dead yet.
1470  * We do not process them because it's a very rare condition,
1471  * and the next vacuum will process them anyway.
1472  */
1473  Assert(params->index_cleanup == VACOPT_TERNARY_DISABLED);
1474  }
1475 
1476  /*
1477  * Forget the now-vacuumed tuples, and press on, but be careful
1478  * not to reset latestRemovedXid since we want that value to be
1479  * valid.
1480  */
1481  dead_tuples->num_tuples = 0;
1482 
1483  /*
1484  * Periodically do incremental FSM vacuuming to make newly-freed
1485  * space visible on upper FSM pages. Note: although we've cleaned
1486  * the current block, we haven't yet updated its FSM entry (that
1487  * happens further down), so passing end == blkno is correct.
1488  */
1489  if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1490  {
1491  FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum,
1492  blkno);
1493  next_fsm_block_to_vacuum = blkno;
1494  }
1495  }
1496 
1497  freespace = PageGetHeapFreeSpace(page);
1498 
1499  /* mark page all-visible, if appropriate */
1500  if (all_visible && !all_visible_according_to_vm)
1501  {
1502  uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
1503 
1504  if (all_frozen)
1505  flags |= VISIBILITYMAP_ALL_FROZEN;
1506 
1507  /*
1508  * It should never be the case that the visibility map page is set
1509  * while the page-level bit is clear, but the reverse is allowed
1510  * (if checksums are not enabled). Regardless, set both bits so
1511  * that we get back in sync.
1512  *
1513  * NB: If the heap page is all-visible but the VM bit is not set,
1514  * we don't need to dirty the heap page. However, if checksums
1515  * are enabled, we do need to make sure that the heap page is
1516  * dirtied before passing it to visibilitymap_set(), because it
1517  * may be logged. Given that this situation should only happen in
1518  * rare cases after a crash, it is not worth optimizing.
1519  */
1520  PageSetAllVisible(page);
1521  MarkBufferDirty(buf);
1522  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1523  vmbuffer, visibility_cutoff_xid, flags);
1524  }
1525 
1526  /*
1527  * As of PostgreSQL 9.2, the visibility map bit should never be set if
1528  * the page-level bit is clear. However, it's possible that the bit
1529  * got cleared after we checked it and before we took the buffer
1530  * content lock, so we must recheck before jumping to the conclusion
1531  * that something bad has happened.
1532  */
1533  else if (all_visible_according_to_vm && !PageIsAllVisible(page)
1534  && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
1535  {
1536  elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
1537  relname, blkno);
1538  visibilitymap_clear(onerel, blkno, vmbuffer,
1539  VISIBILITYMAP_VALID_BITS);
1540  }
1541 
1542  /*
1543  * It's possible for the value returned by GetOldestXmin() to move
1544  * backwards, so it's not wrong for us to see tuples that appear to
1545  * not be visible to everyone yet, while PD_ALL_VISIBLE is already
1546  * set. The real safe xmin value never moves backwards, but
1547  * GetOldestXmin() is conservative and sometimes returns a value
1548  * that's unnecessarily small, so if we see that contradiction it just
1549  * means that the tuples that we think are not visible to everyone yet
1550  * actually are, and the PD_ALL_VISIBLE flag is correct.
1551  *
1552  * There should never be dead tuples on a page with PD_ALL_VISIBLE
1553  * set, however.
1554  */
1555  else if (PageIsAllVisible(page) && has_dead_tuples)
1556  {
1557  elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
1558  relname, blkno);
1559  PageClearAllVisible(page);
1560  MarkBufferDirty(buf);
1561  visibilitymap_clear(onerel, blkno, vmbuffer,
1562  VISIBILITYMAP_VALID_BITS);
1563  }
1564 
1565  /*
1566  * If the all-visible page is all-frozen but not marked as such yet,
1567  * mark it as all-frozen. Note that all_frozen is only valid if
1568  * all_visible is true, so we must check both.
1569  */
1570  else if (all_visible_according_to_vm && all_visible && all_frozen &&
1571  !VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
1572  {
1573  /*
1574  * We can pass InvalidTransactionId as the cutoff XID here,
1575  * because setting the all-frozen bit doesn't cause recovery
1576  * conflicts.
1577  */
1578  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1579  vmbuffer, InvalidTransactionId,
1580  VISIBILITYMAP_ALL_FROZEN);
1581  }
1582 
1583  UnlockReleaseBuffer(buf);
1584 
1585  /* Remember the location of the last page with nonremovable tuples */
1586  if (hastup)
1587  vacrelstats->nonempty_pages = blkno + 1;
1588 
1589  /*
1590  * If we remembered any tuples for deletion, then the page will be
1591  * visited again by lazy_vacuum_heap, which will compute and record
1592  * its post-compaction free space. If not, then we're done with this
1593  * page, so remember its free space as-is. (This path will always be
1594  * taken if there are no indexes.)
1595  */
1596  if (dead_tuples->num_tuples == prev_dead_count)
1597  RecordPageWithFreeSpace(onerel, blkno, freespace);
1598  }
1599 
1600  /* report that everything is scanned and vacuumed */
1601  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1602 
1603  pfree(frozen);
1604 
1605  /* save stats for use later */
1606  vacrelstats->tuples_deleted = tups_vacuumed;
1607  vacrelstats->new_dead_tuples = nkeep;
1608 
1609  /* now we can compute the new value for pg_class.reltuples */
1610  vacrelstats->new_live_tuples = vac_estimate_reltuples(onerel,
1611  nblocks,
1612  vacrelstats->tupcount_pages,
1613  live_tuples);
1614 
1615  /* also compute total number of surviving heap entries */
1616  vacrelstats->new_rel_tuples =
1617  vacrelstats->new_live_tuples + vacrelstats->new_dead_tuples;
1618 
1619  /*
1620  * Release any remaining pin on visibility map page.
1621  */
1622  if (BufferIsValid(vmbuffer))
1623  {
1624  ReleaseBuffer(vmbuffer);
1625  vmbuffer = InvalidBuffer;
1626  }
1627 
1628  /* If any tuples need to be deleted, perform final vacuum cycle */
1629  /* XXX put a threshold on min number of tuples here? */
1630  if (dead_tuples->num_tuples > 0)
1631  {
1632  /* Work on all the indexes, and then the heap */
1633  lazy_vacuum_all_indexes(onerel, Irel, indstats, vacrelstats,
1634  lps, nindexes);
1635 
1636  /* Remove tuples from heap */
1637  lazy_vacuum_heap(onerel, vacrelstats);
1638  }
1639 
1640  /*
1641  * Vacuum the remainder of the Free Space Map. We must do this whether or
1642  * not there were indexes.
1643  */
1644  if (blkno > next_fsm_block_to_vacuum)
1645  FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum, blkno);
1646 
1647  /* report all blocks vacuumed */
1648  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1649 
1650  /* Do post-vacuum cleanup */
1651  if (vacrelstats->useindex)
1652  lazy_cleanup_all_indexes(Irel, indstats, vacrelstats, lps, nindexes);
1653 
1654  /*
1655  * End parallel mode before updating index statistics as we cannot write
1656  * during parallel mode.
1657  */
1658  if (ParallelVacuumIsActive(lps))
1659  end_parallel_vacuum(Irel, indstats, lps, nindexes);
1660 
1661  /* Update index statistics */
1662  update_index_statistics(Irel, indstats, nindexes);
1663 
1664  /* If no indexes, make log report that lazy_vacuum_heap would've made */
1665  if (vacuumed_pages)
1666  ereport(elevel,
1667  (errmsg("\"%s\": removed %.0f row versions in %u pages",
1668  RelationGetRelationName(onerel),
1669  tups_vacuumed, vacuumed_pages)));
1670 
1671  /*
1672  * This is pretty messy, but we split it up so that we can skip emitting
1673  * individual parts of the message when not applicable.
1674  */
1675  initStringInfo(&buf);
1676  appendStringInfo(&buf,
1677  _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
1678  nkeep, OldestXmin);
1679  appendStringInfo(&buf, _("There were %.0f unused item identifiers.\n"),
1680  nunused);
1681  appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
1682  "Skipped %u pages due to buffer pins, ",
1683  vacrelstats->pinskipped_pages),
1684  vacrelstats->pinskipped_pages);
1685  appendStringInfo(&buf, ngettext("%u frozen page.\n",
1686  "%u frozen pages.\n",
1687  vacrelstats->frozenskipped_pages),
1688  vacrelstats->frozenskipped_pages);
1689  appendStringInfo(&buf, ngettext("%u page is entirely empty.\n",
1690  "%u pages are entirely empty.\n",
1691  empty_pages),
1692  empty_pages);
1693  appendStringInfo(&buf, _("%s."), pg_rusage_show(&ru0));
1694 
1695  ereport(elevel,
1696  (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
1697  RelationGetRelationName(onerel),
1698  tups_vacuumed, num_tuples,
1699  vacrelstats->scanned_pages, nblocks),
1700  errdetail_internal("%s", buf.data)));
1701  pfree(buf.data);
1702 }
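
The visibility-map skipping rules spelled out in the comments above can be condensed as follows (illustration only, not code from vacuumlazy.c):

 /*
  * A block is skipped when it lies inside a run of at least
  * SKIP_PAGES_THRESHOLD (32) consecutive pages that the visibility map
  * reports as all-visible (all-frozen for an aggressive vacuum), unless it
  * is the table's last block and truncation might still be attempted.
  */
 bool skip_this_block =
     blkno != next_unskippable_block && /* VM says this block is skippable  */
     skipping_blocks &&                 /* run is >= SKIP_PAGES_THRESHOLD   */
     !FORCE_CHECK_PAGE();               /* not last-block-before-truncation */
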
double new_rel_tuples
Definition: vacuumlazy.c:283
void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, TransactionId *latestRemovedXid)
Definition: heapam.c:6905
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:84
static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer, int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
Definition: vacuumlazy.c:1838
int heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin, bool report_stats, TransactionId *latestRemovedXid)
Definition: pruneheap.c:180
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:3616
XLogRecPtr log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples)
Definition: heapam.c:7228
#define PROGRESS_VACUUM_HEAP_BLKS_VACUUMED
Definition: progress.h:24
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
#define PageIsEmpty(page)
Definition: bufpage.h:222
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
Definition: xloginsert.c:1009
BlockNumber rel_pages
Definition: vacuumlazy.c:277
double vac_estimate_reltuples(Relation relation, BlockNumber total_pages, BlockNumber scanned_pages, double scanned_tuples)
Definition: vacuum.c:1123
OffsetNumber offset
Definition: heapam_xlog.h:321
static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
Definition: vacuumlazy.c:1769
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition: transam.c:334
LVDeadTuples * dead_tuples
Definition: vacuumlazy.c:289
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:513
void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
Definition: freespace.c:181
#define PROGRESS_VACUUM_MAX_DEAD_TUPLES
Definition: progress.h:26
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
double tuples_deleted
Definition: vacuumlazy.c:287
void visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1405
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3161
#define VISIBILITYMAP_ALL_FROZEN
Definition: visibilitymap.h:27
static void lazy_record_dead_tuple(LVDeadTuples *dead_tuples, ItemPointer itemptr)
Definition: vacuumlazy.c:2756
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:589
bool useindex
Definition: vacuumlazy.c:274
BlockNumber tupcount_pages
Definition: vacuumlazy.c:281
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
#define MaxHeapTuplesPerPage
Definition: htup_details.h:574
#define VM_ALL_FROZEN(r, b, v)
Definition: visibilitymap.h:34
unsigned char uint8
Definition: c.h:365
#define PROGRESS_VACUUM_HEAP_BLKS_SCANNED
Definition: progress.h:23
#define InvalidBuffer
Definition: buf.h:25
#define PROGRESS_VACUUM_TOTAL_HEAP_BLKS
Definition: progress.h:22
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
BlockNumber scanned_pages
Definition: vacuumlazy.c:278
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3322
BlockNumber pinskipped_pages
Definition: vacuumlazy.c:279
#define SizeOfPageHeaderData
Definition: bufpage.h:216
Form_pg_class rd_rel
Definition: rel.h:89
NameData relname
Definition: pg_class.h:38
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
int errdetail_internal(const char *fmt,...)
Definition: elog.c:984
uint16 OffsetNumber
Definition: off.h:24
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:68
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
Definition: heapam.c:6128
#define FORCE_CHECK_PAGE()
#define HeapTupleIsHotUpdated(tuple)
Definition: htup_details.h:676
void pfree(void *pointer)
Definition: mcxt.c:1056
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:91
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
#define VACUUM_FSM_EVERY_PAGES
Definition: vacuumlazy.c:110
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3345
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:3731
#define ERROR
Definition: elog.h:43
Size PageGetHeapFreeSpace(Page page)
Definition: bufpage.c:658
ItemPointerData t_self
Definition: htup.h:65
#define HeapTupleHeaderXminCommitted(tup)
Definition: htup_details.h:324
static TransactionId FreezeLimit
Definition: vacuumlazy.c:300
static void lazy_vacuum_all_indexes(Relation onerel, Relation *Irel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats, LVParallelState *lps, int nindexes)
Definition: vacuumlazy.c:1710
uint32 t_len
Definition: htup.h:64
void heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
Definition: heapam.c:6357
char * get_namespace_name(Oid nspid)
Definition: lsyscache.c:3120
static char * buf
Definition: pg_test_fsync.c:67
#define PageSetAllVisible(page)
Definition: bufpage.h:387
#define FirstOffsetNumber
Definition: off.h:27
static MultiXactId MultiXactCutoff
Definition: vacuumlazy.c:301
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
#define InvalidTransactionId
Definition: transam.h:31
#define RelationGetRelationName(relation)
Definition: rel.h:469
static TransactionId OldestXmin
Definition: vacuumlazy.c:299
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:157
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
double new_live_tuples
Definition: vacuumlazy.c:284
VacOptTernaryValue index_cleanup
Definition: vacuum.h:221
#define SKIP_PAGES_THRESHOLD
Definition: vacuumlazy.c:124
void initStringInfo(StringInfo str)
Definition: stringinfo.c:59
#define WARNING
Definition: elog.h:40
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
static int elevel
Definition: vacuumlazy.c:297
#define ngettext(s, p, n)
Definition: c.h:1146
Size GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk)
Definition: freespace.c:230
static void end_parallel_vacuum(Relation *Irel, IndexBulkDeleteResult **stats, LVParallelState *lps, int nindexes)
Definition: vacuumlazy.c:3211
#define ParallelVacuumIsActive(lps)
Definition: vacuumlazy.c:145
void * palloc0(Size size)
Definition: mcxt.c:980
#define BufferGetPageSize(buffer)
Definition: bufmgr.h:144
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3559
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:195
static void update_index_statistics(Relation *Irel, IndexBulkDeleteResult **stats, int nindexes)
Definition: vacuumlazy.c:3036
static void lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats, LVParallelState *lps, int nindexes)
Definition: vacuumlazy.c:2261
static LVParallelState * begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats, BlockNumber nblocks, int nindexes, int nrequested)
Definition: vacuumlazy.c:3067
#define ereport(elevel,...)
Definition: elog.h:144
static BufferAccessStrategy vac_strategy
Definition: vacuumlazy.c:303
#define PageClearAllVisible(page)
Definition: bufpage.h:389
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:685
#define Assert(condition)
Definition: c.h:738
double new_dead_tuples
Definition: vacuumlazy.c:285
TransactionId latestRemovedXid
Definition: vacuumlazy.c:291
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
#define VM_ALL_VISIBLE(r, b, v)
Definition: visibilitymap.h:32
void pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val)
Definition: pgstat.c:3183
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
size_t Size
Definition: c.h:466
#define PROGRESS_VACUUM_PHASE_SCAN_HEAP
Definition: progress.h:30
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
int nworkers
Definition: vacuum.h:231
#define BufferIsValid(bufnum)
Definition: bufmgr.h:111
#define RelationNeedsWAL(relation)
Definition: rel.h:537
#define VISIBILITYMAP_ALL_VISIBLE
Definition: visibilitymap.h:26
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:544
#define PageGetLSN(page)
Definition: bufpage.h:366
BlockNumber nonempty_pages
Definition: vacuumlazy.c:288
#define PageIsNew(page)
Definition: bufpage.h:229
void * palloc(Size size)
Definition: mcxt.c:949
int errmsg(const char *fmt,...)
Definition: elog.c:824
uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *buf)
BlockNumber frozenskipped_pages
Definition: vacuumlazy.c:280
#define elog(elevel,...)
Definition: elog.h:214
int i
int options
Definition: vacuum.h:210
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:85
static bool lazy_check_needs_freeze(Buffer buf, bool *hastup)
Definition: vacuumlazy.c:1937
void vacuum_delay_point(void)
Definition: vacuum.c:1997
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define _(x)
Definition: elog.c:88
#define RelationGetRelid(relation)
Definition: rel.h:435
void FreeSpaceMapVacuumRange(Relation rel, BlockNumber start, BlockNumber end)
Definition: freespace.c:352
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127
static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
Definition: vacuumlazy.c:2738
#define RelationGetNamespace(relation)
Definition: rel.h:476

◆ lazy_space_alloc()

static void lazy_space_alloc (LVRelStats *vacrelstats, BlockNumber relblocks)

Definition at line 2738 of file vacuumlazy.c.

References compute_max_dead_tuples(), LVRelStats::dead_tuples, LVDeadTuples::max_tuples, LVDeadTuples::num_tuples, palloc(), SizeOfDeadTuples, and LVRelStats::useindex.

Referenced by lazy_scan_heap().

2739 {
2740  LVDeadTuples *dead_tuples = NULL;
2741  long maxtuples;
2742 
2743  maxtuples = compute_max_dead_tuples(relblocks, vacrelstats->useindex);
2744 
2745  dead_tuples = (LVDeadTuples *) palloc(SizeOfDeadTuples(maxtuples));
2746  dead_tuples->num_tuples = 0;
2747  dead_tuples->max_tuples = (int) maxtuples;
2748 
2749  vacrelstats->dead_tuples = dead_tuples;
2750 }
LVDeadTuples * dead_tuples
Definition: vacuumlazy.c:289
#define SizeOfDeadTuples(cnt)
Definition: vacuumlazy.c:163
bool useindex
Definition: vacuumlazy.c:274
static long compute_max_dead_tuples(BlockNumber relblocks, bool hasindex)
Definition: vacuumlazy.c:2706
void * palloc(Size size)
Definition: mcxt.c:949
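
The dead-tuple array is a single palloc'd chunk whose capacity is chosen up front by compute_max_dead_tuples() (not listed here). The standalone sketch below only illustrates the shape of that allocation: it assumes a memory budget, derives how many TIDs fit in it, and re-declares LVDeadTuples and a SizeOfDeadTuples equivalent locally, so the types and the 64 MB figure are stand-ins rather than the server's exact definitions.

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>

/* Stand-ins for ItemPointerData and LVDeadTuples, for illustration only. */
typedef struct { unsigned short bi_hi, bi_lo, offset; } ItemPointerData;

typedef struct
{
    int             max_tuples;   /* # slots allocated in the array */
    int             num_tuples;   /* current # of entries */
    ItemPointerData itemptrs[];   /* sorted array of dead TIDs */
} LVDeadTuples;

/* Equivalent of SizeOfDeadTuples(cnt): header plus cnt TID slots. */
#define SIZE_OF_DEAD_TUPLES(cnt) \
    (offsetof(LVDeadTuples, itemptrs) + (cnt) * sizeof(ItemPointerData))

int
main(void)
{
    /* Assume a 64 MB maintenance_work_mem budget for the illustration. */
    size_t budget = 64UL * 1024 * 1024;
    long   maxtuples = (long) ((budget - offsetof(LVDeadTuples, itemptrs))
                               / sizeof(ItemPointerData));

    LVDeadTuples *dt = malloc(SIZE_OF_DEAD_TUPLES(maxtuples));
    if (dt == NULL)
        return 1;
    dt->num_tuples = 0;
    dt->max_tuples = (int) maxtuples;

    printf("budget %zu bytes -> room for %ld dead TIDs\n", budget, maxtuples);
    free(dt);
    return 0;
}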

◆ lazy_tid_reaped()

static bool lazy_tid_reaped (ItemPointer itemptr, void *state)

Definition at line 2780 of file vacuumlazy.c.

References LVDeadTuples::itemptrs, LVDeadTuples::num_tuples, and vac_cmp_itemptr().

Referenced by lazy_vacuum_index().

2781 {
2782  LVDeadTuples *dead_tuples = (LVDeadTuples *) state;
2783  ItemPointer res;
2784 
2785  res = (ItemPointer) bsearch((void *) itemptr,
2786  (void *) dead_tuples->itemptrs,
2787  dead_tuples->num_tuples,
2788  sizeof(ItemPointerData),
2789  vac_cmp_itemptr);
2790 
2791  return (res != NULL);
2792 }
ItemPointerData itemptrs[FLEXIBLE_ARRAY_MEMBER]
Definition: vacuumlazy.c:158
ItemPointerData * ItemPointer
Definition: itemptr.h:49
Definition: regguts.h:298
static int vac_cmp_itemptr(const void *left, const void *right)
Definition: vacuumlazy.c:2798
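
lazy_tid_reaped() is the callback handed to index_bulk_delete(): because lazy_scan_heap() records dead TIDs in heap order, the array is already sorted by (block, offset) and a plain bsearch() answers "was this heap TID reaped?" in O(log n). The standalone sketch below reproduces that lookup with simplified stand-in types; the comparator uses the same ordering vac_cmp_itemptr() implements.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for ItemPointerData: (block, offset). */
typedef struct { unsigned block; unsigned short offset; } Tid;

/* Same ordering as vac_cmp_itemptr(): block number first, then offset. */
static int
tid_cmp(const void *left, const void *right)
{
    const Tid *l = left, *r = right;

    if (l->block != r->block)
        return (l->block < r->block) ? -1 : 1;
    if (l->offset != r->offset)
        return (l->offset < r->offset) ? -1 : 1;
    return 0;
}

/* Equivalent of lazy_tid_reaped(): binary search in the sorted dead-TID array. */
static int
tid_reaped(const Tid *itemptr, const Tid *dead, size_t ndead)
{
    return bsearch(itemptr, dead, ndead, sizeof(Tid), tid_cmp) != NULL;
}

int
main(void)
{
    /* Dead TIDs as lazy_scan_heap() would have recorded them: already sorted. */
    Tid dead[] = {{1, 3}, {1, 7}, {4, 2}, {9, 1}};
    Tid probe1 = {4, 2}, probe2 = {4, 3};

    printf("(4,2) reaped? %d\n", tid_reaped(&probe1, dead, 4));  /* 1 */
    printf("(4,3) reaped? %d\n", tid_reaped(&probe2, dead, 4));  /* 0 */
    return 0;
}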

◆ lazy_truncate_heap()

static void lazy_truncate_heap (Relation onerel, LVRelStats *vacrelstats)

Definition at line 2438 of file vacuumlazy.c.

References AccessExclusiveLock, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), elevel, ereport, errdetail_internal(), errmsg(), LVRelStats::lock_waiter_detected, LVRelStats::nonempty_pages, LVRelStats::pages_removed, pg_rusage_init(), pg_rusage_show(), pg_usleep(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelStats::rel_pages, RelationGetNumberOfBlocks, RelationGetRelationName, RelationTruncate(), UnlockRelation(), VACUUM_TRUNCATE_LOCK_TIMEOUT, and VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL.

Referenced by heap_vacuum_rel().

2439 {
2440  BlockNumber old_rel_pages = vacrelstats->rel_pages;
2441  BlockNumber new_rel_pages;
2442  int lock_retry;
2443 
2444  /* Report that we are now truncating */
2445  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2446  PROGRESS_VACUUM_PHASE_TRUNCATE);
2447 
2448  /*
2449  * Loop until no more truncating can be done.
2450  */
2451  do
2452  {
2453  PGRUsage ru0;
2454 
2455  pg_rusage_init(&ru0);
2456 
2457  /*
2458  * We need full exclusive lock on the relation in order to do
2459  * truncation. If we can't get it, give up rather than waiting --- we
2460  * don't want to block other backends, and we don't want to deadlock
2461  * (which is quite possible considering we already hold a lower-grade
2462  * lock).
2463  */
2464  vacrelstats->lock_waiter_detected = false;
2465  lock_retry = 0;
2466  while (true)
2467  {
2468  if (ConditionalLockRelation(onerel, AccessExclusiveLock))
2469  break;
2470 
2471  /*
2472  * Check for interrupts while trying to (re-)acquire the exclusive
2473  * lock.
2474  */
2475  CHECK_FOR_INTERRUPTS();
2476 
2477  if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
2478  VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
2479  {
2480  /*
2481  * We failed to establish the lock in the specified number of
2482  * retries. This means we give up truncating.
2483  */
2484  vacrelstats->lock_waiter_detected = true;
2485  ereport(elevel,
2486  (errmsg("\"%s\": stopping truncate due to conflicting lock request",
2487  RelationGetRelationName(onerel))));
2488  return;
2489  }
2490 
2491  pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL * 1000L);
2492  }
2493 
2494  /*
2495  * Now that we have exclusive lock, look to see if the rel has grown
2496  * whilst we were vacuuming with non-exclusive lock. If so, give up;
2497  * the newly added pages presumably contain non-deletable tuples.
2498  */
2499  new_rel_pages = RelationGetNumberOfBlocks(onerel);
2500  if (new_rel_pages != old_rel_pages)
2501  {
2502  /*
2503  * Note: we intentionally don't update vacrelstats->rel_pages with
2504  * the new rel size here. If we did, it would amount to assuming
2505  * that the new pages are empty, which is unlikely. Leaving the
2506  * numbers alone amounts to assuming that the new pages have the
2507  * same tuple density as existing ones, which is less unlikely.
2508  */
2509  UnlockRelation(onerel, AccessExclusiveLock);
2510  return;
2511  }
2512 
2513  /*
2514  * Scan backwards from the end to verify that the end pages actually
2515  * contain no tuples. This is *necessary*, not optional, because
2516  * other backends could have added tuples to these pages whilst we
2517  * were vacuuming.
2518  */
2519  new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
2520 
2521  if (new_rel_pages >= old_rel_pages)
2522  {
2523  /* can't do anything after all */
2524  UnlockRelation(onerel, AccessExclusiveLock);
2525  return;
2526  }
2527 
2528  /*
2529  * Okay to truncate.
2530  */
2531  RelationTruncate(onerel, new_rel_pages);
2532 
2533  /*
2534  * We can release the exclusive lock as soon as we have truncated.
2535  * Other backends can't safely access the relation until they have
2536  * processed the smgr invalidation that smgrtruncate sent out ... but
2537  * that should happen as part of standard invalidation processing once
2538  * they acquire lock on the relation.
2539  */
2540  UnlockRelation(onerel, AccessExclusiveLock);
2541 
2542  /*
2543  * Update statistics. Here, it *is* correct to adjust rel_pages
2544  * without also touching reltuples, since the tuple count wasn't
2545  * changed by the truncation.
2546  */
2547  vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
2548  vacrelstats->rel_pages = new_rel_pages;
2549 
2550  ereport(elevel,
2551  (errmsg("\"%s\": truncated %u to %u pages",
2552  RelationGetRelationName(onerel),
2553  old_rel_pages, new_rel_pages),
2554  errdetail_internal("%s",
2555  pg_rusage_show(&ru0))));
2556  old_rel_pages = new_rel_pages;
2557  } while (new_rel_pages > vacrelstats->nonempty_pages &&
2558  vacrelstats->lock_waiter_detected);
2559 }
static BlockNumber count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
Definition: vacuumlazy.c:2567
BlockNumber rel_pages
Definition: vacuumlazy.c:277
void UnlockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:282
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3161
#define VACUUM_TRUNCATE_LOCK_TIMEOUT
Definition: vacuumlazy.c:102
uint32 BlockNumber
Definition: block.h:31
int errdetail_internal(const char *fmt,...)
Definition: elog.c:984
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
#define PROGRESS_VACUUM_PHASE_TRUNCATE
Definition: progress.h:34
void pg_usleep(long microsec)
Definition: signal.c:53
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:247
#define RelationGetRelationName(relation)
Definition: rel.h:469
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL
Definition: vacuumlazy.c:101
static int elevel
Definition: vacuumlazy.c:297
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:195
#define ereport(elevel,...)
Definition: elog.h:144
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
#define AccessExclusiveLock
Definition: lockdefs.h:45
BlockNumber pages_removed
Definition: vacuumlazy.c:286
BlockNumber nonempty_pages
Definition: vacuumlazy.c:288
int errmsg(const char *fmt,...)
Definition: elog.c:824
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:229
bool lock_waiter_detected
Definition: vacuumlazy.c:292
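
The retry loop above gives up on truncation rather than block other backends: it polls ConditionalLockRelation(), sleeping VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL (50 ms) between attempts, and abandons the attempt after VACUUM_TRUNCATE_LOCK_TIMEOUT (5000 ms), i.e. after 5000 / 50 = 100 failed tries. A minimal standalone sketch of that pattern follows; the try_lock() stub stands in for ConditionalLockRelation() and acquires the lock on the third attempt purely for demonstration.

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

#define LOCK_WAIT_INTERVAL_MS 50    /* cf. VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL */
#define LOCK_TIMEOUT_MS       5000  /* cf. VACUUM_TRUNCATE_LOCK_TIMEOUT */

/* Stub: pretend the exclusive lock becomes available on the 3rd attempt. */
static bool
try_lock(void)
{
    static int calls = 0;
    return ++calls >= 3;
}

int
main(void)
{
    int lock_retry = 0;

    for (;;)
    {
        if (try_lock())
        {
            printf("acquired lock after %d retries\n", lock_retry);
            break;
        }
        if (++lock_retry > LOCK_TIMEOUT_MS / LOCK_WAIT_INTERVAL_MS)
        {
            /* Equivalent of setting lock_waiter_detected and giving up. */
            printf("giving up: conflicting lock request\n");
            return 0;
        }
        usleep(LOCK_WAIT_INTERVAL_MS * 1000);   /* like pg_usleep() */
    }
    return 0;
}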

◆ lazy_vacuum_all_indexes()

static void lazy_vacuum_all_indexes (Relation onerel, Relation *Irel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats, LVParallelState *lps, int nindexes)

Definition at line 1710 of file vacuumlazy.c.

References Assert, LVRelStats::dead_tuples, LVShared::estimated_count, LVShared::first_time, LVShared::for_cleanup, idx(), IsParallelWorker, lazy_parallel_vacuum_indexes(), lazy_vacuum_index(), LVParallelState::lvshared, LVRelStats::num_index_scans, LVRelStats::old_live_tuples, ParallelVacuumIsActive, pgstat_progress_update_param(), PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, LVShared::reltuples, and vacuum_log_cleanup_info().

Referenced by lazy_scan_heap().

1714 {
1715  Assert(!IsParallelWorker());
1716  Assert(nindexes > 0);
1717 
1718  /* Log cleanup info before we touch indexes */
1719  vacuum_log_cleanup_info(onerel, vacrelstats);
1720 
1721  /* Report that we are now vacuuming indexes */
1722  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1723  PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
1724 
1725  /* Perform index vacuuming with parallel workers for parallel vacuum. */
1726  if (ParallelVacuumIsActive(lps))
1727  {
1728  /* Tell parallel workers to do index vacuuming */
1729  lps->lvshared->for_cleanup = false;
1730  lps->lvshared->first_time = false;
1731 
1732  /*
1733  * We can only provide an approximate value of num_heap_tuples in
1734  * vacuum cases.
1735  */
1736  lps->lvshared->reltuples = vacrelstats->old_live_tuples;
1737  lps->lvshared->estimated_count = true;
1738 
1739  lazy_parallel_vacuum_indexes(Irel, stats, vacrelstats, lps, nindexes);
1740  }
1741  else
1742  {
1743  int idx;
1744 
1745  for (idx = 0; idx < nindexes; idx++)
1746  lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats->dead_tuples,
1747  vacrelstats->old_live_tuples);
1748  }
1749 
1750  /* Increase and report the number of index scans */
1751  vacrelstats->num_index_scans++;
1752  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_INDEX_VACUUMS,
1753  vacrelstats->num_index_scans);
1754 }
bool estimated_count
Definition: vacuumlazy.c:200
LVDeadTuples * dead_tuples
Definition: vacuumlazy.c:289
#define PROGRESS_VACUUM_PHASE_VACUUM_INDEX
Definition: progress.h:31
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3161
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats, LVDeadTuples *dead_tuples, double reltuples)
Definition: vacuumlazy.c:2315
#define IsParallelWorker()
Definition: parallel.h:61
bool first_time
Definition: vacuumlazy.c:188
double reltuples
Definition: vacuumlazy.c:199
static void lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats, LVParallelState *lps, int nindexes)
Definition: vacuumlazy.c:1989
#define ParallelVacuumIsActive(lps)
Definition: vacuumlazy.c:145
int num_index_scans
Definition: vacuumlazy.c:290
double old_live_tuples
Definition: vacuumlazy.c:282
static void vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
Definition: vacuumlazy.c:649
#define Assert(condition)
Definition: c.h:738
#define PROGRESS_VACUUM_NUM_INDEX_VACUUMS
Definition: progress.h:25
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
LVShared * lvshared
Definition: vacuumlazy.c:260
bool for_cleanup
Definition: vacuumlazy.c:187

◆ lazy_vacuum_heap()

static void lazy_vacuum_heap (Relation onerel, LVRelStats *vacrelstats)

Definition at line 1769 of file vacuumlazy.c.

References buf, BufferGetPage, BufferIsValid, ConditionalLockBufferForCleanup(), LVRelStats::dead_tuples, elevel, ereport, errdetail_internal(), errmsg(), InvalidBuffer, ItemPointerGetBlockNumber, LVDeadTuples::itemptrs, lazy_vacuum_page(), MAIN_FORKNUM, PageGetHeapFreeSpace(), pg_rusage_init(), pg_rusage_show(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, RBM_NORMAL, ReadBufferExtended(), RecordPageWithFreeSpace(), RelationGetRelationName, ReleaseBuffer(), UnlockReleaseBuffer(), and vacuum_delay_point().

Referenced by lazy_scan_heap().

1770 {
1771  int tupindex;
1772  int npages;
1773  PGRUsage ru0;
1774  Buffer vmbuffer = InvalidBuffer;
1775 
1776  /* Report that we are now vacuuming the heap */
1777  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1778  PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
1779 
1780  pg_rusage_init(&ru0);
1781  npages = 0;
1782 
1783  tupindex = 0;
1784  while (tupindex < vacrelstats->dead_tuples->num_tuples)
1785  {
1786  BlockNumber tblk;
1787  Buffer buf;
1788  Page page;
1789  Size freespace;
1790 
1791  vacuum_delay_point();
1792 
1793  tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples->itemptrs[tupindex]);
1794  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
1795  vac_strategy);
1796  if (!ConditionalLockBufferForCleanup(buf))
1797  {
1798  ReleaseBuffer(buf);
1799  ++tupindex;
1800  continue;
1801  }
1802  tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
1803  &vmbuffer);
1804 
1805  /* Now that we've compacted the page, record its available space */
1806  page = BufferGetPage(buf);
1807  freespace = PageGetHeapFreeSpace(page);
1808 
1809  UnlockReleaseBuffer(buf);
1810  RecordPageWithFreeSpace(onerel, tblk, freespace);
1811  npages++;
1812  }
1813 
1814  if (BufferIsValid(vmbuffer))
1815  {
1816  ReleaseBuffer(vmbuffer);
1817  vmbuffer = InvalidBuffer;
1818  }
1819 
1820  ereport(elevel,
1821  (errmsg("\"%s\": removed %d row versions in %d pages",
1822  RelationGetRelationName(onerel),
1823  tupindex, npages),
1824  errdetail_internal("%s", pg_rusage_show(&ru0))));
1825 }
static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer, int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
Definition: vacuumlazy.c:1838
ItemPointerData itemptrs[FLEXIBLE_ARRAY_MEMBER]
Definition: vacuumlazy.c:158
LVDeadTuples * dead_tuples
Definition: vacuumlazy.c:289
void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
Definition: freespace.c:181
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3161
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:589
#define InvalidBuffer
Definition: buf.h:25
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3322
int errdetail_internal(const char *fmt,...)
Definition: elog.c:984
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3345
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:3731
Size PageGetHeapFreeSpace(Page page)
Definition: bufpage.c:658
static char * buf
Definition: pg_test_fsync.c:67
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
#define RelationGetRelationName(relation)
Definition: rel.h:469
#define BufferGetPage(buffer)
Definition: bufmgr.h:157
static int elevel
Definition: vacuumlazy.c:297
#define ereport(elevel,...)
Definition: elog.h:144
static BufferAccessStrategy vac_strategy
Definition: vacuumlazy.c:303
size_t Size
Definition: c.h:466
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
#define BufferIsValid(bufnum)
Definition: bufmgr.h:111
int errmsg(const char *fmt,...)
Definition: elog.c:824
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void vacuum_delay_point(void)
Definition: vacuum.c:1997
int Buffer
Definition: buf.h:23
#define PROGRESS_VACUUM_PHASE_VACUUM_HEAP
Definition: progress.h:32
Pointer Page
Definition: bufpage.h:78
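
Because the dead-TID array is sorted by heap block, this second pass visits each heap page exactly once: lazy_vacuum_page() consumes the run of entries belonging to the current block and returns the index of the first entry for the next block, which is where the outer loop resumes. A standalone sketch of that block-by-block consumption, using stand-in types and no buffer locking:

#include <stdio.h>

typedef struct { unsigned block; unsigned short offset; } Tid;

/* Consume all entries for dead[tupindex]'s block (the role lazy_vacuum_page()
 * plays) and return the index of the first entry for the next block. */
static int
vacuum_one_block(const Tid *dead, int ndead, int tupindex)
{
    unsigned blkno = dead[tupindex].block;

    while (tupindex < ndead && dead[tupindex].block == blkno)
    {
        printf("  unlink item (%u,%u)\n", blkno, dead[tupindex].offset);
        tupindex++;
    }
    return tupindex;
}

int
main(void)
{
    /* Sorted as lazy_scan_heap() recorded them. */
    Tid dead[] = {{1, 2}, {1, 5}, {3, 1}, {3, 4}, {3, 9}, {7, 6}};
    int ndead = 6, tupindex = 0;

    while (tupindex < ndead)
    {
        printf("visit heap block %u\n", dead[tupindex].block);
        tupindex = vacuum_one_block(dead, ndead, tupindex);
    }
    return 0;
}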

◆ lazy_vacuum_index()

static void lazy_vacuum_index (Relation indrel, IndexBulkDeleteResult **stats, LVDeadTuples *dead_tuples, double reltuples)

Definition at line 2315 of file vacuumlazy.c.

References IndexVacuumInfo::analyze_only, elevel, ereport, errdetail_internal(), errmsg(), IndexVacuumInfo::estimated_count, gettext_noop, IndexVacuumInfo::index, index_bulk_delete(), IsParallelWorker, lazy_tid_reaped(), IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pg_rusage_init(), pg_rusage_show(), RelationGetRelationName, IndexVacuumInfo::report_progress, IndexVacuumInfo::strategy, and vac_strategy.

Referenced by lazy_vacuum_all_indexes(), and vacuum_one_index().

2317 {
2318  IndexVacuumInfo ivinfo;
2319  const char *msg;
2320  PGRUsage ru0;
2321 
2322  pg_rusage_init(&ru0);
2323 
2324  ivinfo.index = indrel;
2325  ivinfo.analyze_only = false;
2326  ivinfo.report_progress = false;
2327  ivinfo.estimated_count = true;
2328  ivinfo.message_level = elevel;
2329  ivinfo.num_heap_tuples = reltuples;
2330  ivinfo.strategy = vac_strategy;
2331 
2332  /* Do bulk deletion */
2333  *stats = index_bulk_delete(&ivinfo, *stats,
2334  lazy_tid_reaped, (void *) dead_tuples);
2335 
2336  if (IsParallelWorker())
2337  msg = gettext_noop("scanned index \"%s\" to remove %d row versions by parallel vacuum worker");
2338  else
2339  msg = gettext_noop("scanned index \"%s\" to remove %d row versions");
2340 
2341  ereport(elevel,
2342  (errmsg(msg,
2343  RelationGetRelationName(indrel),
2344  dead_tuples->num_tuples),
2345  errdetail_internal("%s", pg_rusage_show(&ru0))));
2346 }
static bool lazy_tid_reaped(ItemPointer itemptr, void *state)
Definition: vacuumlazy.c:2780
bool analyze_only
Definition: genam.h:47
bool report_progress
Definition: genam.h:48
BufferAccessStrategy strategy
Definition: genam.h:52
#define gettext_noop(x)
Definition: c.h:1160
Relation index
Definition: genam.h:46
int errdetail_internal(const char *fmt,...)
Definition: elog.c:984
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
#define RelationGetRelationName(relation)
Definition: rel.h:469
#define IsParallelWorker()
Definition: parallel.h:61
static int elevel
Definition: vacuumlazy.c:297
IndexBulkDeleteResult * index_bulk_delete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state)
Definition: indexam.c:682
#define ereport(elevel,...)
Definition: elog.h:144
int message_level
Definition: genam.h:50
double num_heap_tuples
Definition: genam.h:51
static BufferAccessStrategy vac_strategy
Definition: vacuumlazy.c:303
int errmsg(const char *fmt,...)
Definition: elog.c:824
bool estimated_count
Definition: genam.h:49

◆ lazy_vacuum_page()

static int lazy_vacuum_page (Relation onerel, BlockNumber blkno, Buffer buffer, int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)

Definition at line 1838 of file vacuumlazy.c.

References Assert, BufferGetPage, BufferIsValid, LVRelStats::dead_tuples, END_CRIT_SECTION, heap_page_is_all_visible(), InvalidXLogRecPtr, ItemIdSetUnused, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LVDeadTuples::itemptrs, LVRelStats::latestRemovedXid, log_heap_clean(), MarkBufferDirty(), MaxOffsetNumber, LVDeadTuples::num_tuples, PageGetItemId, PageIsAllVisible, PageRepairFragmentation(), PageSetAllVisible, PageSetLSN, pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, RelationNeedsWAL, START_CRIT_SECTION, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_get_status(), and visibilitymap_set().

Referenced by lazy_scan_heap(), and lazy_vacuum_heap().

1840 {
1841  LVDeadTuples *dead_tuples = vacrelstats->dead_tuples;
1842  Page page = BufferGetPage(buffer);
1843  OffsetNumber unused[MaxOffsetNumber];
1844  int uncnt = 0;
1845  TransactionId visibility_cutoff_xid;
1846  bool all_frozen;
1847 
1848  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1849 
1850  START_CRIT_SECTION();
1851 
1852  for (; tupindex < dead_tuples->num_tuples; tupindex++)
1853  {
1854  BlockNumber tblk;
1855  OffsetNumber toff;
1856  ItemId itemid;
1857 
1858  tblk = ItemPointerGetBlockNumber(&dead_tuples->itemptrs[tupindex]);
1859  if (tblk != blkno)
1860  break; /* past end of tuples for this block */
1861  toff = ItemPointerGetOffsetNumber(&dead_tuples->itemptrs[tupindex]);
1862  itemid = PageGetItemId(page, toff);
1863  ItemIdSetUnused(itemid);
1864  unused[uncnt++] = toff;
1865  }
1866 
1867  PageRepairFragmentation(page);
1868 
1869  /*
1870  * Mark buffer dirty before we write WAL.
1871  */
1872  MarkBufferDirty(buffer);
1873 
1874  /* XLOG stuff */
1875  if (RelationNeedsWAL(onerel))
1876  {
1877  XLogRecPtr recptr;
1878 
1879  recptr = log_heap_clean(onerel, buffer,
1880  NULL, 0, NULL, 0,
1881  unused, uncnt,
1882  vacrelstats->latestRemovedXid);
1883  PageSetLSN(page, recptr);
1884  }
1885 
1886  /*
1887  * End critical section, so we safely can do visibility tests (which
1888  * possibly need to perform IO and allocate memory!). If we crash now the
1889  * page (including the corresponding vm bit) might not be marked all
1890  * visible, but that's fine. A later vacuum will fix that.
1891  */
1892  END_CRIT_SECTION();
1893 
1894  /*
1895  * Now that we have removed the dead tuples from the page, once again
1896  * check if the page has become all-visible. The page is already marked
1897  * dirty, exclusively locked, and, if needed, a full page image has been
1898  * emitted in the log_heap_clean() above.
1899  */
1900  if (heap_page_is_all_visible(onerel, buffer, &visibility_cutoff_xid,
1901  &all_frozen))
1902  PageSetAllVisible(page);
1903 
1904  /*
1905  * All the changes to the heap page have been done. If the all-visible
1906  * flag is now set, also set the VM all-visible bit (and, if possible, the
1907  * all-frozen bit) unless this has already been done previously.
1908  */
1909  if (PageIsAllVisible(page))
1910  {
1911  uint8 vm_status = visibilitymap_get_status(onerel, blkno, vmbuffer);
1912  uint8 flags = 0;
1913 
1914  /* Set the VM all-frozen bit to flag, if needed */
1915  if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
1916  flags |= VISIBILITYMAP_ALL_VISIBLE;
1917  if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0 && all_frozen)
1918  flags |= VISIBILITYMAP_ALL_FROZEN;
1919 
1920  Assert(BufferIsValid(*vmbuffer));
1921  if (flags != 0)
1922  visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr,
1923  *vmbuffer, visibility_cutoff_xid, flags);
1924  }
1925 
1926  return tupindex;
1927 }
#define PROGRESS_VACUUM_HEAP_BLKS_VACUUMED
Definition: progress.h:24
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
ItemPointerData itemptrs[FLEXIBLE_ARRAY_MEMBER]
Definition: vacuumlazy.c:158
LVDeadTuples * dead_tuples
Definition: vacuumlazy.c:289
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:513
void visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1405
#define MaxOffsetNumber
Definition: off.h:28
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3161
#define VISIBILITYMAP_ALL_FROZEN
Definition: visibilitymap.h:27
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
unsigned char uint8
Definition: c.h:365
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
uint32 BlockNumber
Definition: block.h:31
uint16 OffsetNumber
Definition: off.h:24
#define PageSetAllVisible(page)
Definition: bufpage.h:387
#define BufferGetPage(buffer)
Definition: bufmgr.h:157
static bool heap_page_is_all_visible(Relation rel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
Definition: vacuumlazy.c:2831
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:738
TransactionId latestRemovedXid
Definition: vacuumlazy.c:291
XLogRecPtr log_heap_clean(Relation reln, Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused, TransactionId latestRemovedXid)
Definition: heapam.c:7177
#define BufferIsValid(bufnum)
Definition: bufmgr.h:111
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define RelationNeedsWAL(relation)
Definition: rel.h:537
#define VISIBILITYMAP_ALL_VISIBLE
Definition: visibilitymap.h:26
void PageRepairFragmentation(Page page)
Definition: bufpage.c:475
uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *buf)
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
Pointer Page
Definition: bufpage.h:78
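
The visibility-map update at the end of lazy_vacuum_page() only sets the bits that are not already set: VISIBILITYMAP_ALL_VISIBLE whenever the page is now all-visible, plus VISIBILITYMAP_ALL_FROZEN only if every remaining tuple is frozen. A standalone sketch of that flag computation; the bit values below are illustrative stand-ins, the real ones live in visibilitymap.h.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins for the visibilitymap.h flag bits. */
#define ALL_VISIBLE 0x01
#define ALL_FROZEN  0x02

/* Decide which VM bits still need to be set for a page that is now
 * all-visible, given the bits already present and whether every tuple
 * on it is frozen.  Mirrors the logic at the end of lazy_vacuum_page(). */
static unsigned
vm_bits_to_set(unsigned vm_status, bool all_frozen)
{
    unsigned flags = 0;

    if ((vm_status & ALL_VISIBLE) == 0)
        flags |= ALL_VISIBLE;
    if ((vm_status & ALL_FROZEN) == 0 && all_frozen)
        flags |= ALL_FROZEN;
    return flags;
}

int
main(void)
{
    printf("%u\n", vm_bits_to_set(0, true));            /* 3: set both bits  */
    printf("%u\n", vm_bits_to_set(ALL_VISIBLE, false)); /* 0: nothing to do  */
    printf("%u\n", vm_bits_to_set(ALL_VISIBLE, true));  /* 2: add ALL_FROZEN */
    return 0;
}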

◆ parallel_vacuum_index()

static void parallel_vacuum_index (Relation *Irel, IndexBulkDeleteResult **stats, LVShared *lvshared, LVDeadTuples *dead_tuples, int nindexes)

Definition at line 2107 of file vacuumlazy.c.

References get_indstats(), LVShared::idx, idx(), pg_atomic_add_fetch_u32(), pg_atomic_fetch_add_u32(), pg_atomic_sub_fetch_u32(), skip_parallel_vacuum_index(), vacuum_one_index(), and VacuumActiveNWorkers.

Referenced by lazy_parallel_vacuum_indexes(), and parallel_vacuum_main().

2110 {
2111  /*
2112  * Increment the active worker count if we are able to launch any worker.
2113  */
2114  if (VacuumActiveNWorkers)
2115  pg_atomic_add_fetch_u32(VacuumActiveNWorkers, 1);
2116 
2117  /* Loop until all indexes are vacuumed */
2118  for (;;)
2119  {
2120  int idx;
2121  LVSharedIndStats *shared_indstats;
2122 
2123  /* Get an index number to process */
2124  idx = pg_atomic_fetch_add_u32(&(lvshared->idx), 1);
2125 
2126  /* Done for all indexes? */
2127  if (idx >= nindexes)
2128  break;
2129 
2130  /* Get the index statistics of this index from DSM */
2131  shared_indstats = get_indstats(lvshared, idx);
2132 
2133  /*
2134  * Skip processing indexes that don't participate in the parallel
2135  * operation.
2136  */
2137  if (shared_indstats == NULL ||
2138  skip_parallel_vacuum_index(Irel[idx], lvshared))
2139  continue;
2140 
2141  /* Do vacuum or cleanup of the index */
2142  vacuum_one_index(Irel[idx], &(stats[idx]), lvshared, shared_indstats,
2143  dead_tuples);
2144  }
2145 
2146  /*
2147  * We have completed the index vacuum so decrement the active worker
2148  * count.
2149  */
2150  if (VacuumActiveNWorkers)
2151  pg_atomic_sub_fetch_u32(VacuumActiveNWorkers, 1);
2152 }
pg_atomic_uint32 * VacuumActiveNWorkers
Definition: vacuum.c:77
static uint32 pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
Definition: atomics.h:401
static uint32 pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:386
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
static bool skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared)
Definition: vacuumlazy.c:3274
static LVSharedIndStats * get_indstats(LVShared *lvshared, int n)
Definition: vacuumlazy.c:3249
static void vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats, LVShared *lvshared, LVSharedIndStats *shared_indstats, LVDeadTuples *dead_tuples)
Definition: vacuumlazy.c:2201
pg_atomic_uint32 idx
Definition: vacuumlazy.c:231
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:328
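
Work distribution in parallel vacuum is just an atomic counter: each participant (the leader and every worker) repeatedly does a fetch-and-add on lvshared->idx and processes the index number it got back, so no two processes ever claim the same index. A standalone C11 sketch of the same claiming loop using <stdatomic.h>; threads stand in for parallel workers and the index count is illustrative.

#include <stdio.h>
#include <stdatomic.h>
#include <pthread.h>

#define NINDEXES 5
#define NWORKERS 3

static atomic_uint next_idx;            /* plays the role of lvshared->idx */

static void *
worker(void *arg)
{
    int id = *(int *) arg;

    for (;;)
    {
        /* Claim the next unprocessed index, like pg_atomic_fetch_add_u32(). */
        unsigned idx = atomic_fetch_add(&next_idx, 1);

        if (idx >= NINDEXES)
            break;                      /* all indexes handed out */
        printf("worker %d vacuums index %u\n", id, idx);
    }
    return NULL;
}

int
main(void)
{
    pthread_t th[NWORKERS];
    int ids[NWORKERS];

    atomic_init(&next_idx, 0);
    for (int i = 0; i < NWORKERS; i++)
    {
        ids[i] = i;
        pthread_create(&th[i], NULL, worker, &ids[i]);
    }
    for (int i = 0; i < NWORKERS; i++)
        pthread_join(th[i], NULL);
    return 0;
}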

◆ parallel_vacuum_main()

void parallel_vacuum_main (dsm_segment *seg, shm_toc *toc)

Definition at line 3314 of file vacuumlazy.c.

References LVShared::active_nworkers, Assert, LVShared::cost_balance, DEBUG1, debug_query_string, LVShared::elevel, elevel, ereport, errmsg(), LVShared::for_cleanup, maintenance_work_mem, LVShared::maintenance_work_mem_worker, palloc0(), parallel_vacuum_index(), PARALLEL_VACUUM_KEY_DEAD_TUPLES, PARALLEL_VACUUM_KEY_QUERY_TEXT, PARALLEL_VACUUM_KEY_SHARED, pfree(), pgstat_report_activity(), LVShared::relid, RowExclusiveLock, ShareUpdateExclusiveLock, shm_toc_lookup(), STATE_RUNNING, table_close(), table_open(), vac_close_indexes(), vac_open_indexes(), VacuumActiveNWorkers, VacuumCostActive, VacuumCostBalance, VacuumCostBalanceLocal, VacuumCostDelay, VacuumPageDirty, VacuumPageHit, VacuumPageMiss, and VacuumSharedCostBalance.

3315 {
3316  Relation onerel;
3317  Relation *indrels;
3318  LVShared *lvshared;
3319  LVDeadTuples *dead_tuples;
3320  int nindexes;
3321  char *sharedquery;
3322  IndexBulkDeleteResult **stats;
3323 
3324  lvshared = (LVShared *) shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_SHARED,
3325  false);
3326  elevel = lvshared->elevel;
3327 
3328  ereport(DEBUG1,
3329  (errmsg("starting parallel vacuum worker for %s",
3330  lvshared->for_cleanup ? "cleanup" : "bulk delete")));
3331 
3332  /* Set debug_query_string for individual workers */
3333  sharedquery = shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_QUERY_TEXT, false);
3334  debug_query_string = sharedquery;
3335  pgstat_report_activity(STATE_RUNNING, debug_query_string);
3336 
3337  /*
3338  * Open table. The lock mode is the same as the leader process. It's
3339  * okay because the lock mode does not conflict among the parallel
3340  * workers.
3341  */
3342  onerel = table_open(lvshared->relid, ShareUpdateExclusiveLock);
3343 
3344  /*
3345  * Open all indexes. indrels are sorted in order by OID, which should
3346  * match the leader's ordering.
3347  */
3348  vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &indrels);
3349  Assert(nindexes > 0);
3350 
3351  /* Set dead tuple space */
3352  dead_tuples = (LVDeadTuples *) shm_toc_lookup(toc,
3353  PARALLEL_VACUUM_KEY_DEAD_TUPLES,
3354  false);
3355 
3356  /* Set cost-based vacuum delay */
3357  VacuumCostActive = (VacuumCostDelay > 0);
3358  VacuumCostBalance = 0;
3359  VacuumPageHit = 0;
3360  VacuumPageMiss = 0;
3361  VacuumPageDirty = 0;
3362  VacuumCostBalanceLocal = 0;
3363  VacuumSharedCostBalance = &(lvshared->cost_balance);
3364  VacuumActiveNWorkers = &(lvshared->active_nworkers);
3365 
3366  stats = (IndexBulkDeleteResult **)
3367  palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
3368 
3369  if (lvshared->maintenance_work_mem_worker > 0)
3370  maintenance_work_mem = lvshared->maintenance_work_mem_worker;
3371 
3372  /* Process indexes to perform vacuum/cleanup */
3373  parallel_vacuum_index(indrels, stats, lvshared, dead_tuples, nindexes);
3374 
3375  vac_close_indexes(nindexes, indrels, RowExclusiveLock);
3376  table_close(onerel, ShareUpdateExclusiveLock);
3377  pfree(stats);
3378 }
void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
Definition: vacuum.c:1976
int64 VacuumPageMiss
Definition: globals.c:144
#define DEBUG1
Definition: elog.h:25
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:133
pg_atomic_uint32 * VacuumActiveNWorkers
Definition: vacuum.c:77
int VacuumCostBalance
Definition: globals.c:147
#define PARALLEL_VACUUM_KEY_DEAD_TUPLES
Definition: vacuumlazy.c:138
Oid relid
Definition: vacuumlazy.c:179
void pgstat_report_activity(BackendState state, const char *cmd_str)
Definition: pgstat.c:3062
int64 VacuumPageHit
Definition: globals.c:143
int64 VacuumPageDirty
Definition: globals.c:145
int maintenance_work_mem_worker
Definition: vacuumlazy.c:210
#define PARALLEL_VACUUM_KEY_QUERY_TEXT
Definition: vacuumlazy.c:139
pg_atomic_uint32 cost_balance
Definition: vacuumlazy.c:217
void pfree(void *pointer)
Definition: mcxt.c:1056
#define RowExclusiveLock
Definition: lockdefs.h:38
int elevel
Definition: vacuumlazy.c:180
void vac_open_indexes(Relation relation, LOCKMODE lockmode, int *nindexes, Relation **Irel)
Definition: vacuum.c:1933
const char * debug_query_string
Definition: postgres.c:88
int VacuumCostBalanceLocal
Definition: vacuum.c:78
pg_atomic_uint32 * VacuumSharedCostBalance
Definition: vacuum.c:76
static int elevel
Definition: vacuumlazy.c:297
void * palloc0(Size size)
Definition: mcxt.c:980
#define PARALLEL_VACUUM_KEY_SHARED
Definition: vacuumlazy.c:137
pg_atomic_uint32 active_nworkers
Definition: vacuumlazy.c:224
#define ereport(elevel,...)
Definition: elog.h:144
int maintenance_work_mem
Definition: globals.c:122
static void parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats, LVShared *lvshared, LVDeadTuples *dead_tuples, int nindexes)
Definition: vacuumlazy.c:2107
#define ShareUpdateExclusiveLock
Definition: lockdefs.h:39
#define Assert(condition)
Definition: c.h:738
bool for_cleanup
Definition: vacuumlazy.c:187
int errmsg(const char *fmt,...)
Definition: elog.c:824
double VacuumCostDelay
Definition: globals.c:141
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:39
void * shm_toc_lookup(shm_toc *toc, uint64 key, bool noError)
Definition: shm_toc.c:232
bool VacuumCostActive
Definition: globals.c:148

◆ prepare_index_statistics()

static void prepare_index_statistics (LVShared *lvshared, bool *can_parallel_vacuum, int nindexes)

Definition at line 3011 of file vacuumlazy.c.

References Assert, LVShared::bitmap, BITMAPLEN, i, and IsAutoVacuumWorkerProcess().

Referenced by begin_parallel_vacuum().

3013 {
3014  int i;
3015 
3016  /* Currently, we don't support parallel vacuum for autovacuum */
3017  Assert(!IsAutoVacuumWorkerProcess());
3018 
3019  /* Set NULL for all indexes */
3020  memset(lvshared->bitmap, 0x00, BITMAPLEN(nindexes));
3021 
3022  for (i = 0; i < nindexes; i++)
3023  {
3024  if (!can_parallel_vacuum[i])
3025  continue;
3026 
3027  /* Set NOT NULL as this index does support parallelism */
3028  lvshared->bitmap[i >> 3] |= 1 << (i & 0x07);
3029  }
3030 }
#define BITMAPLEN(NATTS)
Definition: htup_details.h:547
bool IsAutoVacuumWorkerProcess(void)
Definition: autovacuum.c:3302
#define Assert(condition)
Definition: c.h:738
bits8 bitmap[FLEXIBLE_ARRAY_MEMBER]
Definition: vacuumlazy.c:233
int i
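
The bitmap written here is what the IndStatsIsNull() macro reads later: bit i of lvshared->bitmap says whether index i has a stats slot in the DSM segment at all. The bit arithmetic is the usual byte/bit split, with i >> 3 selecting the byte and i & 0x07 the bit within it. A small standalone sketch of both sides of that convention, with local stand-ins for the bitmap length macro:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define NINDEXES 10
#define BITMAP_LEN(n) (((n) + 7) / 8)   /* same idea as BITMAPLEN() */

/* Mark index i as participating (its stats slot is "NOT NULL"). */
static void
set_participates(unsigned char *bitmap, int i)
{
    bitmap[i >> 3] |= 1 << (i & 0x07);
}

/* Counterpart of the IndStatsIsNull() test: no bit means no stats slot. */
static bool
stats_is_null(const unsigned char *bitmap, int i)
{
    return (bitmap[i >> 3] & (1 << (i & 0x07))) == 0;
}

int
main(void)
{
    unsigned char bitmap[BITMAP_LEN(NINDEXES)];

    memset(bitmap, 0, sizeof(bitmap));      /* start with every slot NULL */
    set_participates(bitmap, 2);
    set_participates(bitmap, 9);

    for (int i = 0; i < NINDEXES; i++)
        printf("index %d: %s\n", i, stats_is_null(bitmap, i) ? "NULL" : "has slot");
    return 0;
}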

◆ should_attempt_truncation()

static bool should_attempt_truncation (VacuumParams *params, LVRelStats *vacrelstats)

Definition at line 2417 of file vacuumlazy.c.

References LVRelStats::nonempty_pages, old_snapshot_threshold, LVRelStats::rel_pages, REL_TRUNCATE_FRACTION, REL_TRUNCATE_MINIMUM, VacuumParams::truncate, and VACOPT_TERNARY_DISABLED.

Referenced by heap_vacuum_rel().

2418 {
2419  BlockNumber possibly_freeable;
2420 
2421  if (params->truncate == VACOPT_TERNARY_DISABLED)
2422  return false;
2423 
2424  possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
2425  if (possibly_freeable > 0 &&
2426  (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
2427  possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
2428  old_snapshot_threshold < 0)
2429  return true;
2430  else
2431  return false;
2432 }
BlockNumber rel_pages
Definition: vacuumlazy.c:277
uint32 BlockNumber
Definition: block.h:31
#define REL_TRUNCATE_MINIMUM
Definition: vacuumlazy.c:90
VacOptTernaryValue truncate
Definition: vacuum.h:223
BlockNumber nonempty_pages
Definition: vacuumlazy.c:288
int old_snapshot_threshold
Definition: snapmgr.c:75
#define REL_TRUNCATE_FRACTION
Definition: vacuumlazy.c:91
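
In other words, truncation is worth an exclusive-lock attempt only when the prospective gain is at least REL_TRUNCATE_MINIMUM (1000) pages or at least 1/REL_TRUNCATE_FRACTION (one sixteenth) of the table, and truncation has not been disabled. A standalone sketch of the same predicate with a few worked numbers; it assumes truncation is enabled and old_snapshot_threshold is off (-1), so those two tests are omitted.

#include <stdio.h>
#include <stdbool.h>

#define REL_TRUNCATE_MINIMUM  1000
#define REL_TRUNCATE_FRACTION 16

/* Same freeable-pages test as should_attempt_truncation(). */
static bool
worth_truncating(unsigned rel_pages, unsigned nonempty_pages)
{
    unsigned possibly_freeable = rel_pages - nonempty_pages;

    return possibly_freeable > 0 &&
        (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
         possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);
}

int
main(void)
{
    /* 10000-page table, last 2000 pages empty: 2000 >= 1000, so attempt it. */
    printf("%d\n", worth_truncating(10000, 8000));   /* 1 */
    /* Only 100 trailing empty pages of 10000: 100 < 1000 and 100 < 625. */
    printf("%d\n", worth_truncating(10000, 9900));   /* 0 */
    /* Small table: 50 empty of 400 pages; 50 >= 400/16 = 25, so attempt it. */
    printf("%d\n", worth_truncating(400, 350));      /* 1 */
    return 0;
}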

◆ skip_parallel_vacuum_index()

static bool skip_parallel_vacuum_index (Relation indrel, LVShared *lvshared)

Definition at line 3274 of file vacuumlazy.c.

References IndexAmRoutine::amparallelvacuumoptions, Assert, LVShared::first_time, LVShared::for_cleanup, RelationData::rd_indam, VACUUM_OPTION_PARALLEL_BULKDEL, VACUUM_OPTION_PARALLEL_CLEANUP, and VACUUM_OPTION_PARALLEL_COND_CLEANUP.

Referenced by parallel_vacuum_index(), and vacuum_indexes_leader().

3275 {
3276  uint8 vacoptions = indrel->rd_indam->amparallelvacuumoptions;
3277 
3278  /* first_time must be true only if for_cleanup is true */
3279  Assert(lvshared->for_cleanup || !lvshared->first_time);
3280 
3281  if (lvshared->for_cleanup)
3282  {
3283  /* Skip, if the index does not support parallel cleanup */
3284  if (((vacoptions & VACUUM_OPTION_PARALLEL_CLEANUP) == 0) &&
3285  ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) == 0))
3286  return true;
3287 
3288  /*
3289  * Skip, if the index supports parallel cleanup conditionally, but we
3290  * have already processed the index (for bulkdelete). See the
3291  * comments for option VACUUM_OPTION_PARALLEL_COND_CLEANUP to know
3292  * when indexes support parallel cleanup conditionally.
3293  */
3294  if (!lvshared->first_time &&
3295  ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) != 0))
3296  return true;
3297  }
3298  else if ((vacoptions & VACUUM_OPTION_PARALLEL_BULKDEL) == 0)
3299  {
3300  /* Skip if the index does not support parallel bulk deletion */
3301  return true;
3302  }
3303 
3304  return false;
3305 }
uint8 amparallelvacuumoptions
Definition: amapi.h:203
struct IndexAmRoutine * rd_indam
Definition: rel.h:168
unsigned char uint8
Definition: c.h:365
bool first_time
Definition: vacuumlazy.c:188
#define Assert(condition)
Definition: c.h:738
#define VACUUM_OPTION_PARALLEL_COND_CLEANUP
Definition: vacuum.h:52
#define VACUUM_OPTION_PARALLEL_BULKDEL
Definition: vacuum.h:45
bool for_cleanup
Definition: vacuumlazy.c:187
#define VACUUM_OPTION_PARALLEL_CLEANUP
Definition: vacuum.h:60
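
Put differently, the index AM advertises its parallel capabilities as a bitmask in amparallelvacuumoptions, and this routine just checks the bit relevant to the current phase (bulk delete vs. cleanup), treating conditional cleanup as usable only when no bulk-delete pass has run yet. The sketch below replays that decision with locally defined flag values; the real constants come from vacuum.h, and the "btree-like" option set is an assumption for the demonstration.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins for the VACUUM_OPTION_* bits in vacuum.h. */
#define OPT_PARALLEL_BULKDEL      (1 << 0)
#define OPT_PARALLEL_COND_CLEANUP (1 << 1)
#define OPT_PARALLEL_CLEANUP      (1 << 2)

/* Mirror of skip_parallel_vacuum_index(): true means the leader must
 * process this index itself rather than hand it to a worker. */
static bool
skip_in_parallel(unsigned vacoptions, bool for_cleanup, bool first_time)
{
    if (for_cleanup)
    {
        if ((vacoptions & (OPT_PARALLEL_CLEANUP | OPT_PARALLEL_COND_CLEANUP)) == 0)
            return true;
        if (!first_time && (vacoptions & OPT_PARALLEL_COND_CLEANUP) != 0)
            return true;
        return false;
    }
    return (vacoptions & OPT_PARALLEL_BULKDEL) == 0;
}

int
main(void)
{
    /* A btree-like AM: parallel bulk delete plus conditional cleanup. */
    unsigned opts = OPT_PARALLEL_BULKDEL | OPT_PARALLEL_COND_CLEANUP;

    printf("bulk delete in worker?   %s\n", skip_in_parallel(opts, false, false) ? "no" : "yes");
    printf("first cleanup in worker? %s\n", skip_in_parallel(opts, true, true) ? "no" : "yes");
    printf("later cleanup in worker? %s\n", skip_in_parallel(opts, true, false) ? "no" : "yes");
    return 0;
}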

◆ update_index_statistics()

static void update_index_statistics (Relation *Irel, IndexBulkDeleteResult **stats, int nindexes)

Definition at line 3036 of file vacuumlazy.c.

References Assert, i, InvalidMultiXactId, InvalidTransactionId, IsInParallelMode(), pfree(), and vac_update_relstats().

Referenced by lazy_scan_heap().

3038 {
3039  int i;
3040 
3041  Assert(!IsInParallelMode());
3042 
3043  for (i = 0; i < nindexes; i++)
3044  {
3045  if (stats[i] == NULL || stats[i]->estimated_count)
3046  continue;
3047 
3048  /* Update index statistics */
3049  vac_update_relstats(Irel[i],
3050  stats[i]->num_pages,
3051  stats[i]->num_index_tuples,
3052  0,
3053  false,
3054  InvalidTransactionId,
3055  InvalidMultiXactId,
3056  false);
3057  pfree(stats[i]);
3058  }
3059 }
void pfree(void *pointer)
Definition: mcxt.c:1056
bool IsInParallelMode(void)
Definition: xact.c:996
#define InvalidTransactionId
Definition: transam.h:31
#define InvalidMultiXactId
Definition: multixact.h:23
#define Assert(condition)
Definition: c.h:738
int i
void vac_update_relstats(Relation relation, BlockNumber num_pages, double num_tuples, BlockNumber num_all_visible_pages, bool hasindex, TransactionId frozenxid, MultiXactId minmulti, bool in_outer_xact)
Definition: vacuum.c:1208

◆ vac_cmp_itemptr()

static int vac_cmp_itemptr (const void *left, const void *right)

Definition at line 2798 of file vacuumlazy.c.

References ItemPointerGetBlockNumber, and ItemPointerGetOffsetNumber.

Referenced by lazy_tid_reaped().

2799 {
2800  BlockNumber lblk,
2801  rblk;
2802  OffsetNumber loff,
2803  roff;
2804 
2805  lblk = ItemPointerGetBlockNumber((ItemPointer) left);
2806  rblk = ItemPointerGetBlockNumber((ItemPointer) right);
2807 
2808  if (lblk < rblk)
2809  return -1;
2810  if (lblk > rblk)
2811  return 1;
2812 
2813  loff = ItemPointerGetOffsetNumber((ItemPointer) left);
2814  roff = ItemPointerGetOffsetNumber((ItemPointer) right);
2815 
2816  if (loff < roff)
2817  return -1;
2818  if (loff > roff)
2819  return 1;
2820 
2821  return 0;
2822 }
uint32 BlockNumber
Definition: block.h:31
uint16 OffsetNumber
Definition: off.h:24
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98

◆ vacuum_indexes_leader()

static void vacuum_indexes_leader (Relation *Irel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats, LVParallelState *lps, int nindexes)

Definition at line 2159 of file vacuumlazy.c.

References Assert, LVRelStats::dead_tuples, get_indstats(), i, IsParallelWorker, LVParallelState::lvshared, pg_atomic_add_fetch_u32(), pg_atomic_sub_fetch_u32(), skip_parallel_vacuum_index(), vacuum_one_index(), and VacuumActiveNWorkers.

Referenced by lazy_parallel_vacuum_indexes().

2162 {
2163  int i;
2164 
2165  Assert(!IsParallelWorker());
2166 
2167  /*
2168  * Increment the active worker count if we are able to launch any worker.
2169  */
2170  if (VacuumActiveNWorkers)
2171  pg_atomic_add_fetch_u32(VacuumActiveNWorkers, 1);
2172 
2173  for (i = 0; i < nindexes; i++)
2174  {
2175  LVSharedIndStats *shared_indstats;
2176 
2177  shared_indstats = get_indstats(lps->lvshared, i);
2178 
2179  /* Process the indexes skipped by parallel workers */
2180  if (shared_indstats == NULL ||
2181  skip_parallel_vacuum_index(Irel[i], lps->lvshared))
2182  vacuum_one_index(Irel[i], &(stats[i]), lps->lvshared,
2183  shared_indstats, vacrelstats->dead_tuples);
2184  }
2185 
2186  /*
2187  * We have completed the index vacuum so decrement the active worker
2188  * count.
2189  */
2190  if (VacuumActiveNWorkers)
2191  pg_atomic_sub_fetch_u32(VacuumActiveNWorkers, 1);
2192 }
pg_atomic_uint32 * VacuumActiveNWorkers
Definition: vacuum.c:77
LVDeadTuples * dead_tuples
Definition: vacuumlazy.c:289
static uint32 pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
Definition: atomics.h:401
static uint32 pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:386
static bool skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared)
Definition: vacuumlazy.c:3274
static LVSharedIndStats * get_indstats(LVShared *lvshared, int n)
Definition: vacuumlazy.c:3249
static void vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats, LVShared *lvshared, LVSharedIndStats *shared_indstats, LVDeadTuples *dead_tuples)
Definition: vacuumlazy.c:2201
#define IsParallelWorker()
Definition: parallel.h:61
#define Assert(condition)
Definition: c.h:738
LVShared * lvshared
Definition: vacuumlazy.c:260
int i

◆ vacuum_log_cleanup_info()

static void vacuum_log_cleanup_info (Relation rel, LVRelStats *vacrelstats)

Definition at line 649 of file vacuumlazy.c.

References LVRelStats::latestRemovedXid, log_heap_cleanup_info(), RelationData::rd_node, RelationNeedsWAL, TransactionIdIsValid, and XLogIsNeeded.

Referenced by lazy_vacuum_all_indexes().

650 {
651  /*
652  * Skip this for relations for which no WAL is to be written, or if we're
653  * not trying to support archive recovery.
654  */
655  if (!RelationNeedsWAL(rel) || !XLogIsNeeded())
656  return;
657 
658  /*
659  * No need to write the record at all unless it contains a valid value
660  */
661  if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
662  (void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
663 }
XLogRecPtr log_heap_cleanup_info(RelFileNode rnode, TransactionId latestRemovedXid)
Definition: heapam.c:7148
#define XLogIsNeeded()
Definition: xlog.h:182
RelFileNode rd_node
Definition: rel.h:55
TransactionId latestRemovedXid
Definition: vacuumlazy.c:291
#define RelationNeedsWAL(relation)
Definition: rel.h:537
#define TransactionIdIsValid(xid)
Definition: transam.h:41

◆ vacuum_one_index()

static void vacuum_one_index (Relation indrel, IndexBulkDeleteResult **stats, LVShared *lvshared, LVSharedIndStats *shared_indstats, LVDeadTuples *dead_tuples)

Definition at line 2201 of file vacuumlazy.c.

References LVShared::estimated_count, LVShared::for_cleanup, lazy_cleanup_index(), lazy_vacuum_index(), pfree(), LVShared::reltuples, LVSharedIndStats::stats, and LVSharedIndStats::updated.

Referenced by parallel_vacuum_index(), and vacuum_indexes_leader().

2204 {
2205  IndexBulkDeleteResult *bulkdelete_res = NULL;
2206 
2207  if (shared_indstats)
2208  {
2209  /* Get the space for IndexBulkDeleteResult */
2210  bulkdelete_res = &(shared_indstats->stats);
2211 
2212  /*
2213  * Update the pointer to the corresponding bulk-deletion result if
2214  * someone has already updated it.
2215  */
2216  if (shared_indstats->updated && *stats == NULL)
2217  *stats = bulkdelete_res;
2218  }
2219 
2220  /* Do vacuum or cleanup of the index */
2221  if (lvshared->for_cleanup)
2222  lazy_cleanup_index(indrel, stats, lvshared->reltuples,
2223  lvshared->estimated_count);
2224  else
2225  lazy_vacuum_index(indrel, stats, dead_tuples,
2226  lvshared->reltuples);
2227 
2228  /*
2229  * Copy the index bulk-deletion result returned from ambulkdelete and
2230  * amvacuumcleanup to the DSM segment the first time we receive it,
2231  * because the AM allocates it locally and a different vacuum process
2232  * might be the one to vacuum this index next time. The copy normally
2233  * happens only after the first index vacuuming pass; from then on we
2234  * pass the result stored in the DSM segment so that the AM updates it
2235  * directly.
2236  *
2237  * Since all vacuum workers write the bulk-deletion result at different
2238  * slots we can write them without locking.
2239  */
2240  if (shared_indstats && !shared_indstats->updated && *stats != NULL)
2241  {
2242  memcpy(bulkdelete_res, *stats, sizeof(IndexBulkDeleteResult));
2243  shared_indstats->updated = true;
2244 
2245  /*
2246  * Now that the stats[idx] points to the DSM segment, we don't need
2247  * the locally allocated results.
2248  */
2249  pfree(*stats);
2250  *stats = bulkdelete_res;
2251  }
2252 }
static void lazy_cleanup_index(Relation indrel, IndexBulkDeleteResult **stats, double reltuples, bool estimated_count)
Definition: vacuumlazy.c:2355
bool estimated_count
Definition: vacuumlazy.c:200
void pfree(void *pointer)
Definition: mcxt.c:1056
static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats, LVDeadTuples *dead_tuples, double reltuples)
Definition: vacuumlazy.c:2315
IndexBulkDeleteResult stats
Definition: vacuumlazy.c:251
double reltuples
Definition: vacuumlazy.c:199
bool for_cleanup
Definition: vacuumlazy.c:187
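
The net effect of the dance around shared_indstats->updated is a copy-once protocol: the first pass copies the AM's locally allocated IndexBulkDeleteResult into the per-index DSM slot and flips updated, and every later pass hands the AM a pointer straight into that slot so it updates the shared copy in place. A standalone sketch of the same pattern, with plain structs and malloc standing in for the DSM slot and the AM's result:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

typedef struct { double tuples_removed; unsigned passes; } BulkResult;

typedef struct
{
    bool       updated;   /* has the shared slot been filled once? */
    BulkResult stats;     /* per-index slot living in "shared memory" */
} SharedIndStats;

/* Stand-in for an index AM pass: allocates a result if given none,
 * otherwise updates the one it was handed. */
static BulkResult *
am_bulkdelete(BulkResult *stats, double removed)
{
    if (stats == NULL)
        stats = calloc(1, sizeof(BulkResult));
    stats->tuples_removed += removed;
    stats->passes += 1;
    return stats;
}

static BulkResult *
vacuum_one_index_sketch(SharedIndStats *shared, BulkResult *stats, double removed)
{
    /* Reuse the shared slot if a previous pass already filled it. */
    if (shared->updated && stats == NULL)
        stats = &shared->stats;

    stats = am_bulkdelete(stats, removed);

    /* First pass: copy the locally allocated result into the shared slot. */
    if (!shared->updated && stats != &shared->stats)
    {
        memcpy(&shared->stats, stats, sizeof(BulkResult));
        shared->updated = true;
        free(stats);
        stats = &shared->stats;
    }
    return stats;
}

int
main(void)
{
    SharedIndStats shared = {0};
    BulkResult *stats = NULL;

    stats = vacuum_one_index_sketch(&shared, stats, 100.0);  /* copies into slot */
    stats = vacuum_one_index_sketch(&shared, stats, 40.0);   /* updates in place */
    printf("removed %.0f tuples over %u passes\n",
           shared.stats.tuples_removed, shared.stats.passes);
    return 0;
}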

Variable Documentation

◆ elevel

static int elevel

Definition at line 297 of file vacuumlazy.c.

◆ FreezeLimit

static TransactionId FreezeLimit

Definition at line 300 of file vacuumlazy.c.

Referenced by heap_vacuum_rel(), lazy_check_needs_freeze(), and lazy_scan_heap().

◆ MultiXactCutoff

static MultiXactId MultiXactCutoff

Definition at line 301 of file vacuumlazy.c.

◆ OldestXmin

static TransactionId OldestXmin

Definition at line 299 of file vacuumlazy.c.

◆ vac_strategy

static BufferAccessStrategy vac_strategy

Definition at line 303 of file vacuumlazy.c.

Referenced by lazy_cleanup_index(), and lazy_vacuum_index().