PostgreSQL Source Code  git master
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/genam.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/tidstore.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "common/int.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "utils/lsyscache.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"
Include dependency graph for vacuumlazy.c:


Data Structures

struct  LVRelState
 
struct  LVSavedErrInfo
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */
 
#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define ParallelVacuumIsActive(vacrel)   ((vacrel)->pvs != NULL)
 

Typedefs

typedef struct LVRelState LVRelState
 
typedef struct LVSavedErrInfo LVSavedErrInfo
 

Enumerations

enum  VacErrPhase {
  VACUUM_ERRCB_PHASE_UNKNOWN , VACUUM_ERRCB_PHASE_SCAN_HEAP , VACUUM_ERRCB_PHASE_VACUUM_INDEX , VACUUM_ERRCB_PHASE_VACUUM_HEAP ,
  VACUUM_ERRCB_PHASE_INDEX_CLEANUP , VACUUM_ERRCB_PHASE_TRUNCATE
}
 

Functions

static void lazy_scan_heap (LVRelState *vacrel)
 
static bool heap_vac_scan_next_block (LVRelState *vacrel, BlockNumber *blkno, bool *all_visible_according_to_vm)
 
static void find_next_unskippable_block (LVRelState *vacrel, bool *skipsallvis)
 
static bool lazy_scan_new_or_empty (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
 
static void lazy_scan_prune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool all_visible_according_to_vm, bool *has_lpdead_items)
 
static bool lazy_scan_noprune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
 
static void lazy_vacuum (LVRelState *vacrel)
 
static bool lazy_vacuum_all_indexes (LVRelState *vacrel)
 
static void lazy_vacuum_heap_rel (LVRelState *vacrel)
 
static void lazy_vacuum_heap_page (LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
 
static bool lazy_check_wraparound_failsafe (LVRelState *vacrel)
 
static void lazy_cleanup_all_indexes (LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_vacuum_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_cleanup_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
 
static bool should_attempt_truncation (LVRelState *vacrel)
 
static void lazy_truncate_heap (LVRelState *vacrel)
 
static BlockNumber count_nondeletable_pages (LVRelState *vacrel, bool *lock_waiter_detected)
 
static void dead_items_alloc (LVRelState *vacrel, int nworkers)
 
static void dead_items_add (LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
 
static void dead_items_reset (LVRelState *vacrel)
 
static void dead_items_cleanup (LVRelState *vacrel)
 
static bool heap_page_is_all_visible (LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
 
static void update_relstats_all_indexes (LVRelState *vacrel)
 
static void vacuum_error_callback (void *arg)
 
static void update_vacuum_error_info (LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
 
static void restore_vacuum_error_info (LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
 
void heap_vacuum_rel (Relation rel, VacuumParams *params, BufferAccessStrategy bstrategy)
 
static int cmpOffsetNumbers (const void *a, const void *b)
 

Macro Definition Documentation

◆ BYPASS_THRESHOLD_PAGES

#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */

Definition at line 87 of file vacuumlazy.c.
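
To illustrate how the threshold is applied, here is a minimal sketch; the real test lives in lazy_vacuum() and also requires the dead-items memory usage to be small, and the numbers below are assumptions for illustration only.

    BlockNumber rel_pages = 100000;        /* assumed heap size in blocks */
    BlockNumber lpdead_item_pages = 1500;  /* assumed pages with LP_DEAD items */

    /* Index vacuuming may be bypassed when dead-item pages stay under 2% */
    bool bypass_possible =
        lpdead_item_pages < (BlockNumber) (rel_pages * BYPASS_THRESHOLD_PAGES);
    /* here: 1500 < 2000, so the bypass optimization could apply */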

◆ FAILSAFE_EVERY_PAGES

#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 93 of file vacuumlazy.c.
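
Worked out with the default block size (an assumption; BLCKSZ is a compile-time option):

    /* With BLCKSZ = 8192: ((uint64) 4 * 1024 * 1024 * 1024) / 8192 = 524288 blocks,
     * i.e. the wraparound failsafe is rechecked roughly once per 4GB of heap scanned. */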

◆ ParallelVacuumIsActive

#define ParallelVacuumIsActive (   vacrel)    ((vacrel)->pvs != NULL)

Definition at line 121 of file vacuumlazy.c.

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 115 of file vacuumlazy.c.
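
Because the value is a power of two it can be used as a bit mask, which is how count_nondeletable_pages() finds the start of the current prefetch window. A minimal sketch, where the block number is an assumption and rel stands for an already-open Relation:

    BlockNumber blkno = 2749;
    BlockNumber prefetchStart = blkno & ~(PREFETCH_SIZE - 1);   /* = 2720 */

    for (BlockNumber pblkno = prefetchStart; pblkno <= blkno; pblkno++)
        PrefetchBuffer(rel, MAIN_FORKNUM, pblkno);   /* issue OS readahead hints */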

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 70 of file vacuumlazy.c.

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 69 of file vacuumlazy.c.

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 109 of file vacuumlazy.c.

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 102 of file vacuumlazy.c.
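
Worked out with the default block size (an assumption; BLCKSZ is a compile-time option):

    /* With BLCKSZ = 8192: ((uint64) 8 * 1024 * 1024 * 1024) / 8192 = 1048576 blocks,
     * i.e. in tables with no indexes the FSM is vacuumed roughly once per 8GB scanned. */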

◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 79 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 81 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 80 of file vacuumlazy.c.
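
Taken together, the three truncate-lock settings relate as sketched below; the actual retry loop lives in lazy_truncate_heap().

    /* lazy_truncate_heap() retries the AccessExclusiveLock every
     * VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL (50) ms and gives up after
     * VACUUM_TRUNCATE_LOCK_TIMEOUT (5000) ms, i.e. at most 5000 / 50 = 100
     * acquisition attempts.  Once the lock is held, count_nondeletable_pages()
     * checks for conflicting lock waiters about every
     * VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL (20) ms and suspends the truncate
     * if one is found. */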

Typedef Documentation

◆ LVRelState

typedef struct LVRelState LVRelState

◆ LVSavedErrInfo

typedef struct LVSavedErrInfo LVSavedErrInfo

Enumeration Type Documentation

◆ VacErrPhase

Enumerator
VACUUM_ERRCB_PHASE_UNKNOWN 
VACUUM_ERRCB_PHASE_SCAN_HEAP 
VACUUM_ERRCB_PHASE_VACUUM_INDEX 
VACUUM_ERRCB_PHASE_VACUUM_HEAP 
VACUUM_ERRCB_PHASE_INDEX_CLEANUP 
VACUUM_ERRCB_PHASE_TRUNCATE 

Definition at line 124 of file vacuumlazy.c.

125 {
126   VACUUM_ERRCB_PHASE_UNKNOWN,
127   VACUUM_ERRCB_PHASE_SCAN_HEAP,
128   VACUUM_ERRCB_PHASE_VACUUM_INDEX,
129   VACUUM_ERRCB_PHASE_VACUUM_HEAP,
130   VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
131   VACUUM_ERRCB_PHASE_TRUNCATE
132 } VacErrPhase;
VacErrPhase
Definition: vacuumlazy.c:125
@ VACUUM_ERRCB_PHASE_SCAN_HEAP
Definition: vacuumlazy.c:127
@ VACUUM_ERRCB_PHASE_VACUUM_INDEX
Definition: vacuumlazy.c:128
@ VACUUM_ERRCB_PHASE_TRUNCATE
Definition: vacuumlazy.c:131
@ VACUUM_ERRCB_PHASE_INDEX_CLEANUP
Definition: vacuumlazy.c:130
@ VACUUM_ERRCB_PHASE_VACUUM_HEAP
Definition: vacuumlazy.c:129
@ VACUUM_ERRCB_PHASE_UNKNOWN
Definition: vacuumlazy.c:126

Function Documentation

◆ cmpOffsetNumbers()

static int cmpOffsetNumbers ( const void *  a,
const void *  b 
)
static

Definition at line 1395 of file vacuumlazy.c.

1396 {
1397  return pg_cmp_u16(*(const OffsetNumber *) a, *(const OffsetNumber *) b);
1398 }
static int pg_cmp_u16(uint16 a, uint16 b)
Definition: int.h:592
int b
Definition: isn.c:69
int a
Definition: isn.c:68
uint16 OffsetNumber
Definition: off.h:24

References a, b, and pg_cmp_u16().

Referenced by lazy_scan_prune().
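
A minimal usage sketch; the array contents are assumptions for illustration, and the real caller sorts the dead-item offsets it collected for a page before recording them:

    OffsetNumber deadoffsets[] = {17, 3, 42, 8};

    qsort(deadoffsets, lengthof(deadoffsets), sizeof(OffsetNumber),
          cmpOffsetNumbers);
    /* deadoffsets is now {3, 8, 17, 42} */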

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages ( LVRelState vacrel,
bool *  lock_waiter_detected 
)
static

Definition at line 2688 of file vacuumlazy.c.

2689 {
2690  BlockNumber blkno;
2691  BlockNumber prefetchedUntil;
2692  instr_time starttime;
2693 
2694  /* Initialize the starttime if we check for conflicting lock requests */
2695  INSTR_TIME_SET_CURRENT(starttime);
2696 
2697  /*
2698  * Start checking blocks at what we believe relation end to be and move
2699  * backwards. (Strange coding of loop control is needed because blkno is
2700  * unsigned.) To make the scan faster, we prefetch a few blocks at a time
2701  * in forward direction, so that OS-level readahead can kick in.
2702  */
2703  blkno = vacrel->rel_pages;
2704  StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
2705                    "prefetch size must be power of 2");
2706  prefetchedUntil = InvalidBlockNumber;
2707  while (blkno > vacrel->nonempty_pages)
2708  {
2709  Buffer buf;
2710  Page page;
2711  OffsetNumber offnum,
2712  maxoff;
2713  bool hastup;
2714 
2715  /*
2716  * Check if another process requests a lock on our relation. We are
2717  * holding an AccessExclusiveLock here, so they will be waiting. We
2718  * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
2719  * only check if that interval has elapsed once every 32 blocks to
2720  * keep the number of system calls and actual shared lock table
2721  * lookups to a minimum.
2722  */
2723  if ((blkno % 32) == 0)
2724  {
2725  instr_time currenttime;
2726  instr_time elapsed;
2727 
2728  INSTR_TIME_SET_CURRENT(currenttime);
2729  elapsed = currenttime;
2730  INSTR_TIME_SUBTRACT(elapsed, starttime);
2731  if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
2732      >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
2733  {
2734  if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
2735  {
2736  ereport(vacrel->verbose ? INFO : DEBUG2,
2737  (errmsg("table \"%s\": suspending truncate due to conflicting lock request",
2738  vacrel->relname)));
2739 
2740  *lock_waiter_detected = true;
2741  return blkno;
2742  }
2743  starttime = currenttime;
2744  }
2745  }
2746 
2747  /*
2748  * We don't insert a vacuum delay point here, because we have an
2749  * exclusive lock on the table which we want to hold for as short a
2750  * time as possible. We still need to check for interrupts however.
2751  */
2752  CHECK_FOR_INTERRUPTS();
2753 
2754  blkno--;
2755 
2756  /* If we haven't prefetched this lot yet, do so now. */
2757  if (prefetchedUntil > blkno)
2758  {
2759  BlockNumber prefetchStart;
2760  BlockNumber pblkno;
2761 
2762  prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
2763  for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
2764  {
2765  PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
2766  CHECK_FOR_INTERRUPTS();
2767  }
2768  prefetchedUntil = prefetchStart;
2769  }
2770 
2771  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
2772  vacrel->bstrategy);
2773 
2774  /* In this phase we only need shared access to the buffer */
2775  LockBuffer(buf, BUFFER_LOCK_SHARE);
2776 
2777  page = BufferGetPage(buf);
2778 
2779  if (PageIsNew(page) || PageIsEmpty(page))
2780  {
2781  UnlockReleaseBuffer(buf);
2782  continue;
2783  }
2784 
2785  hastup = false;
2786  maxoff = PageGetMaxOffsetNumber(page);
2787  for (offnum = FirstOffsetNumber;
2788  offnum <= maxoff;
2789  offnum = OffsetNumberNext(offnum))
2790  {
2791  ItemId itemid;
2792 
2793  itemid = PageGetItemId(page, offnum);
2794 
2795  /*
2796  * Note: any non-unused item should be taken as a reason to keep
2797  * this page. Even an LP_DEAD item makes truncation unsafe, since
2798  * we must not have cleaned out its index entries.
2799  */
2800  if (ItemIdIsUsed(itemid))
2801  {
2802  hastup = true;
2803  break; /* can stop scanning */
2804  }
2805  } /* scan along page */
2806 
2807  UnlockReleaseBuffer(buf);
2808 
2809  /* Done scanning if we found a tuple here */
2810  if (hastup)
2811  return blkno + 1;
2812  }
2813 
2814  /*
2815  * If we fall out of the loop, all the previously-thought-to-be-empty
2816  * pages still are; we need not bother to look at the last known-nonempty
2817  * page.
2818  */
2819  return vacrel->nonempty_pages;
2820 }
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
int Buffer
Definition: buf.h:23
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:639
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4941
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:5158
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:793
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:190
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:400
@ RBM_NORMAL
Definition: bufmgr.h:45
static bool PageIsEmpty(Page page)
Definition: bufpage.h:223
Pointer Page
Definition: bufpage.h:81
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:243
static bool PageIsNew(Page page)
Definition: bufpage.h:233
static OffsetNumber PageGetMaxOffsetNumber(Page page)
Definition: bufpage.h:372
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:917
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define DEBUG2
Definition: elog.h:29
#define INFO
Definition: elog.h:34
#define ereport(elevel,...)
Definition: elog.h:149
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:122
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:181
#define INSTR_TIME_GET_MICROSEC(t)
Definition: instr_time.h:194
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:362
#define AccessExclusiveLock
Definition: lockdefs.h:43
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
#define FirstOffsetNumber
Definition: off.h:27
static char * buf
Definition: pg_test_fsync.c:72
@ MAIN_FORKNUM
Definition: relpath.h:58
bool verbose
Definition: vacuumlazy.c:173
BlockNumber nonempty_pages
Definition: vacuumlazy.c:194
Relation rel
Definition: vacuumlazy.c:137
BlockNumber rel_pages
Definition: vacuumlazy.c:188
BufferAccessStrategy bstrategy
Definition: vacuumlazy.c:142
char * relname
Definition: vacuumlazy.c:168
#define PREFETCH_SIZE
Definition: vacuumlazy.c:115
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL
Definition: vacuumlazy.c:79

References AccessExclusiveLock, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BufferGetPage(), CHECK_FOR_INTERRUPTS, DEBUG2, ereport, errmsg(), FirstOffsetNumber, INFO, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelState::nonempty_pages, OffsetNumberNext, PageGetItemId(), PageGetMaxOffsetNumber(), PageIsEmpty(), PageIsNew(), PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelState::rel, LVRelState::rel_pages, LVRelState::relname, StaticAssertStmt, UnlockReleaseBuffer(), VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and LVRelState::verbose.

Referenced by lazy_truncate_heap().
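
The elapsed-time check above follows the usual instr_time.h pattern; a minimal sketch, with the work in the middle left as a placeholder:

    instr_time starttime, currenttime, elapsed;

    INSTR_TIME_SET_CURRENT(starttime);
    /* ... scan a batch of blocks ... */
    INSTR_TIME_SET_CURRENT(currenttime);

    elapsed = currenttime;
    INSTR_TIME_SUBTRACT(elapsed, starttime);
    if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000) >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
    {
        /* at least 20 ms have passed: check LockHasWaitersRelation() and, if
         * someone is waiting, suspend the truncate; otherwise restart the timer */
        starttime = currenttime;
    }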

◆ dead_items_add()

static void dead_items_add ( LVRelState vacrel,
BlockNumber  blkno,
OffsetNumber offsets,
int  num_offsets 
)
static

Definition at line 2895 of file vacuumlazy.c.

2897 {
2898  TidStore *dead_items = vacrel->dead_items;
2899  const int prog_index[2] = {
2902  };
2903  int64 prog_val[2];
2904 
2905  TidStoreSetBlockOffsets(dead_items, blkno, offsets, num_offsets);
2906  vacrel->dead_items_info->num_items += num_offsets;
2907 
2908  /* update the progress information */
2909  prog_val[0] = vacrel->dead_items_info->num_items;
2910  prog_val[1] = TidStoreMemoryUsage(dead_items);
2911  pgstat_progress_update_multi_param(2, prog_index, prog_val);
2912 }
void pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val)
#define PROGRESS_VACUUM_DEAD_TUPLE_BYTES
Definition: progress.h:27
#define PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS
Definition: progress.h:28
VacDeadItemsInfo * dead_items_info
Definition: vacuumlazy.c:186
TidStore * dead_items
Definition: vacuumlazy.c:185
int64 num_items
Definition: vacuum.h:288
void TidStoreSetBlockOffsets(TidStore *ts, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
Definition: tidstore.c:353
size_t TidStoreMemoryUsage(TidStore *ts)
Definition: tidstore.c:540

References LVRelState::dead_items, LVRelState::dead_items_info, VacDeadItemsInfo::num_items, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS, TidStoreMemoryUsage(), and TidStoreSetBlockOffsets().

Referenced by lazy_scan_noprune(), and lazy_scan_prune().
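
A sketch of the calling pattern; the offsets shown are assumptions, and vacrel and blkno are assumed to be in scope. The real callers collect the LP_DEAD offsets found on one heap page and record them all in a single call:

    OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
    int          lpdead_items = 0;

    /* ... while scanning the page, remember each LP_DEAD line pointer ... */
    deadoffsets[lpdead_items++] = 3;
    deadoffsets[lpdead_items++] = 8;

    if (lpdead_items > 0)
        dead_items_add(vacrel, blkno, deadoffsets, lpdead_items);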

◆ dead_items_alloc()

static void dead_items_alloc ( LVRelState vacrel,
int  nworkers 
)
static

Definition at line 2830 of file vacuumlazy.c.

2831 {
2832  VacDeadItemsInfo *dead_items_info;
2833  int vac_work_mem = AmAutoVacuumWorkerProcess() &&
2834  autovacuum_work_mem != -1 ?
2835  autovacuum_work_mem : maintenance_work_mem;
2836 
2837  /*
2838  * Initialize state for a parallel vacuum. As of now, only one worker can
2839  * be used for an index, so we invoke parallelism only if there are at
2840  * least two indexes on a table.
2841  */
2842  if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)
2843  {
2844  /*
2845  * Since parallel workers cannot access data in temporary tables, we
2846  * can't perform parallel vacuum on them.
2847  */
2848  if (RelationUsesLocalBuffers(vacrel->rel))
2849  {
2850  /*
2851  * Give warning only if the user explicitly tries to perform a
2852  * parallel vacuum on the temporary table.
2853  */
2854  if (nworkers > 0)
2855  ereport(WARNING,
2856  (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
2857  vacrel->relname)));
2858  }
2859  else
2860  vacrel->pvs = parallel_vacuum_init(vacrel->rel, vacrel->indrels,
2861  vacrel->nindexes, nworkers,
2862  vac_work_mem,
2863  vacrel->verbose ? INFO : DEBUG2,
2864  vacrel->bstrategy);
2865 
2866  /*
2867  * If parallel mode started, dead_items and dead_items_info spaces are
2868  * allocated in DSM.
2869  */
2870  if (ParallelVacuumIsActive(vacrel))
2871  {
2872  vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs,
2873  &vacrel->dead_items_info);
2874  return;
2875  }
2876  }
2877 
2878  /*
2879  * Serial VACUUM case. Allocate both dead_items and dead_items_info
2880  * locally.
2881  */
2882 
2883  dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));
2884  dead_items_info->max_bytes = vac_work_mem * 1024L;
2885  dead_items_info->num_items = 0;
2886  vacrel->dead_items_info = dead_items_info;
2887 
2888  vacrel->dead_items = TidStoreCreateLocal(dead_items_info->max_bytes, true);
2889 }
int autovacuum_work_mem
Definition: autovacuum.c:119
#define WARNING
Definition: elog.h:36
int maintenance_work_mem
Definition: globals.c:132
void * palloc(Size size)
Definition: mcxt.c:1317
#define AmAutoVacuumWorkerProcess()
Definition: miscadmin.h:373
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:637
ParallelVacuumState * pvs
Definition: vacuumlazy.c:143
int nindexes
Definition: vacuumlazy.c:139
Relation * indrels
Definition: vacuumlazy.c:138
bool do_index_vacuuming
Definition: vacuumlazy.c:153
size_t max_bytes
Definition: vacuum.h:287
TidStore * TidStoreCreateLocal(size_t max_bytes, bool insert_only)
Definition: tidstore.c:162
#define ParallelVacuumIsActive(vacrel)
Definition: vacuumlazy.c:121
ParallelVacuumState * parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, int nrequested_workers, int vac_work_mem, int elevel, BufferAccessStrategy bstrategy)
TidStore * parallel_vacuum_get_dead_items(ParallelVacuumState *pvs, VacDeadItemsInfo **dead_items_info_p)

References AmAutoVacuumWorkerProcess, autovacuum_work_mem, LVRelState::bstrategy, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_vacuuming, ereport, errmsg(), LVRelState::indrels, INFO, maintenance_work_mem, VacDeadItemsInfo::max_bytes, LVRelState::nindexes, VacDeadItemsInfo::num_items, palloc(), parallel_vacuum_get_dead_items(), parallel_vacuum_init(), ParallelVacuumIsActive, LVRelState::pvs, LVRelState::rel, RelationUsesLocalBuffers, LVRelState::relname, TidStoreCreateLocal(), LVRelState::verbose, and WARNING.

Referenced by heap_vacuum_rel().
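
The memory budget chosen above, spelled out as a sketch; both GUCs are expressed in kilobytes:

    /* Autovacuum workers honor autovacuum_work_mem when it is set (!= -1);
     * everything else falls back to maintenance_work_mem. */
    int    vac_work_mem = AmAutoVacuumWorkerProcess() && autovacuum_work_mem != -1
                          ? autovacuum_work_mem
                          : maintenance_work_mem;
    size_t max_bytes = (size_t) vac_work_mem * 1024;  /* cap on the dead-items TID store */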

◆ dead_items_cleanup()

static void dead_items_cleanup ( LVRelState vacrel)
static

Definition at line 2940 of file vacuumlazy.c.

2941 {
2942  if (!ParallelVacuumIsActive(vacrel))
2943  {
2944  /* Don't bother with pfree here */
2945  return;
2946  }
2947 
2948  /* End parallel mode */
2949  parallel_vacuum_end(vacrel->pvs, vacrel->indstats);
2950  vacrel->pvs = NULL;
2951 }
IndexBulkDeleteResult ** indstats
Definition: vacuumlazy.c:200
void parallel_vacuum_end(ParallelVacuumState *pvs, IndexBulkDeleteResult **istats)

References LVRelState::indstats, parallel_vacuum_end(), ParallelVacuumIsActive, and LVRelState::pvs.

Referenced by heap_vacuum_rel().

◆ dead_items_reset()

static void dead_items_reset ( LVRelState vacrel)
static

Definition at line 2918 of file vacuumlazy.c.

2919 {
2920  TidStore *dead_items = vacrel->dead_items;
2921 
2922  if (ParallelVacuumIsActive(vacrel))
2923  {
2924  parallel_vacuum_reset_dead_items(vacrel->pvs);
2925  return;
2926  }
2927 
2928  /* Recreate the tidstore with the same max_bytes limitation */
2929  TidStoreDestroy(dead_items);
2930  vacrel->dead_items = TidStoreCreateLocal(vacrel->dead_items_info->max_bytes, true);
2931 
2932  /* Reset the counter */
2933  vacrel->dead_items_info->num_items = 0;
2934 }
void TidStoreDestroy(TidStore *ts)
Definition: tidstore.c:325
void parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs)

References LVRelState::dead_items, LVRelState::dead_items_info, VacDeadItemsInfo::max_bytes, VacDeadItemsInfo::num_items, parallel_vacuum_reset_dead_items(), ParallelVacuumIsActive, LVRelState::pvs, TidStoreCreateLocal(), and TidStoreDestroy().

Referenced by lazy_vacuum().

◆ find_next_unskippable_block()

static void find_next_unskippable_block ( LVRelState vacrel,
bool *  skipsallvis 
)
static

Definition at line 1192 of file vacuumlazy.c.

1193 {
1194  BlockNumber rel_pages = vacrel->rel_pages;
1195  BlockNumber next_unskippable_block = vacrel->next_unskippable_block + 1;
1196  Buffer next_unskippable_vmbuffer = vacrel->next_unskippable_vmbuffer;
1197  bool next_unskippable_allvis;
1198 
1199  *skipsallvis = false;
1200 
1201  for (;;)
1202  {
1203  uint8 mapbits = visibilitymap_get_status(vacrel->rel,
1204  next_unskippable_block,
1205  &next_unskippable_vmbuffer);
1206 
1207  next_unskippable_allvis = (mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0;
1208 
1209  /*
1210  * A block is unskippable if it is not all visible according to the
1211  * visibility map.
1212  */
1213  if (!next_unskippable_allvis)
1214  {
1215  Assert((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0);
1216  break;
1217  }
1218 
1219  /*
1220  * Caller must scan the last page to determine whether it has tuples
1221  * (caller must have the opportunity to set vacrel->nonempty_pages).
1222  * This rule avoids having lazy_truncate_heap() take access-exclusive
1223  * lock on rel to attempt a truncation that fails anyway, just because
1224  * there are tuples on the last page (it is likely that there will be
1225  * tuples on other nearby pages as well, but those can be skipped).
1226  *
1227  * Implement this by always treating the last block as unsafe to skip.
1228  */
1229  if (next_unskippable_block == rel_pages - 1)
1230  break;
1231 
1232  /* DISABLE_PAGE_SKIPPING makes all skipping unsafe */
1233  if (!vacrel->skipwithvm)
1234  break;
1235 
1236  /*
1237  * Aggressive VACUUM caller can't skip pages just because they are
1238  * all-visible. They may still skip all-frozen pages, which can't
1239  * contain XIDs < OldestXmin (XIDs that aren't already frozen by now).
1240  */
1241  if ((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0)
1242  {
1243  if (vacrel->aggressive)
1244  break;
1245 
1246  /*
1247  * All-visible block is safe to skip in non-aggressive case. But
1248  * remember that the final range contains such a block for later.
1249  */
1250  *skipsallvis = true;
1251  }
1252 
1253  next_unskippable_block++;
1254  }
1255 
1256  /* write the local variables back to vacrel */
1257  vacrel->next_unskippable_block = next_unskippable_block;
1258  vacrel->next_unskippable_allvis = next_unskippable_allvis;
1259  vacrel->next_unskippable_vmbuffer = next_unskippable_vmbuffer;
1260 }
#define Assert(condition)
Definition: c.h:837
unsigned char uint8
Definition: c.h:490
Buffer next_unskippable_vmbuffer
Definition: vacuumlazy.c:216
bool aggressive
Definition: vacuumlazy.c:146
BlockNumber next_unskippable_block
Definition: vacuumlazy.c:214
bool skipwithvm
Definition: vacuumlazy.c:148
bool next_unskippable_allvis
Definition: vacuumlazy.c:215
uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE

References LVRelState::aggressive, Assert, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_vmbuffer, LVRelState::rel, LVRelState::rel_pages, LVRelState::skipwithvm, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_get_status().

Referenced by heap_vac_scan_next_block().
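
A sketch of how the visibility-map bits drive the decision (the same tests as in the loop above; rel, blkno and vmbuffer are assumed to be in scope):

    uint8 mapbits = visibilitymap_get_status(rel, blkno, &vmbuffer);
    bool  all_visible = (mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0;
    bool  all_frozen  = (mapbits & VISIBILITYMAP_ALL_FROZEN) != 0;

    /* all_frozen implies all_visible.  A non-aggressive VACUUM may skip any
     * all-visible page; an aggressive VACUUM may only skip all-frozen pages. */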

◆ heap_page_is_all_visible()

static bool heap_page_is_all_visible ( LVRelState vacrel,
Buffer  buf,
TransactionId visibility_cutoff_xid,
bool *  all_frozen 
)
static

Definition at line 2965 of file vacuumlazy.c.

2968 {
2969  Page page = BufferGetPage(buf);
2970  BlockNumber blockno = BufferGetBlockNumber(buf);
2971  OffsetNumber offnum,
2972  maxoff;
2973  bool all_visible = true;
2974 
2975  *visibility_cutoff_xid = InvalidTransactionId;
2976  *all_frozen = true;
2977 
2978  maxoff = PageGetMaxOffsetNumber(page);
2979  for (offnum = FirstOffsetNumber;
2980  offnum <= maxoff && all_visible;
2981  offnum = OffsetNumberNext(offnum))
2982  {
2983  ItemId itemid;
2984  HeapTupleData tuple;
2985 
2986  /*
2987  * Set the offset number so that we can display it along with any
2988  * error that occurred while processing this tuple.
2989  */
2990  vacrel->offnum = offnum;
2991  itemid = PageGetItemId(page, offnum);
2992 
2993  /* Unused or redirect line pointers are of no interest */
2994  if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
2995  continue;
2996 
2997  ItemPointerSet(&(tuple.t_self), blockno, offnum);
2998 
2999  /*
3000  * Dead line pointers can have index pointers pointing to them. So
3001  * they can't be treated as visible
3002  */
3003  if (ItemIdIsDead(itemid))
3004  {
3005  all_visible = false;
3006  *all_frozen = false;
3007  break;
3008  }
3009 
3010  Assert(ItemIdIsNormal(itemid));
3011 
3012  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
3013  tuple.t_len = ItemIdGetLength(itemid);
3014  tuple.t_tableOid = RelationGetRelid(vacrel->rel);
3015 
3016  switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
3017  buf))
3018  {
3019  case HEAPTUPLE_LIVE:
3020  {
3021  TransactionId xmin;
3022 
3023  /* Check comments in lazy_scan_prune. */
3024  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
3025  {
3026  all_visible = false;
3027  *all_frozen = false;
3028  break;
3029  }
3030 
3031  /*
3032  * The inserter definitely committed. But is it old enough
3033  * that everyone sees it as committed?
3034  */
3035  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
3036  if (!TransactionIdPrecedes(xmin,
3037  vacrel->cutoffs.OldestXmin))
3038  {
3039  all_visible = false;
3040  *all_frozen = false;
3041  break;
3042  }
3043 
3044  /* Track newest xmin on page. */
3045  if (TransactionIdFollows(xmin, *visibility_cutoff_xid) &&
3046  TransactionIdIsNormal(xmin))
3047  *visibility_cutoff_xid = xmin;
3048 
3049  /* Check whether this tuple is already frozen or not */
3050  if (all_visible && *all_frozen &&
3051  heap_tuple_needs_eventual_freeze(tuple.t_data))
3052  *all_frozen = false;
3053  }
3054  break;
3055 
3056  case HEAPTUPLE_DEAD:
3057  case HEAPTUPLE_RECENTLY_DEAD:
3058  case HEAPTUPLE_INSERT_IN_PROGRESS:
3059  case HEAPTUPLE_DELETE_IN_PROGRESS:
3060  {
3061  all_visible = false;
3062  *all_frozen = false;
3063  break;
3064  }
3065  default:
3066  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
3067  break;
3068  }
3069  } /* scan along page */
3070 
3071  /* Clear the offset information once we have processed the given page. */
3072  vacrel->offnum = InvalidOffsetNumber;
3073 
3074  return all_visible;
3075 }
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:3724
static Item PageGetItem(Page page, ItemId itemId)
Definition: bufpage.h:354
uint32 TransactionId
Definition: c.h:631
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
Definition: heapam.c:7648
@ HEAPTUPLE_RECENTLY_DEAD
Definition: heapam.h:128
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition: heapam.h:129
@ HEAPTUPLE_LIVE
Definition: heapam.h:127
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition: heapam.h:130
@ HEAPTUPLE_DEAD
Definition: heapam.h:126
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:309
#define HeapTupleHeaderXminCommitted(tup)
Definition: htup_details.h:320
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition: itemptr.h:135
#define InvalidOffsetNumber
Definition: off.h:26
#define RelationGetRelid(relation)
Definition: rel.h:505
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
OffsetNumber offnum
Definition: vacuumlazy.c:171
struct VacuumCutoffs cutoffs
Definition: vacuumlazy.c:158
TransactionId OldestXmin
Definition: vacuum.h:267
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:280
bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition: transam.c:314
#define InvalidTransactionId
Definition: transam.h:31
#define TransactionIdIsNormal(xid)
Definition: transam.h:42

References Assert, buf, BufferGetBlockNumber(), BufferGetPage(), LVRelState::cutoffs, elog, ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdIsNormal, and TransactionIdPrecedes().

Referenced by lazy_scan_prune(), and lazy_vacuum_heap_page().

◆ heap_vac_scan_next_block()

static bool heap_vac_scan_next_block ( LVRelState vacrel,
BlockNumber blkno,
bool *  all_visible_according_to_vm 
)
static

Definition at line 1094 of file vacuumlazy.c.

1096 {
1097  BlockNumber next_block;
1098 
1099  /* relies on InvalidBlockNumber + 1 overflowing to 0 on first call */
1100  next_block = vacrel->current_block + 1;
1101 
1102  /* Have we reached the end of the relation? */
1103  if (next_block >= vacrel->rel_pages)
1104  {
1105  if (BufferIsValid(vacrel->next_unskippable_vmbuffer))
1106  {
1107  ReleaseBuffer(vacrel->next_unskippable_vmbuffer);
1108  vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1109  }
1110  *blkno = vacrel->rel_pages;
1111  return false;
1112  }
1113 
1114  /*
1115  * We must be in one of the three following states:
1116  */
1117  if (next_block > vacrel->next_unskippable_block ||
1118  vacrel->next_unskippable_vmbuffer == InvalidBuffer)
1119  {
1120  /*
1121  * 1. We have just processed an unskippable block (or we're at the
1122  * beginning of the scan). Find the next unskippable block using the
1123  * visibility map.
1124  */
1125  bool skipsallvis;
1126 
1127  find_next_unskippable_block(vacrel, &skipsallvis);
1128 
1129  /*
1130  * We now know the next block that we must process. It can be the
1131  * next block after the one we just processed, or something further
1132  * ahead. If it's further ahead, we can jump to it, but we choose to
1133  * do so only if we can skip at least SKIP_PAGES_THRESHOLD consecutive
1134  * pages. Since we're reading sequentially, the OS should be doing
1135  * readahead for us, so there's no gain in skipping a page now and
1136  * then. Skipping such a range might even discourage sequential
1137  * detection.
1138  *
1139  * This test also enables more frequent relfrozenxid advancement
1140  * during non-aggressive VACUUMs. If the range has any all-visible
1141  * pages then skipping makes updating relfrozenxid unsafe, which is a
1142  * real downside.
1143  */
1144  if (vacrel->next_unskippable_block - next_block >= SKIP_PAGES_THRESHOLD)
1145  {
1146  next_block = vacrel->next_unskippable_block;
1147  if (skipsallvis)
1148  vacrel->skippedallvis = true;
1149  }
1150  }
1151 
1152  /* Now we must be in one of the two remaining states: */
1153  if (next_block < vacrel->next_unskippable_block)
1154  {
1155  /*
1156  * 2. We are processing a range of blocks that we could have skipped
1157  * but chose not to. We know that they are all-visible in the VM,
1158  * otherwise they would've been unskippable.
1159  */
1160  *blkno = vacrel->current_block = next_block;
1161  *all_visible_according_to_vm = true;
1162  return true;
1163  }
1164  else
1165  {
1166  /*
1167  * 3. We reached the next unskippable block. Process it. On next
1168  * iteration, we will be back in state 1.
1169  */
1170  Assert(next_block == vacrel->next_unskippable_block);
1171 
1172  *blkno = vacrel->current_block = next_block;
1173  *all_visible_according_to_vm = vacrel->next_unskippable_allvis;
1174  return true;
1175  }
1176 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4924
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:351
BlockNumber current_block
Definition: vacuumlazy.c:213
bool skippedallvis
Definition: vacuumlazy.c:163
static void find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis)
Definition: vacuumlazy.c:1192
#define SKIP_PAGES_THRESHOLD
Definition: vacuumlazy.c:109

References Assert, BufferIsValid(), LVRelState::current_block, find_next_unskippable_block(), InvalidBlockNumber, InvalidBuffer, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_vmbuffer, LVRelState::rel_pages, ReleaseBuffer(), SKIP_PAGES_THRESHOLD, and LVRelState::skippedallvis.

Referenced by lazy_scan_heap().
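
A simplified sketch of the driving loop in the caller; the real loop in lazy_scan_heap() also manages buffers, the visibility map and progress reporting:

    BlockNumber blkno;
    bool        all_visible_according_to_vm;

    while (heap_vac_scan_next_block(vacrel, &blkno, &all_visible_according_to_vm))
    {
        /* read block blkno, then prune and freeze it with lazy_scan_prune(),
         * or fall back to lazy_scan_noprune() if no cleanup lock is available */
    }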

◆ heap_vacuum_rel()

void heap_vacuum_rel ( Relation  rel,
VacuumParams params,
BufferAccessStrategy  bstrategy 
)

Definition at line 293 of file vacuumlazy.c.

295 {
296  LVRelState *vacrel;
297  bool verbose,
298  instrument,
299  skipwithvm,
300  frozenxid_updated,
301  minmulti_updated;
302  BlockNumber orig_rel_pages,
303  new_rel_pages,
304  new_rel_allvisible;
305  PGRUsage ru0;
306  TimestampTz starttime = 0;
307  PgStat_Counter startreadtime = 0,
308  startwritetime = 0;
309  WalUsage startwalusage = pgWalUsage;
310  BufferUsage startbufferusage = pgBufferUsage;
311  ErrorContextCallback errcallback;
312  char **indnames = NULL;
313 
314  verbose = (params->options & VACOPT_VERBOSE) != 0;
315  instrument = (verbose || (AmAutoVacuumWorkerProcess() &&
316  params->log_min_duration >= 0));
317  if (instrument)
318  {
319  pg_rusage_init(&ru0);
320  starttime = GetCurrentTimestamp();
321  if (track_io_timing)
322  {
323  startreadtime = pgStatBlockReadTime;
324  startwritetime = pgStatBlockWriteTime;
325  }
326  }
327 
328  pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
329  RelationGetRelid(rel));
330 
331  /*
332  * Setup error traceback support for ereport() first. The idea is to set
333  * up an error context callback to display additional information on any
334  * error during a vacuum. During different phases of vacuum, we update
335  * the state so that the error context callback always display current
336  * information.
337  *
338  * Copy the names of heap rel into local memory for error reporting
339  * purposes, too. It isn't always safe to assume that we can get the name
340  * of each rel. It's convenient for code in lazy_scan_heap to always use
341  * these temp copies.
342  */
343  vacrel = (LVRelState *) palloc0(sizeof(LVRelState));
344  vacrel->dbname = get_database_name(MyDatabaseId);
345  vacrel->relnamespace = get_namespace_name(RelationGetNamespace(rel));
346  vacrel->relname = pstrdup(RelationGetRelationName(rel));
347  vacrel->indname = NULL;
348  vacrel->phase = VACUUM_ERRCB_PHASE_UNKNOWN;
349  vacrel->verbose = verbose;
350  errcallback.callback = vacuum_error_callback;
351  errcallback.arg = vacrel;
352  errcallback.previous = error_context_stack;
353  error_context_stack = &errcallback;
354 
355  /* Set up high level stuff about rel and its indexes */
356  vacrel->rel = rel;
357  vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes,
358  &vacrel->indrels);
359  vacrel->bstrategy = bstrategy;
360  if (instrument && vacrel->nindexes > 0)
361  {
362  /* Copy index names used by instrumentation (not error reporting) */
363  indnames = palloc(sizeof(char *) * vacrel->nindexes);
364  for (int i = 0; i < vacrel->nindexes; i++)
365  indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
366  }
367 
368  /*
369  * The index_cleanup param either disables index vacuuming and cleanup or
370  * forces it to go ahead when we would otherwise apply the index bypass
371  * optimization. The default is 'auto', which leaves the final decision
372  * up to lazy_vacuum().
373  *
374  * The truncate param allows user to avoid attempting relation truncation,
375  * though it can't force truncation to happen.
376  */
377  Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED);
378  Assert(params->truncate != VACOPTVALUE_UNSPECIFIED &&
379  params->truncate != VACOPTVALUE_AUTO);
380 
381  /*
382  * While VacuumFailSafeActive is reset to false before calling this, we
383  * still need to reset it here due to recursive calls.
384  */
385  VacuumFailsafeActive = false;
386  vacrel->consider_bypass_optimization = true;
387  vacrel->do_index_vacuuming = true;
388  vacrel->do_index_cleanup = true;
389  vacrel->do_rel_truncate = (params->truncate != VACOPTVALUE_DISABLED);
390  if (params->index_cleanup == VACOPTVALUE_DISABLED)
391  {
392  /* Force disable index vacuuming up-front */
393  vacrel->do_index_vacuuming = false;
394  vacrel->do_index_cleanup = false;
395  }
396  else if (params->index_cleanup == VACOPTVALUE_ENABLED)
397  {
398  /* Force index vacuuming. Note that failsafe can still bypass. */
399  vacrel->consider_bypass_optimization = false;
400  }
401  else
402  {
403  /* Default/auto, make all decisions dynamically */
404  Assert(params->index_cleanup == VACOPTVALUE_AUTO);
405  }
406 
407  /* Initialize page counters explicitly (be tidy) */
408  vacrel->scanned_pages = 0;
409  vacrel->removed_pages = 0;
410  vacrel->frozen_pages = 0;
411  vacrel->lpdead_item_pages = 0;
412  vacrel->missed_dead_pages = 0;
413  vacrel->nonempty_pages = 0;
414  /* dead_items_alloc allocates vacrel->dead_items later on */
415 
416  /* Allocate/initialize output statistics state */
417  vacrel->new_rel_tuples = 0;
418  vacrel->new_live_tuples = 0;
419  vacrel->indstats = (IndexBulkDeleteResult **)
420  palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
421 
422  /* Initialize remaining counters (be tidy) */
423  vacrel->num_index_scans = 0;
424  vacrel->tuples_deleted = 0;
425  vacrel->tuples_frozen = 0;
426  vacrel->lpdead_items = 0;
427  vacrel->live_tuples = 0;
428  vacrel->recently_dead_tuples = 0;
429  vacrel->missed_dead_tuples = 0;
430 
431  /*
432  * Get cutoffs that determine which deleted tuples are considered DEAD,
433  * not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze. Then determine
434  * the extent of the blocks that we'll scan in lazy_scan_heap. It has to
435  * happen in this order to ensure that the OldestXmin cutoff field works
436  * as an upper bound on the XIDs stored in the pages we'll actually scan
437  * (NewRelfrozenXid tracking must never be allowed to miss unfrozen XIDs).
438  *
439  * Next acquire vistest, a related cutoff that's used in pruning. We use
440  * vistest in combination with OldestXmin to ensure that
441  * heap_page_prune_and_freeze() always removes any deleted tuple whose
442  * xmax is < OldestXmin. lazy_scan_prune must never become confused about
443  * whether a tuple should be frozen or removed. (In the future we might
444  * want to teach lazy_scan_prune to recompute vistest from time to time,
445  * to increase the number of dead tuples it can prune away.)
446  */
447  vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
448  vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
449  vacrel->vistest = GlobalVisTestFor(rel);
450  /* Initialize state used to track oldest extant XID/MXID */
451  vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin;
452  vacrel->NewRelminMxid = vacrel->cutoffs.OldestMxact;
453  vacrel->skippedallvis = false;
454  skipwithvm = true;
455  if (params->options & VACOPT_DISABLE_PAGE_SKIPPING)
456  {
457  /*
458  * Force aggressive mode, and disable skipping blocks using the
459  * visibility map (even those set all-frozen)
460  */
461  vacrel->aggressive = true;
462  skipwithvm = false;
463  }
464 
465  vacrel->skipwithvm = skipwithvm;
466 
467  if (verbose)
468  {
469  if (vacrel->aggressive)
470  ereport(INFO,
471  (errmsg("aggressively vacuuming \"%s.%s.%s\"",
472  vacrel->dbname, vacrel->relnamespace,
473  vacrel->relname)));
474  else
475  ereport(INFO,
476  (errmsg("vacuuming \"%s.%s.%s\"",
477  vacrel->dbname, vacrel->relnamespace,
478  vacrel->relname)));
479  }
480 
481  /*
482  * Allocate dead_items memory using dead_items_alloc. This handles
483  * parallel VACUUM initialization as part of allocating shared memory
484  * space used for dead_items. (But do a failsafe precheck first, to
485  * ensure that parallel VACUUM won't be attempted at all when relfrozenxid
486  * is already dangerously old.)
487  */
488  lazy_check_wraparound_failsafe(vacrel);
489  dead_items_alloc(vacrel, params->nworkers);
490 
491  /*
492  * Call lazy_scan_heap to perform all required heap pruning, index
493  * vacuuming, and heap vacuuming (plus related processing)
494  */
495  lazy_scan_heap(vacrel);
496 
497  /*
498  * Free resources managed by dead_items_alloc. This ends parallel mode in
499  * passing when necessary.
500  */
501  dead_items_cleanup(vacrel);
502  Assert(!IsInParallelMode());
503 
504  /*
505  * Update pg_class entries for each of rel's indexes where appropriate.
506  *
507  * Unlike the later update to rel's pg_class entry, this is not critical.
508  * Maintains relpages/reltuples statistics used by the planner only.
509  */
510  if (vacrel->do_index_cleanup)
511  update_relstats_all_indexes(vacrel);
512 
513  /* Done with rel's indexes */
514  vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
515 
516  /* Optionally truncate rel */
517  if (should_attempt_truncation(vacrel))
518  lazy_truncate_heap(vacrel);
519 
520  /* Pop the error context stack */
521  error_context_stack = errcallback.previous;
522 
523  /* Report that we are now doing final cleanup */
524  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
525  PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
526 
527  /*
528  * Prepare to update rel's pg_class entry.
529  *
530  * Aggressive VACUUMs must always be able to advance relfrozenxid to a
531  * value >= FreezeLimit, and relminmxid to a value >= MultiXactCutoff.
532  * Non-aggressive VACUUMs may advance them by any amount, or not at all.
533  */
534  Assert(vacrel->NewRelfrozenXid == vacrel->cutoffs.OldestXmin ||
535  TransactionIdPrecedesOrEquals(vacrel->aggressive ? vacrel->cutoffs.FreezeLimit :
536  vacrel->cutoffs.relfrozenxid,
537  vacrel->NewRelfrozenXid));
538  Assert(vacrel->NewRelminMxid == vacrel->cutoffs.OldestMxact ||
539  MultiXactIdPrecedesOrEquals(vacrel->aggressive ? vacrel->cutoffs.MultiXactCutoff :
540  vacrel->cutoffs.relminmxid,
541  vacrel->NewRelminMxid));
542  if (vacrel->skippedallvis)
543  {
544  /*
545  * Must keep original relfrozenxid in a non-aggressive VACUUM that
546  * chose to skip an all-visible page range. The state that tracks new
547  * values will have missed unfrozen XIDs from the pages we skipped.
548  */
549  Assert(!vacrel->aggressive);
550  vacrel->NewRelfrozenXid = InvalidTransactionId;
551  vacrel->NewRelminMxid = InvalidMultiXactId;
552  }
553 
554  /*
555  * For safety, clamp relallvisible to be not more than what we're setting
556  * pg_class.relpages to
557  */
558  new_rel_pages = vacrel->rel_pages; /* After possible rel truncation */
559  visibilitymap_count(rel, &new_rel_allvisible, NULL);
560  if (new_rel_allvisible > new_rel_pages)
561  new_rel_allvisible = new_rel_pages;
562 
563  /*
564  * Now actually update rel's pg_class entry.
565  *
566  * In principle new_live_tuples could be -1 indicating that we (still)
567  * don't know the tuple count. In practice that can't happen, since we
568  * scan every page that isn't skipped using the visibility map.
569  */
570  vac_update_relstats(rel, new_rel_pages, vacrel->new_live_tuples,
571  new_rel_allvisible, vacrel->nindexes > 0,
572  vacrel->NewRelfrozenXid, vacrel->NewRelminMxid,
573  &frozenxid_updated, &minmulti_updated, false);
574 
575  /*
576  * Report results to the cumulative stats system, too.
577  *
578  * Deliberately avoid telling the stats system about LP_DEAD items that
579  * remain in the table due to VACUUM bypassing index and heap vacuuming.
580  * ANALYZE will consider the remaining LP_DEAD items to be dead "tuples".
581  * It seems like a good idea to err on the side of not vacuuming again too
582  * soon in cases where the failsafe prevented significant amounts of heap
583  * vacuuming.
584  */
585  pgstat_report_vacuum(RelationGetRelid(rel),
586  rel->rd_rel->relisshared,
587  Max(vacrel->new_live_tuples, 0),
588  vacrel->recently_dead_tuples +
589  vacrel->missed_dead_tuples);
590  pgstat_progress_end_command();
591 
592  if (instrument)
593  {
594  TimestampTz endtime = GetCurrentTimestamp();
595 
596  if (verbose || params->log_min_duration == 0 ||
597  TimestampDifferenceExceeds(starttime, endtime,
598  params->log_min_duration))
599  {
600  long secs_dur;
601  int usecs_dur;
602  WalUsage walusage;
603  BufferUsage bufferusage;
604  StringInfoData buf;
605  char *msgfmt;
606  int32 diff;
607  double read_rate = 0,
608  write_rate = 0;
609  int64 total_blks_hit;
610  int64 total_blks_read;
611  int64 total_blks_dirtied;
612 
613  TimestampDifference(starttime, endtime, &secs_dur, &usecs_dur);
614  memset(&walusage, 0, sizeof(WalUsage));
615  WalUsageAccumDiff(&walusage, &pgWalUsage, &startwalusage);
616  memset(&bufferusage, 0, sizeof(BufferUsage));
617  BufferUsageAccumDiff(&bufferusage, &pgBufferUsage, &startbufferusage);
618 
619  total_blks_hit = bufferusage.shared_blks_hit +
620  bufferusage.local_blks_hit;
621  total_blks_read = bufferusage.shared_blks_read +
622  bufferusage.local_blks_read;
623  total_blks_dirtied = bufferusage.shared_blks_dirtied +
624  bufferusage.local_blks_dirtied;
625 
626  initStringInfo(&buf);
627  if (verbose)
628  {
629  /*
630  * Aggressiveness already reported earlier, in dedicated
631  * VACUUM VERBOSE ereport
632  */
633  Assert(!params->is_wraparound);
634  msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
635  }
636  else if (params->is_wraparound)
637  {
638  /*
639  * While it's possible for a VACUUM to be both is_wraparound
640  * and !aggressive, that's just a corner-case -- is_wraparound
641  * implies aggressive. Produce distinct output for the corner
642  * case all the same, just in case.
643  */
644  if (vacrel->aggressive)
645  msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
646  else
647  msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
648  }
649  else
650  {
651  if (vacrel->aggressive)
652  msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
653  else
654  msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
655  }
656  appendStringInfo(&buf, msgfmt,
657  vacrel->dbname,
658  vacrel->relnamespace,
659  vacrel->relname,
660  vacrel->num_index_scans);
661  appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total)\n"),
662  vacrel->removed_pages,
663  new_rel_pages,
664  vacrel->scanned_pages,
665  orig_rel_pages == 0 ? 100.0 :
666  100.0 * vacrel->scanned_pages / orig_rel_pages);
667  appendStringInfo(&buf,
668  _("tuples: %lld removed, %lld remain, %lld are dead but not yet removable\n"),
669  (long long) vacrel->tuples_deleted,
670  (long long) vacrel->new_rel_tuples,
671  (long long) vacrel->recently_dead_tuples);
672  if (vacrel->missed_dead_tuples > 0)
673  appendStringInfo(&buf,
674  _("tuples missed: %lld dead from %u pages not removed due to cleanup lock contention\n"),
675  (long long) vacrel->missed_dead_tuples,
676  vacrel->missed_dead_pages);
677  diff = (int32) (ReadNextTransactionId() -
678  vacrel->cutoffs.OldestXmin);
679  appendStringInfo(&buf,
680  _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
681  vacrel->cutoffs.OldestXmin, diff);
682  if (frozenxid_updated)
683  {
684  diff = (int32) (vacrel->NewRelfrozenXid -
685  vacrel->cutoffs.relfrozenxid);
686  appendStringInfo(&buf,
687  _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
688  vacrel->NewRelfrozenXid, diff);
689  }
690  if (minmulti_updated)
691  {
692  diff = (int32) (vacrel->NewRelminMxid -
693  vacrel->cutoffs.relminmxid);
694  appendStringInfo(&buf,
695  _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
696  vacrel->NewRelminMxid, diff);
697  }
698  appendStringInfo(&buf, _("frozen: %u pages from table (%.2f%% of total) had %lld tuples frozen\n"),
699  vacrel->frozen_pages,
700  orig_rel_pages == 0 ? 100.0 :
701  100.0 * vacrel->frozen_pages / orig_rel_pages,
702  (long long) vacrel->tuples_frozen);
703  if (vacrel->do_index_vacuuming)
704  {
705  if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
706  appendStringInfoString(&buf, _("index scan not needed: "));
707  else
708  appendStringInfoString(&buf, _("index scan needed: "));
709 
710  msgfmt = _("%u pages from table (%.2f%% of total) had %lld dead item identifiers removed\n");
711  }
712  else
713  {
714  if (!VacuumFailsafeActive)
715  appendStringInfoString(&buf, _("index scan bypassed: "));
716  else
717  appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
718 
719  msgfmt = _("%u pages from table (%.2f%% of total) have %lld dead item identifiers\n");
720  }
721  appendStringInfo(&buf, msgfmt,
722  vacrel->lpdead_item_pages,
723  orig_rel_pages == 0 ? 100.0 :
724  100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
725  (long long) vacrel->lpdead_items);
726  for (int i = 0; i < vacrel->nindexes; i++)
727  {
728  IndexBulkDeleteResult *istat = vacrel->indstats[i];
729 
730  if (!istat)
731  continue;
732 
733  appendStringInfo(&buf,
734  _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
735  indnames[i],
736  istat->num_pages,
737  istat->pages_newly_deleted,
738  istat->pages_deleted,
739  istat->pages_free);
740  }
741  if (track_io_timing)
742  {
743  double read_ms = (double) (pgStatBlockReadTime - startreadtime) / 1000;
744  double write_ms = (double) (pgStatBlockWriteTime - startwritetime) / 1000;
745 
746  appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
747  read_ms, write_ms);
748  }
749  if (secs_dur > 0 || usecs_dur > 0)
750  {
751  read_rate = (double) BLCKSZ * total_blks_read /
752  (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
753  write_rate = (double) BLCKSZ * total_blks_dirtied /
754  (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
755  }
756  appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
757  read_rate, write_rate);
758  appendStringInfo(&buf,
759  _("buffer usage: %lld hits, %lld reads, %lld dirtied\n"),
760  (long long) total_blks_hit,
761  (long long) total_blks_read,
762  (long long) total_blks_dirtied);
763  appendStringInfo(&buf,
764  _("WAL usage: %lld records, %lld full page images, %llu bytes\n"),
765  (long long) walusage.wal_records,
766  (long long) walusage.wal_fpi,
767  (unsigned long long) walusage.wal_bytes);
768  appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
769 
770  ereport(verbose ? INFO : LOG,
771  (errmsg_internal("%s", buf.data)));
772  pfree(buf.data);
773  }
774  }
775 
776  /* Cleanup index statistics and index names */
777  for (int i = 0; i < vacrel->nindexes; i++)
778  {
779  if (vacrel->indstats[i])
780  pfree(vacrel->indstats[i]);
781 
782  if (instrument)
783  pfree(indnames[i]);
784  }
785 }
void TimestampDifference(TimestampTz start_time, TimestampTz stop_time, long *secs, int *microsecs)
Definition: timestamp.c:1720
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1780
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1644
void pgstat_progress_start_command(ProgressCommandType cmdtype, Oid relid)
void pgstat_progress_update_param(int index, int64 val)
void pgstat_progress_end_command(void)
@ PROGRESS_COMMAND_VACUUM
bool track_io_timing
Definition: bufmgr.c:143
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:273
signed int int32
Definition: c.h:482
#define Max(x, y)
Definition: c.h:977
int64 TimestampTz
Definition: timestamp.h:39
char * get_database_name(Oid dbid)
Definition: dbcommands.c:3187
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1157
ErrorContextCallback * error_context_stack
Definition: elog.c:94
#define _(x)
Definition: elog.c:90
#define LOG
Definition: elog.h:31
Oid MyDatabaseId
Definition: globals.c:93
int verbose
WalUsage pgWalUsage
Definition: instrument.c:22
void WalUsageAccumDiff(WalUsage *dst, const WalUsage *add, const WalUsage *sub)
Definition: instrument.c:286
BufferUsage pgBufferUsage
Definition: instrument.c:20
void BufferUsageAccumDiff(BufferUsage *dst, const BufferUsage *add, const BufferUsage *sub)
Definition: instrument.c:248
int i
Definition: isn.c:72
#define NoLock
Definition: lockdefs.h:34
#define RowExclusiveLock
Definition: lockdefs.h:38
char * get_namespace_name(Oid nspid)
Definition: lsyscache.c:3366
char * pstrdup(const char *in)
Definition: mcxt.c:1696
void pfree(void *pointer)
Definition: mcxt.c:1521
void * palloc0(Size size)
Definition: mcxt.c:1347
bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3331
#define InvalidMultiXactId
Definition: multixact.h:24
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
int64 PgStat_Counter
Definition: pgstat.h:120
PgStat_Counter pgStatBlockReadTime
PgStat_Counter pgStatBlockWriteTime
void pgstat_report_vacuum(Oid tableoid, bool shared, PgStat_Counter livetuples, PgStat_Counter deadtuples)
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4111
#define PROGRESS_VACUUM_PHASE_FINAL_CLEANUP
Definition: progress.h:38
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
#define RelationGetRelationName(relation)
Definition: rel.h:539
#define RelationGetNamespace(relation)
Definition: rel.h:546
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:94
void appendStringInfoString(StringInfo str, const char *s)
Definition: stringinfo.c:179
void initStringInfo(StringInfo str)
Definition: stringinfo.c:56
int64 shared_blks_dirtied
Definition: instrument.h:28
int64 local_blks_hit
Definition: instrument.h:30
int64 shared_blks_read
Definition: instrument.h:27
int64 local_blks_read
Definition: instrument.h:31
int64 local_blks_dirtied
Definition: instrument.h:32
int64 shared_blks_hit
Definition: instrument.h:26
struct ErrorContextCallback * previous
Definition: elog.h:296
void(* callback)(void *arg)
Definition: elog.h:297
BlockNumber pages_deleted
Definition: genam.h:82
BlockNumber pages_newly_deleted
Definition: genam.h:81
BlockNumber pages_free
Definition: genam.h:83
BlockNumber num_pages
Definition: genam.h:77
int64 tuples_deleted
Definition: vacuumlazy.c:205
bool do_rel_truncate
Definition: vacuumlazy.c:155
BlockNumber scanned_pages
Definition: vacuumlazy.c:189
GlobalVisState * vistest
Definition: vacuumlazy.c:159
BlockNumber removed_pages
Definition: vacuumlazy.c:190
int num_index_scans
Definition: vacuumlazy.c:203
double new_live_tuples
Definition: vacuumlazy.c:198
double new_rel_tuples
Definition: vacuumlazy.c:197
TransactionId NewRelfrozenXid
Definition: vacuumlazy.c:161
bool consider_bypass_optimization
Definition: vacuumlazy.c:150
int64 recently_dead_tuples
Definition: vacuumlazy.c:209
int64 tuples_frozen
Definition: vacuumlazy.c:206
BlockNumber frozen_pages
Definition: vacuumlazy.c:191
char * dbname
Definition: vacuumlazy.c:166
BlockNumber missed_dead_pages
Definition: vacuumlazy.c:193
char * relnamespace
Definition: vacuumlazy.c:167
int64 live_tuples
Definition: vacuumlazy.c:208
int64 lpdead_items
Definition: vacuumlazy.c:207
BlockNumber lpdead_item_pages
Definition: vacuumlazy.c:192
bool do_index_cleanup
Definition: vacuumlazy.c:154
MultiXactId NewRelminMxid
Definition: vacuumlazy.c:162
int64 missed_dead_tuples
Definition: vacuumlazy.c:210
VacErrPhase phase
Definition: vacuumlazy.c:172
char * indname
Definition: vacuumlazy.c:169
Form_pg_class rd_rel
Definition: rel.h:111
TransactionId FreezeLimit
Definition: vacuum.h:277
TransactionId relfrozenxid
Definition: vacuum.h:251
MultiXactId relminmxid
Definition: vacuum.h:252
MultiXactId MultiXactCutoff
Definition: vacuum.h:278
MultiXactId OldestMxact
Definition: vacuum.h:268
int nworkers
Definition: vacuum.h:239
VacOptValue truncate
Definition: vacuum.h:231
bits32 options
Definition: vacuum.h:219
bool is_wraparound
Definition: vacuum.h:226
int log_min_duration
Definition: vacuum.h:227
VacOptValue index_cleanup
Definition: vacuum.h:230
uint64 wal_bytes
Definition: instrument.h:55
int64 wal_fpi
Definition: instrument.h:54
int64 wal_records
Definition: instrument.h:53
bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
Definition: transam.c:299
static TransactionId ReadNextTransactionId(void)
Definition: transam.h:315
void vac_open_indexes(Relation relation, LOCKMODE lockmode, int *nindexes, Relation **Irel)
Definition: vacuum.c:2298
void vac_update_relstats(Relation relation, BlockNumber num_pages, double num_tuples, BlockNumber num_all_visible_pages, bool hasindex, TransactionId frozenxid, MultiXactId minmulti, bool *frozenxid_updated, bool *minmulti_updated, bool in_outer_xact)
Definition: vacuum.c:1410
void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
Definition: vacuum.c:2341
bool vacuum_get_cutoffs(Relation rel, const VacuumParams *params, struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:1084
bool VacuumFailsafeActive
Definition: vacuum.c:95
#define VACOPT_VERBOSE
Definition: vacuum.h:182
@ VACOPTVALUE_AUTO
Definition: vacuum.h:203
@ VACOPTVALUE_ENABLED
Definition: vacuum.h:205
@ VACOPTVALUE_UNSPECIFIED
Definition: vacuum.h:202
@ VACOPTVALUE_DISABLED
Definition: vacuum.h:204
#define VACOPT_DISABLE_PAGE_SKIPPING
Definition: vacuum.h:188
static void dead_items_cleanup(LVRelState *vacrel)
Definition: vacuumlazy.c:2940
static void update_relstats_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:3081
static void vacuum_error_callback(void *arg)
Definition: vacuumlazy.c:3116
static void lazy_truncate_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:2557
static bool should_attempt_truncation(LVRelState *vacrel)
Definition: vacuumlazy.c:2537
static void lazy_scan_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:824
static bool lazy_check_wraparound_failsafe(LVRelState *vacrel)
Definition: vacuumlazy.c:2307
static void dead_items_alloc(LVRelState *vacrel, int nworkers)
Definition: vacuumlazy.c:2830
void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
bool IsInParallelMode(void)
Definition: xact.c:1088

References _, LVRelState::aggressive, AmAutoVacuumWorkerProcess, appendStringInfo(), appendStringInfoString(), ErrorContextCallback::arg, Assert, LVRelState::bstrategy, buf, BufferUsageAccumDiff(), ErrorContextCallback::callback, LVRelState::consider_bypass_optimization, LVRelState::cutoffs, LVRelState::dbname, dead_items_alloc(), dead_items_cleanup(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errmsg(), errmsg_internal(), error_context_stack, VacuumCutoffs::FreezeLimit, LVRelState::frozen_pages, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), GlobalVisTestFor(), i, VacuumParams::index_cleanup, LVRelState::indname, LVRelState::indrels, LVRelState::indstats, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, VacuumParams::is_wraparound, IsInParallelMode(), lazy_check_wraparound_failsafe(), lazy_scan_heap(), lazy_truncate_heap(), LVRelState::live_tuples, BufferUsage::local_blks_dirtied, BufferUsage::local_blks_hit, BufferUsage::local_blks_read, LOG, VacuumParams::log_min_duration, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, Max, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, VacuumCutoffs::MultiXactCutoff, MultiXactIdPrecedesOrEquals(), MyDatabaseId, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, NoLock, LVRelState::nonempty_pages, LVRelState::num_index_scans, IndexBulkDeleteResult::num_pages, VacuumParams::nworkers, VacuumCutoffs::OldestMxact, VacuumCutoffs::OldestXmin, VacuumParams::options, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, IndexBulkDeleteResult::pages_newly_deleted, palloc(), palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgBufferUsage, pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), pgStatBlockReadTime, pgStatBlockWriteTime, pgWalUsage, LVRelState::phase, ErrorContextCallback::previous, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, pstrdup(), RelationData::rd_rel, ReadNextTransactionId(), LVRelState::recently_dead_tuples, LVRelState::rel, LVRelState::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, VacuumCutoffs::relfrozenxid, VacuumCutoffs::relminmxid, LVRelState::relname, LVRelState::relnamespace, LVRelState::removed_pages, RowExclusiveLock, LVRelState::scanned_pages, BufferUsage::shared_blks_dirtied, BufferUsage::shared_blks_hit, BufferUsage::shared_blks_read, should_attempt_truncation(), LVRelState::skippedallvis, LVRelState::skipwithvm, TimestampDifference(), TimestampDifferenceExceeds(), track_io_timing, TransactionIdPrecedesOrEquals(), VacuumParams::truncate, LVRelState::tuples_deleted, LVRelState::tuples_frozen, update_relstats_all_indexes(), vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_VERBOSE, VACOPTVALUE_AUTO, VACOPTVALUE_DISABLED, VACOPTVALUE_ENABLED, VACOPTVALUE_UNSPECIFIED, VACUUM_ERRCB_PHASE_UNKNOWN, vacuum_error_callback(), vacuum_get_cutoffs(), VacuumFailsafeActive, LVRelState::verbose, verbose, visibilitymap_count(), LVRelState::vistest, WalUsage::wal_bytes, WalUsage::wal_fpi, WalUsage::wal_records, and WalUsageAccumDiff().

◆ lazy_check_wraparound_failsafe()

static bool lazy_check_wraparound_failsafe ( LVRelState vacrel)
static

Definition at line 2307 of file vacuumlazy.c.

2308 {
2309  /* Don't warn more than once per VACUUM */
2310  if (VacuumFailsafeActive)
2311  return true;
2312 
2313  if (unlikely(vacuum_xid_failsafe_check(&vacrel->cutoffs)))
2314  {
2315  const int progress_index[] = {
2316  PROGRESS_VACUUM_INDEXES_TOTAL,
2317  PROGRESS_VACUUM_INDEXES_PROCESSED
2318  };
2319  int64 progress_val[2] = {0, 0};
2320 
2321  VacuumFailsafeActive = true;
2322 
2323  /*
2324  * Abandon use of a buffer access strategy to allow use of all of
2325  * shared buffers. We assume the caller who allocated the memory for
2326  * the BufferAccessStrategy will free it.
2327  */
2328  vacrel->bstrategy = NULL;
2329 
2330  /* Disable index vacuuming, index cleanup, and heap rel truncation */
2331  vacrel->do_index_vacuuming = false;
2332  vacrel->do_index_cleanup = false;
2333  vacrel->do_rel_truncate = false;
2334 
2335  /* Reset the progress counters */
2336  pgstat_progress_update_multi_param(2, progress_index, progress_val);
2337 
2338  ereport(WARNING,
2339  (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
2340  vacrel->dbname, vacrel->relnamespace, vacrel->relname,
2341  vacrel->num_index_scans),
2342  errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
2343  errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
2344  "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
2345 
2346  /* Stop applying cost limits from this point on */
2347  VacuumCostActive = false;
2348  VacuumCostBalance = 0;
2349 
2350  return true;
2351  }
2352 
2353  return false;
2354 }
#define unlikely(x)
Definition: c.h:326
int errdetail(const char *fmt,...)
Definition: elog.c:1203
int errhint(const char *fmt,...)
Definition: elog.c:1317
bool VacuumCostActive
Definition: globals.c:157
int VacuumCostBalance
Definition: globals.c:156
#define PROGRESS_VACUUM_INDEXES_PROCESSED
Definition: progress.h:30
#define PROGRESS_VACUUM_INDEXES_TOTAL
Definition: progress.h:29
bool vacuum_xid_failsafe_check(const struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:1252

References LVRelState::bstrategy, LVRelState::cutoffs, LVRelState::dbname, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errdetail(), errhint(), errmsg(), LVRelState::num_index_scans, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, LVRelState::relname, LVRelState::relnamespace, unlikely, vacuum_xid_failsafe_check(), VacuumCostActive, VacuumCostBalance, VacuumFailsafeActive, and WARNING.

Referenced by heap_vacuum_rel(), lazy_scan_heap(), and lazy_vacuum_all_indexes().
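To put the check interval in concrete terms, the standalone sketch below (illustrative arithmetic only, not part of vacuumlazy.c) shows how often lazy_scan_heap() re-evaluates this failsafe: FAILSAFE_EVERY_PAGES corresponds to 4GB of heap, so with the default 8kB BLCKSZ the check runs once per 524,288 blocks scanned.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    const uint64_t blcksz = 8192;   /* default BLCKSZ */
    const uint64_t failsafe_every_pages =
        ((uint64_t) 4 * 1024 * 1024 * 1024) / blcksz;

    /* 524288 blocks, i.e. 4096 MB of heap between failsafe checks */
    printf("failsafe re-checked every %llu blocks (%llu MB of heap)\n",
           (unsigned long long) failsafe_every_pages,
           (unsigned long long) (failsafe_every_pages * blcksz / (1024 * 1024)));
    return 0;
}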

◆ lazy_cleanup_all_indexes()

static void lazy_cleanup_all_indexes ( LVRelState vacrel)
static

Definition at line 2360 of file vacuumlazy.c.

2361 {
2362  double reltuples = vacrel->new_rel_tuples;
2363  bool estimated_count = vacrel->scanned_pages < vacrel->rel_pages;
2364  const int progress_start_index[] = {
2367  };
2368  const int progress_end_index[] = {
2371  };
2372  int64 progress_start_val[2];
2373  int64 progress_end_val[2] = {0, 0};
2374 
2375  Assert(vacrel->do_index_cleanup);
2376  Assert(vacrel->nindexes > 0);
2377 
2378  /*
2379  * Report that we are now cleaning up indexes and the number of indexes to
2380  * cleanup.
2381  */
2382  progress_start_val[0] = PROGRESS_VACUUM_PHASE_INDEX_CLEANUP;
2383  progress_start_val[1] = vacrel->nindexes;
2384  pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
2385 
2386  if (!ParallelVacuumIsActive(vacrel))
2387  {
2388  for (int idx = 0; idx < vacrel->nindexes; idx++)
2389  {
2390  Relation indrel = vacrel->indrels[idx];
2391  IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2392 
2393  vacrel->indstats[idx] =
2394  lazy_cleanup_one_index(indrel, istat, reltuples,
2395  estimated_count, vacrel);
2396 
2397  /* Report the number of indexes cleaned up */
2399  idx + 1);
2400  }
2401  }
2402  else
2403  {
2404  /* Outsource everything to parallel variant */
2405  parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
2406  vacrel->num_index_scans,
2407  estimated_count);
2408  }
2409 
2410  /* Reset the progress counters */
2411  pgstat_progress_update_multi_param(2, progress_end_index, progress_end_val);
2412 }
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
#define PROGRESS_VACUUM_PHASE_INDEX_CLEANUP
Definition: progress.h:36
static IndexBulkDeleteResult * lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
Definition: vacuumlazy.c:2477
void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, bool estimated_count)

References Assert, LVRelState::do_index_cleanup, idx(), LVRelState::indrels, LVRelState::indstats, lazy_cleanup_one_index(), LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::num_index_scans, parallel_vacuum_cleanup_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, LVRelState::pvs, LVRelState::rel_pages, and LVRelState::scanned_pages.

Referenced by lazy_scan_heap().

◆ lazy_cleanup_one_index()

static IndexBulkDeleteResult * lazy_cleanup_one_index ( Relation  indrel,
IndexBulkDeleteResult istat,
double  reltuples,
bool  estimated_count,
LVRelState vacrel 
)
static

Definition at line 2477 of file vacuumlazy.c.

2480 {
2481  IndexVacuumInfo ivinfo;
2482  LVSavedErrInfo saved_err_info;
2483 
2484  ivinfo.index = indrel;
2485  ivinfo.heaprel = vacrel->rel;
2486  ivinfo.analyze_only = false;
2487  ivinfo.report_progress = false;
2488  ivinfo.estimated_count = estimated_count;
2489  ivinfo.message_level = DEBUG2;
2490 
2491  ivinfo.num_heap_tuples = reltuples;
2492  ivinfo.strategy = vacrel->bstrategy;
2493 
2494  /*
2495  * Update error traceback information.
2496  *
2497  * The index name is saved during this phase and restored immediately
2498  * after this phase. See vacuum_error_callback.
2499  */
2500  Assert(vacrel->indname == NULL);
2501  vacrel->indname = pstrdup(RelationGetRelationName(indrel));
2502  update_vacuum_error_info(vacrel, &saved_err_info,
2505 
2506  istat = vac_cleanup_one_index(&ivinfo, istat);
2507 
2508  /* Revert to the previous phase information for error traceback */
2509  restore_vacuum_error_info(vacrel, &saved_err_info);
2510  pfree(vacrel->indname);
2511  vacrel->indname = NULL;
2512 
2513  return istat;
2514 }
Relation index
Definition: genam.h:46
double num_heap_tuples
Definition: genam.h:52
bool analyze_only
Definition: genam.h:48
BufferAccessStrategy strategy
Definition: genam.h:53
Relation heaprel
Definition: genam.h:47
bool report_progress
Definition: genam.h:49
int message_level
Definition: genam.h:51
bool estimated_count
Definition: genam.h:50
IndexBulkDeleteResult * vac_cleanup_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat)
Definition: vacuum.c:2537
static void restore_vacuum_error_info(LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
Definition: vacuumlazy.c:3199
static void update_vacuum_error_info(LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
Definition: vacuumlazy.c:3180

References IndexVacuumInfo::analyze_only, Assert, LVRelState::bstrategy, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_cleanup_one_index(), and VACUUM_ERRCB_PHASE_INDEX_CLEANUP.

Referenced by lazy_cleanup_all_indexes().

◆ lazy_scan_heap()

static void lazy_scan_heap ( LVRelState vacrel)
static

Definition at line 824 of file vacuumlazy.c.

825 {
826  BlockNumber rel_pages = vacrel->rel_pages,
827  blkno,
828  next_fsm_block_to_vacuum = 0;
829  bool all_visible_according_to_vm;
830 
831  TidStore *dead_items = vacrel->dead_items;
832  VacDeadItemsInfo *dead_items_info = vacrel->dead_items_info;
833  Buffer vmbuffer = InvalidBuffer;
834  const int initprog_index[] = {
838  };
839  int64 initprog_val[3];
840 
841  /* Report that we're scanning the heap, advertising total # of blocks */
842  initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
843  initprog_val[1] = rel_pages;
844  initprog_val[2] = dead_items_info->max_bytes;
845  pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
846 
847  /* Initialize for the first heap_vac_scan_next_block() call */
850  vacrel->next_unskippable_allvis = false;
852 
853  while (heap_vac_scan_next_block(vacrel, &blkno, &all_visible_according_to_vm))
854  {
855  Buffer buf;
856  Page page;
857  bool has_lpdead_items;
858  bool got_cleanup_lock = false;
859 
860  vacrel->scanned_pages++;
861 
862  /* Report as block scanned, update error traceback information */
865  blkno, InvalidOffsetNumber);
866 
868 
869  /*
870  * Regularly check if wraparound failsafe should trigger.
871  *
872  * There is a similar check inside lazy_vacuum_all_indexes(), but
873  * relfrozenxid might start to look dangerously old before we reach
874  * that point. This check also provides failsafe coverage for the
875  * one-pass strategy, and the two-pass strategy with the index_cleanup
876  * param set to 'off'.
877  */
878  if (vacrel->scanned_pages % FAILSAFE_EVERY_PAGES == 0)
880 
881  /*
882  * Consider if we definitely have enough space to process TIDs on page
883  * already. If we are close to overrunning the available space for
884  * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
885  * this page.
886  */
887  if (TidStoreMemoryUsage(dead_items) > dead_items_info->max_bytes)
888  {
889  /*
890  * Before beginning index vacuuming, we release any pin we may
891  * hold on the visibility map page. This isn't necessary for
892  * correctness, but we do it anyway to avoid holding the pin
893  * across a lengthy, unrelated operation.
894  */
895  if (BufferIsValid(vmbuffer))
896  {
897  ReleaseBuffer(vmbuffer);
898  vmbuffer = InvalidBuffer;
899  }
900 
901  /* Perform a round of index and heap vacuuming */
902  vacrel->consider_bypass_optimization = false;
903  lazy_vacuum(vacrel);
904 
905  /*
906  * Vacuum the Free Space Map to make newly-freed space visible on
907  * upper-level FSM pages. Note we have not yet processed blkno.
908  */
909  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
910  blkno);
911  next_fsm_block_to_vacuum = blkno;
912 
913  /* Report that we are once again scanning the heap */
916  }
917 
918  /*
919  * Pin the visibility map page in case we need to mark the page
920  * all-visible. In most cases this will be very cheap, because we'll
921  * already have the correct page pinned anyway.
922  */
923  visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
924 
925  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
926  vacrel->bstrategy);
927  page = BufferGetPage(buf);
928 
929  /*
930  * We need a buffer cleanup lock to prune HOT chains and defragment
931  * the page in lazy_scan_prune. But when it's not possible to acquire
932  * a cleanup lock right away, we may be able to settle for reduced
933  * processing using lazy_scan_noprune.
934  */
935  got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
936 
937  if (!got_cleanup_lock)
939 
940  /* Check for new or empty pages before lazy_scan_[no]prune call */
941  if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock,
942  vmbuffer))
943  {
944  /* Processed as new/empty page (lock and pin released) */
945  continue;
946  }
947 
948  /*
949  * If we didn't get the cleanup lock, we can still collect LP_DEAD
950  * items in the dead_items area for later vacuuming, count live and
951  * recently dead tuples for vacuum logging, and determine if this
952  * block could later be truncated. If we encounter any xid/mxids that
953  * require advancing the relfrozenxid/relminxid, we'll have to wait
954  * for a cleanup lock and call lazy_scan_prune().
955  */
956  if (!got_cleanup_lock &&
957  !lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
958  {
959  /*
960  * lazy_scan_noprune could not do all required processing. Wait
961  * for a cleanup lock, and call lazy_scan_prune in the usual way.
962  */
963  Assert(vacrel->aggressive);
966  got_cleanup_lock = true;
967  }
968 
969  /*
970  * If we have a cleanup lock, we must now prune, freeze, and count
971  * tuples. We may have acquired the cleanup lock originally, or we may
972  * have gone back and acquired it after lazy_scan_noprune() returned
973  * false. Either way, the page hasn't been processed yet.
974  *
975  * Like lazy_scan_noprune(), lazy_scan_prune() will count
976  * recently_dead_tuples and live tuples for vacuum logging, determine
977  * if the block can later be truncated, and accumulate the details of
978  * remaining LP_DEAD line pointers on the page into dead_items. These
979  * dead items include those pruned by lazy_scan_prune() as well as
980  * line pointers previously marked LP_DEAD.
981  */
982  if (got_cleanup_lock)
983  lazy_scan_prune(vacrel, buf, blkno, page,
984  vmbuffer, all_visible_according_to_vm,
985  &has_lpdead_items);
986 
987  /*
988  * Now drop the buffer lock and, potentially, update the FSM.
989  *
990  * Our goal is to update the freespace map the last time we touch the
991  * page. If we'll process a block in the second pass, we may free up
992  * additional space on the page, so it is better to update the FSM
993  * after the second pass. If the relation has no indexes, or if index
994  * vacuuming is disabled, there will be no second heap pass; if this
995  * particular page has no dead items, the second heap pass will not
996  * touch this page. So, in those cases, update the FSM now.
997  *
998  * Note: In corner cases, it's possible to miss updating the FSM
999  * entirely. If index vacuuming is currently enabled, we'll skip the
1000  * FSM update now. But if failsafe mode is later activated, or there
1001  * are so few dead tuples that index vacuuming is bypassed, there will
1002  * also be no opportunity to update the FSM later, because we'll never
1003  * revisit this page. Since updating the FSM is desirable but not
1004  * absolutely required, that's OK.
1005  */
1006  if (vacrel->nindexes == 0
1007  || !vacrel->do_index_vacuuming
1008  || !has_lpdead_items)
1009  {
1010  Size freespace = PageGetHeapFreeSpace(page);
1011 
1013  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1014 
1015  /*
1016  * Periodically perform FSM vacuuming to make newly-freed space
1017  * visible on upper FSM pages. This is done after vacuuming if the
1018  * table has indexes. There will only be newly-freed space if we
1019  * held the cleanup lock and lazy_scan_prune() was called.
1020  */
1021  if (got_cleanup_lock && vacrel->nindexes == 0 && has_lpdead_items &&
1022  blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1023  {
1024  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1025  blkno);
1026  next_fsm_block_to_vacuum = blkno;
1027  }
1028  }
1029  else
1031  }
1032 
1033  vacrel->blkno = InvalidBlockNumber;
1034  if (BufferIsValid(vmbuffer))
1035  ReleaseBuffer(vmbuffer);
1036 
1037  /* report that everything is now scanned */
1039 
1040  /* now we can compute the new value for pg_class.reltuples */
1041  vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, rel_pages,
1042  vacrel->scanned_pages,
1043  vacrel->live_tuples);
1044 
1045  /*
1046  * Also compute the total number of surviving heap entries. In the
1047  * (unlikely) scenario that new_live_tuples is -1, take it as zero.
1048  */
1049  vacrel->new_rel_tuples =
1050  Max(vacrel->new_live_tuples, 0) + vacrel->recently_dead_tuples +
1051  vacrel->missed_dead_tuples;
1052 
1053  /*
1054  * Do index vacuuming (call each index's ambulkdelete routine), then do
1055  * related heap vacuuming
1056  */
1057  if (dead_items_info->num_items > 0)
1058  lazy_vacuum(vacrel);
1059 
1060  /*
1061  * Vacuum the remainder of the Free Space Map. We must do this whether or
1062  * not there were indexes, and whether or not we bypassed index vacuuming.
1063  */
1064  if (blkno > next_fsm_block_to_vacuum)
1065  FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno);
1066 
1067  /* report all blocks vacuumed */
1069 
1070  /* Do final index cleanup (call each index's amvacuumcleanup routine) */
1071  if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
1072  lazy_cleanup_all_indexes(vacrel);
1073 }
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5238
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5399
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:189
Size PageGetHeapFreeSpace(Page page)
Definition: bufpage.c:980
size_t Size
Definition: c.h:584
void FreeSpaceMapVacuumRange(Relation rel, BlockNumber start, BlockNumber end)
Definition: freespace.c:377
void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
Definition: freespace.c:194
#define PROGRESS_VACUUM_PHASE_SCAN_HEAP
Definition: progress.h:33
#define PROGRESS_VACUUM_TOTAL_HEAP_BLKS
Definition: progress.h:22
#define PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES
Definition: progress.h:26
#define PROGRESS_VACUUM_HEAP_BLKS_SCANNED
Definition: progress.h:23
#define PROGRESS_VACUUM_HEAP_BLKS_VACUUMED
Definition: progress.h:24
BlockNumber blkno
Definition: vacuumlazy.c:170
void vacuum_delay_point(void)
Definition: vacuum.c:2362
double vac_estimate_reltuples(Relation relation, BlockNumber total_pages, BlockNumber scanned_pages, double scanned_tuples)
Definition: vacuum.c:1314
static void lazy_vacuum(LVRelState *vacrel)
Definition: vacuumlazy.c:1867
static void lazy_cleanup_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2360
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
Definition: vacuumlazy.c:1656
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
Definition: vacuumlazy.c:1291
static bool heap_vac_scan_next_block(LVRelState *vacrel, BlockNumber *blkno, bool *all_visible_according_to_vm)
Definition: vacuumlazy.c:1094
static void lazy_scan_prune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool all_visible_according_to_vm, bool *has_lpdead_items)
Definition: vacuumlazy.c:1414
#define FAILSAFE_EVERY_PAGES
Definition: vacuumlazy.c:93
#define VACUUM_FSM_EVERY_PAGES
Definition: vacuumlazy.c:102
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)

References LVRelState::aggressive, Assert, LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage(), BufferIsValid(), ConditionalLockBufferForCleanup(), LVRelState::consider_bypass_optimization, LVRelState::current_block, LVRelState::dead_items, LVRelState::dead_items_info, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, FAILSAFE_EVERY_PAGES, FreeSpaceMapVacuumRange(), heap_vac_scan_next_block(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_check_wraparound_failsafe(), lazy_cleanup_all_indexes(), lazy_scan_new_or_empty(), lazy_scan_noprune(), lazy_scan_prune(), lazy_vacuum(), LVRelState::live_tuples, LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, Max, VacDeadItemsInfo::max_bytes, LVRelState::missed_dead_tuples, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_vmbuffer, LVRelState::nindexes, VacDeadItemsInfo::num_items, PageGetHeapFreeSpace(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, RBM_NORMAL, ReadBufferExtended(), LVRelState::recently_dead_tuples, RecordPageWithFreeSpace(), LVRelState::rel, LVRelState::rel_pages, ReleaseBuffer(), LVRelState::scanned_pages, TidStoreMemoryUsage(), UnlockReleaseBuffer(), update_vacuum_error_info(), vac_estimate_reltuples(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_FSM_EVERY_PAGES, and visibilitymap_pin().

Referenced by heap_vacuum_rel().
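The memory-budget check near the top of the per-block loop is what splits one VACUUM into multiple index-vacuuming cycles. The sketch below is a simplified stand-in (a plain byte counter plays the role of the TidStore and its max_bytes limit; none of these names are PostgreSQL APIs) that captures just that batching pattern: accumulate dead-item bytes per block, and once the budget is exceeded, run a vacuuming cycle and start a fresh batch.

#include <stddef.h>

#define DEAD_ITEMS_BUDGET 1024          /* stand-in for dead_items_info->max_bytes */

typedef struct DeadItemBatch
{
    size_t      used;                   /* stand-in for TidStoreMemoryUsage() */
} DeadItemBatch;

static void
vacuum_cycle(DeadItemBatch *batch)
{
    /* the real code calls lazy_vacuum(): ambulkdelete plus a second heap pass */
    batch->used = 0;                    /* dead_items_reset() equivalent */
}

static void
scan_block(DeadItemBatch *batch, size_t block_dead_bytes)
{
    /*
     * lazy_scan_heap() checks the budget before tackling each block; once it
     * is exceeded, a full index+heap vacuuming cycle runs and the batch
     * starts over.
     */
    if (batch->used > DEAD_ITEMS_BUDGET)
        vacuum_cycle(batch);

    batch->used += block_dead_bytes;    /* dead_items_add() equivalent */
}

int
main(void)
{
    DeadItemBatch batch = {0};

    for (int blkno = 0; blkno < 100; blkno++)
        scan_block(&batch, 64);         /* pretend each block adds 64 bytes of TIDs */
    return 0;
}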

◆ lazy_scan_new_or_empty()

static bool lazy_scan_new_or_empty ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool  sharelock,
Buffer  vmbuffer 
)
static

Definition at line 1291 of file vacuumlazy.c.

1293 {
1294  Size freespace;
1295 
1296  if (PageIsNew(page))
1297  {
1298  /*
1299  * All-zeroes pages can be left over if either a backend extends the
1300  * relation by a single page, but crashes before the newly initialized
1301  * page has been written out, or when bulk-extending the relation
1302  * (which creates a number of empty pages at the tail end of the
1303  * relation), and then enters them into the FSM.
1304  *
1305  * Note we do not enter the page into the visibilitymap. That has the
1306  * downside that we repeatedly visit this page in subsequent vacuums,
1307  * but otherwise we'll never discover the space on a promoted standby.
1308  * The harm of repeated checking ought to normally not be too bad. The
1309  * space usually should be used at some point, otherwise there
1310  * wouldn't be any regular vacuums.
1311  *
1312  * Make sure these pages are in the FSM, to ensure they can be reused.
1313  * Do that by testing if there's any space recorded for the page. If
1314  * not, enter it. We do so after releasing the lock on the heap page,
1315  * the FSM is approximate, after all.
1316  */
1318 
1319  if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
1320  {
1321  freespace = BLCKSZ - SizeOfPageHeaderData;
1322 
1323  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1324  }
1325 
1326  return true;
1327  }
1328 
1329  if (PageIsEmpty(page))
1330  {
1331  /*
1332  * It seems likely that caller will always be able to get a cleanup
1333  * lock on an empty page. But don't take any chances -- escalate to
1334  * an exclusive lock (still don't need a cleanup lock, though).
1335  */
1336  if (sharelock)
1337  {
1340 
1341  if (!PageIsEmpty(page))
1342  {
1343  /* page isn't new or empty -- keep lock and pin for now */
1344  return false;
1345  }
1346  }
1347  else
1348  {
1349  /* Already have a full cleanup lock (which is more than enough) */
1350  }
1351 
1352  /*
1353  * Unlike new pages, empty pages are always set all-visible and
1354  * all-frozen.
1355  */
1356  if (!PageIsAllVisible(page))
1357  {
1359 
1360  /* mark buffer dirty before writing a WAL record */
1362 
1363  /*
1364  * It's possible that another backend has extended the heap,
1365  * initialized the page, and then failed to WAL-log the page due
1366  * to an ERROR. Since heap extension is not WAL-logged, recovery
1367  * might try to replay our record setting the page all-visible and
1368  * find that the page isn't initialized, which will cause a PANIC.
1369  * To prevent that, check whether the page has been previously
1370  * WAL-logged, and if not, do that now.
1371  */
1372  if (RelationNeedsWAL(vacrel->rel) &&
1373  PageGetLSN(page) == InvalidXLogRecPtr)
1374  log_newpage_buffer(buf, true);
1375 
1376  PageSetAllVisible(page);
1377  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1378  vmbuffer, InvalidTransactionId,
1380  END_CRIT_SECTION();
1381  }
1382 
1383  freespace = PageGetHeapFreeSpace(page);
1385  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1386  return true;
1387  }
1388 
1389  /* page isn't new or empty -- keep lock and pin */
1390  return false;
1391 }
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2532
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:191
#define SizeOfPageHeaderData
Definition: bufpage.h:216
static void PageSetAllVisible(Page page)
Definition: bufpage.h:434
static bool PageIsAllVisible(Page page)
Definition: bufpage.h:429
static XLogRecPtr PageGetLSN(const char *page)
Definition: bufpage.h:386
Size GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk)
Definition: freespace.c:244
#define START_CRIT_SECTION()
Definition: miscadmin.h:149
#define END_CRIT_SECTION()
Definition: miscadmin.h:151
#define RelationNeedsWAL(relation)
Definition: rel.h:628
void visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
Definition: xloginsert.c:1237

References buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, END_CRIT_SECTION, GetRecordedFreeSpace(), InvalidTransactionId, InvalidXLogRecPtr, LockBuffer(), log_newpage_buffer(), MarkBufferDirty(), PageGetHeapFreeSpace(), PageGetLSN(), PageIsAllVisible(), PageIsEmpty(), PageIsNew(), PageSetAllVisible(), RecordPageWithFreeSpace(), LVRelState::rel, RelationNeedsWAL, SizeOfPageHeaderData, START_CRIT_SECTION, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_set().

Referenced by lazy_scan_heap().
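For the all-zeroes (PageIsNew) branch, the free space recorded in the FSM is simply the block minus the page header. The sketch below is illustrative arithmetic only; the 24-byte header is an assumption matching SizeOfPageHeaderData on typical builds with default options.

#include <stdio.h>

int
main(void)
{
    const unsigned blcksz = 8192;       /* default BLCKSZ */
    const unsigned page_header = 24;    /* SizeOfPageHeaderData on typical builds */

    /* free space entered into the FSM for a never-initialized page */
    printf("free space recorded for an all-zeroes page: %u bytes\n",
           blcksz - page_header);
    return 0;
}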

◆ lazy_scan_noprune()

static bool lazy_scan_noprune ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool *  has_lpdead_items 
)
static

Definition at line 1656 of file vacuumlazy.c.

1661 {
1662  OffsetNumber offnum,
1663  maxoff;
1664  int lpdead_items,
1665  live_tuples,
1666  recently_dead_tuples,
1667  missed_dead_tuples;
1668  bool hastup;
1669  HeapTupleHeader tupleheader;
1670  TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
1671  MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid;
1672  OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
1673 
1674  Assert(BufferGetBlockNumber(buf) == blkno);
1675 
1676  hastup = false; /* for now */
1677 
1678  lpdead_items = 0;
1679  live_tuples = 0;
1680  recently_dead_tuples = 0;
1681  missed_dead_tuples = 0;
1682 
1683  maxoff = PageGetMaxOffsetNumber(page);
1684  for (offnum = FirstOffsetNumber;
1685  offnum <= maxoff;
1686  offnum = OffsetNumberNext(offnum))
1687  {
1688  ItemId itemid;
1689  HeapTupleData tuple;
1690 
1691  vacrel->offnum = offnum;
1692  itemid = PageGetItemId(page, offnum);
1693 
1694  if (!ItemIdIsUsed(itemid))
1695  continue;
1696 
1697  if (ItemIdIsRedirected(itemid))
1698  {
1699  hastup = true;
1700  continue;
1701  }
1702 
1703  if (ItemIdIsDead(itemid))
1704  {
1705  /*
1706  * Deliberately don't set hastup=true here. See same point in
1707  * lazy_scan_prune for an explanation.
1708  */
1709  deadoffsets[lpdead_items++] = offnum;
1710  continue;
1711  }
1712 
1713  hastup = true; /* page prevents rel truncation */
1714  tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
1715  if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
1716  &NoFreezePageRelfrozenXid,
1717  &NoFreezePageRelminMxid))
1718  {
1719  /* Tuple with XID < FreezeLimit (or MXID < MultiXactCutoff) */
1720  if (vacrel->aggressive)
1721  {
1722  /*
1723  * Aggressive VACUUMs must always be able to advance rel's
1724  * relfrozenxid to a value >= FreezeLimit (and be able to
1725  * advance rel's relminmxid to a value >= MultiXactCutoff).
1726  * The ongoing aggressive VACUUM won't be able to do that
1727  * unless it can freeze an XID (or MXID) from this tuple now.
1728  *
1729  * The only safe option is to have caller perform processing
1730  * of this page using lazy_scan_prune. Caller might have to
1731  * wait a while for a cleanup lock, but it can't be helped.
1732  */
1733  vacrel->offnum = InvalidOffsetNumber;
1734  return false;
1735  }
1736 
1737  /*
1738  * Non-aggressive VACUUMs are under no obligation to advance
1739  * relfrozenxid (even by one XID). We can be much laxer here.
1740  *
1741  * Currently we always just accept an older final relfrozenxid
1742  * and/or relminmxid value. We never make caller wait or work a
1743  * little harder, even when it likely makes sense to do so.
1744  */
1745  }
1746 
1747  ItemPointerSet(&(tuple.t_self), blkno, offnum);
1748  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
1749  tuple.t_len = ItemIdGetLength(itemid);
1750  tuple.t_tableOid = RelationGetRelid(vacrel->rel);
1751 
1752  switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
1753  buf))
1754  {
1756  case HEAPTUPLE_LIVE:
1757 
1758  /*
1759  * Count both cases as live, just like lazy_scan_prune
1760  */
1761  live_tuples++;
1762 
1763  break;
1764  case HEAPTUPLE_DEAD:
1765 
1766  /*
1767  * There is some useful work for pruning to do, that won't be
1768  * done due to failure to get a cleanup lock.
1769  */
1770  missed_dead_tuples++;
1771  break;
1773 
1774  /*
1775  * Count in recently_dead_tuples, just like lazy_scan_prune
1776  */
1777  recently_dead_tuples++;
1778  break;
1780 
1781  /*
1782  * Do not count these rows as live, just like lazy_scan_prune
1783  */
1784  break;
1785  default:
1786  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1787  break;
1788  }
1789  }
1790 
1791  vacrel->offnum = InvalidOffsetNumber;
1792 
1793  /*
1794  * By here we know for sure that caller can put off freezing and pruning
1795  * this particular page until the next VACUUM. Remember its details now.
1796  * (lazy_scan_prune expects a clean slate, so we have to do this last.)
1797  */
1798  vacrel->NewRelfrozenXid = NoFreezePageRelfrozenXid;
1799  vacrel->NewRelminMxid = NoFreezePageRelminMxid;
1800 
1801  /* Save any LP_DEAD items found on the page in dead_items */
1802  if (vacrel->nindexes == 0)
1803  {
1804  /* Using one-pass strategy (since table has no indexes) */
1805  if (lpdead_items > 0)
1806  {
1807  /*
1808  * Perfunctory handling for the corner case where a single pass
1809  * strategy VACUUM cannot get a cleanup lock, and it turns out
1810  * that there is one or more LP_DEAD items: just count the LP_DEAD
1811  * items as missed_dead_tuples instead. (This is a bit dishonest,
1812  * but it beats having to maintain specialized heap vacuuming code
1813  * forever, for vanishingly little benefit.)
1814  */
1815  hastup = true;
1816  missed_dead_tuples += lpdead_items;
1817  }
1818  }
1819  else if (lpdead_items > 0)
1820  {
1821  /*
1822  * Page has LP_DEAD items, and so any references/TIDs that remain in
1823  * indexes will be deleted during index vacuuming (and then marked
1824  * LP_UNUSED in the heap)
1825  */
1826  vacrel->lpdead_item_pages++;
1827 
1828  dead_items_add(vacrel, blkno, deadoffsets, lpdead_items);
1829 
1830  vacrel->lpdead_items += lpdead_items;
1831  }
1832 
1833  /*
1834  * Finally, add relevant page-local counts to whole-VACUUM counts
1835  */
1836  vacrel->live_tuples += live_tuples;
1837  vacrel->recently_dead_tuples += recently_dead_tuples;
1838  vacrel->missed_dead_tuples += missed_dead_tuples;
1839  if (missed_dead_tuples > 0)
1840  vacrel->missed_dead_pages++;
1841 
1842  /* Can't truncate this page */
1843  if (hastup)
1844  vacrel->nonempty_pages = blkno + 1;
1845 
1846  /* Did we find LP_DEAD items? */
1847  *has_lpdead_items = (lpdead_items > 0);
1848 
1849  /* Caller won't need to call lazy_scan_prune with same page */
1850  return true;
1851 }
TransactionId MultiXactId
Definition: c.h:641
bool heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid)
Definition: heapam.c:7703
#define MaxHeapTuplesPerPage
Definition: htup_details.h:572
static void dead_items_add(LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
Definition: vacuumlazy.c:2895

References LVRelState::aggressive, Assert, buf, BufferGetBlockNumber(), LVRelState::cutoffs, dead_items_add(), elog, ERROR, FirstOffsetNumber, heap_tuple_should_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, ItemIdGetLength, ItemIdIsDead, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MaxHeapTuplesPerPage, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::recently_dead_tuples, LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by lazy_scan_heap().
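Both lazy_scan_noprune() and lazy_scan_prune() advance nonempty_pages to blkno + 1 whenever a page contains something that prevents truncation; should_attempt_truncation() later measures the potentially freeable tail against this watermark. The sketch below is a hypothetical illustration of that bookkeeping, not PostgreSQL code.

#include <stdint.h>
#include <stdio.h>

/* pages [0, nonempty_pages) must be kept; blocks past it are truncation candidates */
static void
note_page_prevents_truncation(uint32_t *nonempty_pages, uint32_t blkno)
{
    *nonempty_pages = blkno + 1;
}

int
main(void)
{
    uint32_t    rel_pages = 10000;
    uint32_t    nonempty_pages = 0;

    /* pretend the last page with surviving tuples was block 7499 */
    note_page_prevents_truncation(&nonempty_pages, 7499);

    printf("potentially truncatable tail: %u pages\n", rel_pages - nonempty_pages);
    return 0;
}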

◆ lazy_scan_prune()

static void lazy_scan_prune ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
Buffer  vmbuffer,
bool  all_visible_according_to_vm,
bool *  has_lpdead_items 
)
static

Definition at line 1414 of file vacuumlazy.c.

1421 {
1422  Relation rel = vacrel->rel;
1423  PruneFreezeResult presult;
1424  int prune_options = 0;
1425 
1426  Assert(BufferGetBlockNumber(buf) == blkno);
1427 
1428  /*
1429  * Prune all HOT-update chains and potentially freeze tuples on this page.
1430  *
1431  * If the relation has no indexes, we can immediately mark would-be dead
1432  * items LP_UNUSED.
1433  *
1434  * The number of tuples removed from the page is returned in
1435  * presult.ndeleted. It should not be confused with presult.lpdead_items;
1436  * presult.lpdead_items's final value can be thought of as the number of
1437  * tuples that were deleted from indexes.
1438  *
1439  * We will update the VM after collecting LP_DEAD items and freezing
1440  * tuples. Pruning will have determined whether or not the page is
1441  * all-visible.
1442  */
1443  prune_options = HEAP_PAGE_PRUNE_FREEZE;
1444  if (vacrel->nindexes == 0)
1445  prune_options |= HEAP_PAGE_PRUNE_MARK_UNUSED_NOW;
1446 
1447  heap_page_prune_and_freeze(rel, buf, vacrel->vistest, prune_options,
1448  &vacrel->cutoffs, &presult, PRUNE_VACUUM_SCAN,
1449  &vacrel->offnum,
1450  &vacrel->NewRelfrozenXid, &vacrel->NewRelminMxid);
1451 
1454 
1455  if (presult.nfrozen > 0)
1456  {
1457  /*
1458  * We don't increment the frozen_pages instrumentation counter when
1459  * nfrozen == 0, since it only counts pages with newly frozen tuples
1460  * (don't confuse that with pages newly set all-frozen in VM).
1461  */
1462  vacrel->frozen_pages++;
1463  }
1464 
1465  /*
1466  * VACUUM will call heap_page_is_all_visible() during the second pass over
1467  * the heap to determine all_visible and all_frozen for the page -- this
1468  * is a specialized version of the logic from this function. Now that
1469  * we've finished pruning and freezing, make sure that we're in total
1470  * agreement with heap_page_is_all_visible() using an assertion.
1471  */
1472 #ifdef USE_ASSERT_CHECKING
1473  /* Note that all_frozen value does not matter when !all_visible */
1474  if (presult.all_visible)
1475  {
1476  TransactionId debug_cutoff;
1477  bool debug_all_frozen;
1478 
1479  Assert(presult.lpdead_items == 0);
1480 
1481  if (!heap_page_is_all_visible(vacrel, buf,
1482  &debug_cutoff, &debug_all_frozen))
1483  Assert(false);
1484 
1485  Assert(presult.all_frozen == debug_all_frozen);
1486 
1487  Assert(!TransactionIdIsValid(debug_cutoff) ||
1488  debug_cutoff == presult.vm_conflict_horizon);
1489  }
1490 #endif
1491 
1492  /*
1493  * Now save details of the LP_DEAD items from the page in vacrel
1494  */
1495  if (presult.lpdead_items > 0)
1496  {
1497  vacrel->lpdead_item_pages++;
1498 
1499  /*
1500  * deadoffsets are collected incrementally in
1501  * heap_page_prune_and_freeze() as each dead line pointer is recorded,
1502  * with an indeterminate order, but dead_items_add requires them to be
1503  * sorted.
1504  */
1505  qsort(presult.deadoffsets, presult.lpdead_items, sizeof(OffsetNumber),
1507 
1508  dead_items_add(vacrel, blkno, presult.deadoffsets, presult.lpdead_items);
1509  }
1510 
1511  /* Finally, add page-local counts to whole-VACUUM counts */
1512  vacrel->tuples_deleted += presult.ndeleted;
1513  vacrel->tuples_frozen += presult.nfrozen;
1514  vacrel->lpdead_items += presult.lpdead_items;
1515  vacrel->live_tuples += presult.live_tuples;
1516  vacrel->recently_dead_tuples += presult.recently_dead_tuples;
1517 
1518  /* Can't truncate this page */
1519  if (presult.hastup)
1520  vacrel->nonempty_pages = blkno + 1;
1521 
1522  /* Did we find LP_DEAD items? */
1523  *has_lpdead_items = (presult.lpdead_items > 0);
1524 
1525  Assert(!presult.all_visible || !(*has_lpdead_items));
1526 
1527  /*
1528  * Handle setting visibility map bit based on information from the VM (as
1529  * of last heap_vac_scan_next_block() call), and from all_visible and
1530  * all_frozen variables
1531  */
1532  if (!all_visible_according_to_vm && presult.all_visible)
1533  {
1535 
1536  if (presult.all_frozen)
1537  {
1539  flags |= VISIBILITYMAP_ALL_FROZEN;
1540  }
1541 
1542  /*
1543  * It should never be the case that the visibility map page is set
1544  * while the page-level bit is clear, but the reverse is allowed (if
1545  * checksums are not enabled). Regardless, set both bits so that we
1546  * get back in sync.
1547  *
1548  * NB: If the heap page is all-visible but the VM bit is not set, we
1549  * don't need to dirty the heap page. However, if checksums are
1550  * enabled, we do need to make sure that the heap page is dirtied
1551  * before passing it to visibilitymap_set(), because it may be logged.
1552  * Given that this situation should only happen in rare cases after a
1553  * crash, it is not worth optimizing.
1554  */
1555  PageSetAllVisible(page);
1557  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1558  vmbuffer, presult.vm_conflict_horizon,
1559  flags);
1560  }
1561 
1562  /*
1563  * As of PostgreSQL 9.2, the visibility map bit should never be set if the
1564  * page-level bit is clear. However, it's possible that the bit got
1565  * cleared after heap_vac_scan_next_block() was called, so we must recheck
1566  * with buffer lock before concluding that the VM is corrupt.
1567  */
1568  else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
1569  visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
1570  {
1571  elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
1572  vacrel->relname, blkno);
1573  visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
1575  }
1576 
1577  /*
1578  * It's possible for the value returned by
1579  * GetOldestNonRemovableTransactionId() to move backwards, so it's not
1580  * wrong for us to see tuples that appear to not be visible to everyone
1581  * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value
1582  * never moves backwards, but GetOldestNonRemovableTransactionId() is
1583  * conservative and sometimes returns a value that's unnecessarily small,
1584  * so if we see that contradiction it just means that the tuples that we
1585  * think are not visible to everyone yet actually are, and the
1586  * PD_ALL_VISIBLE flag is correct.
1587  *
1588  * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
1589  * however.
1590  */
1591  else if (presult.lpdead_items > 0 && PageIsAllVisible(page))
1592  {
1593  elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
1594  vacrel->relname, blkno);
1595  PageClearAllVisible(page);
1597  visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
1599  }
1600 
1601  /*
1602  * If the all-visible page is all-frozen but not marked as such yet, mark
1603  * it as all-frozen. Note that all_frozen is only valid if all_visible is
1604  * true, so we must check both all_visible and all_frozen.
1605  */
1606  else if (all_visible_according_to_vm && presult.all_visible &&
1607  presult.all_frozen && !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
1608  {
1609  /*
1610  * Avoid relying on all_visible_according_to_vm as a proxy for the
1611  * page-level PD_ALL_VISIBLE bit being set, since it might have become
1612  * stale -- even when all_visible is set
1613  */
1614  if (!PageIsAllVisible(page))
1615  {
1616  PageSetAllVisible(page);
1618  }
1619 
1620  /*
1621  * Set the page all-frozen (and all-visible) in the VM.
1622  *
1623  * We can pass InvalidTransactionId as our cutoff_xid, since a
1624  * snapshotConflictHorizon sufficient to make everything safe for REDO
1625  * was logged when the page's tuples were frozen.
1626  */
1628  visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
1629  vmbuffer, InvalidTransactionId,
1632  }
1633 }
static void PageClearAllVisible(Page page)
Definition: bufpage.h:439
#define HEAP_PAGE_PRUNE_FREEZE
Definition: heapam.h:43
@ PRUNE_VACUUM_SCAN
Definition: heapam.h:271
#define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW
Definition: heapam.h:42
#define MultiXactIdIsValid(multi)
Definition: multixact.h:28
#define qsort(a, b, c, d)
Definition: port.h:447
void heap_page_prune_and_freeze(Relation relation, Buffer buffer, GlobalVisState *vistest, int options, struct VacuumCutoffs *cutoffs, PruneFreezeResult *presult, PruneReason reason, OffsetNumber *off_loc, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
Definition: pruneheap.c:350
int recently_dead_tuples
Definition: heapam.h:235
TransactionId vm_conflict_horizon
Definition: heapam.h:250
OffsetNumber deadoffsets[MaxHeapTuplesPerPage]
Definition: heapam.h:264
bool all_visible
Definition: heapam.h:248
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
Definition: vacuumlazy.c:2965
static int cmpOffsetNumbers(const void *a, const void *b)
Definition: vacuumlazy.c:1395
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
#define VM_ALL_FROZEN(r, b, v)
Definition: visibilitymap.h:26
#define VISIBILITYMAP_VALID_BITS

References PruneFreezeResult::all_frozen, PruneFreezeResult::all_visible, Assert, buf, BufferGetBlockNumber(), cmpOffsetNumbers(), LVRelState::cutoffs, dead_items_add(), PruneFreezeResult::deadoffsets, elog, LVRelState::frozen_pages, PruneFreezeResult::hastup, heap_page_is_all_visible(), heap_page_prune_and_freeze(), HEAP_PAGE_PRUNE_FREEZE, HEAP_PAGE_PRUNE_MARK_UNUSED_NOW, InvalidTransactionId, InvalidXLogRecPtr, LVRelState::live_tuples, PruneFreezeResult::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, PruneFreezeResult::lpdead_items, MarkBufferDirty(), MultiXactIdIsValid, PruneFreezeResult::ndeleted, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, PruneFreezeResult::nfrozen, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::offnum, PageClearAllVisible(), PageIsAllVisible(), PageSetAllVisible(), PRUNE_VACUUM_SCAN, qsort, LVRelState::recently_dead_tuples, PruneFreezeResult::recently_dead_tuples, LVRelState::rel, LVRelState::relname, TransactionIdIsValid, LVRelState::tuples_deleted, LVRelState::tuples_frozen, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, LVRelState::vistest, VM_ALL_FROZEN, PruneFreezeResult::vm_conflict_horizon, and WARNING.

Referenced by lazy_scan_heap().
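Because heap_page_prune_and_freeze() records deadoffsets in an indeterminate order while dead_items_add() expects sorted input, the offsets are qsort()'ed with cmpOffsetNumbers(). The comparator below is an assumption-laden stand-in for that helper (the real one lives at vacuumlazy.c:1395), showing why a plain subtraction is a safe three-way compare for 16-bit offsets.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint16_t OffsetNum;     /* hypothetical stand-in for OffsetNumber */

static int
cmp_offset_numbers(const void *a, const void *b)
{
    /* both operands fit in int after promotion, so subtraction cannot overflow */
    return (int) *(const OffsetNum *) a - (int) *(const OffsetNum *) b;
}

int
main(void)
{
    OffsetNum   deadoffsets[] = {17, 3, 42, 9};

    qsort(deadoffsets, 4, sizeof(OffsetNum), cmp_offset_numbers);

    for (int i = 0; i < 4; i++)
        printf("%u ", deadoffsets[i]);
    printf("\n");
    return 0;
}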

◆ lazy_truncate_heap()

static void lazy_truncate_heap ( LVRelState vacrel)
static

Definition at line 2557 of file vacuumlazy.c.

2558 {
2559  BlockNumber orig_rel_pages = vacrel->rel_pages;
2560  BlockNumber new_rel_pages;
2561  bool lock_waiter_detected;
2562  int lock_retry;
2563 
2564  /* Report that we are now truncating */
2567 
2568  /* Update error traceback information one last time */
2571 
2572  /*
2573  * Loop until no more truncating can be done.
2574  */
2575  do
2576  {
2577  /*
2578  * We need full exclusive lock on the relation in order to do
2579  * truncation. If we can't get it, give up rather than waiting --- we
2580  * don't want to block other backends, and we don't want to deadlock
2581  * (which is quite possible considering we already hold a lower-grade
2582  * lock).
2583  */
2584  lock_waiter_detected = false;
2585  lock_retry = 0;
2586  while (true)
2587  {
2589  break;
2590 
2591  /*
2592  * Check for interrupts while trying to (re-)acquire the exclusive
2593  * lock.
2594  */
2596 
2597  if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
2599  {
2600  /*
2601  * We failed to establish the lock in the specified number of
2602  * retries. This means we give up truncating.
2603  */
2604  ereport(vacrel->verbose ? INFO : DEBUG2,
2605  (errmsg("\"%s\": stopping truncate due to conflicting lock request",
2606  vacrel->relname)));
2607  return;
2608  }
2609 
2610  (void) WaitLatch(MyLatch,
2613  WAIT_EVENT_VACUUM_TRUNCATE);
2615  }
2616 
2617  /*
2618  * Now that we have exclusive lock, look to see if the rel has grown
2619  * whilst we were vacuuming with non-exclusive lock. If so, give up;
2620  * the newly added pages presumably contain non-deletable tuples.
2621  */
2622  new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
2623  if (new_rel_pages != orig_rel_pages)
2624  {
2625  /*
2626  * Note: we intentionally don't update vacrel->rel_pages with the
2627  * new rel size here. If we did, it would amount to assuming that
2628  * the new pages are empty, which is unlikely. Leaving the numbers
2629  * alone amounts to assuming that the new pages have the same
2630  * tuple density as existing ones, which is less unlikely.
2631  */
2633  return;
2634  }
2635 
2636  /*
2637  * Scan backwards from the end to verify that the end pages actually
2638  * contain no tuples. This is *necessary*, not optional, because
2639  * other backends could have added tuples to these pages whilst we
2640  * were vacuuming.
2641  */
2642  new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
2643  vacrel->blkno = new_rel_pages;
2644 
2645  if (new_rel_pages >= orig_rel_pages)
2646  {
2647  /* can't do anything after all */
2649  return;
2650  }
2651 
2652  /*
2653  * Okay to truncate.
2654  */
2655  RelationTruncate(vacrel->rel, new_rel_pages);
2656 
2657  /*
2658  * We can release the exclusive lock as soon as we have truncated.
2659  * Other backends can't safely access the relation until they have
2660  * processed the smgr invalidation that smgrtruncate sent out ... but
2661  * that should happen as part of standard invalidation processing once
2662  * they acquire lock on the relation.
2663  */
2665 
2666  /*
2667  * Update statistics. Here, it *is* correct to adjust rel_pages
2668  * without also touching reltuples, since the tuple count wasn't
2669  * changed by the truncation.
2670  */
2671  vacrel->removed_pages += orig_rel_pages - new_rel_pages;
2672  vacrel->rel_pages = new_rel_pages;
2673 
2674  ereport(vacrel->verbose ? INFO : DEBUG2,
2675  (errmsg("table \"%s\": truncated %u to %u pages",
2676  vacrel->relname,
2677  orig_rel_pages, new_rel_pages)));
2678  orig_rel_pages = new_rel_pages;
2679  } while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
2680 }
struct Latch * MyLatch
Definition: globals.c:62
void ResetLatch(Latch *latch)
Definition: latch.c:724
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:517
#define WL_TIMEOUT
Definition: latch.h:130
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:132
#define WL_LATCH_SET
Definition: latch.h:127
void UnlockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:309
bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:274
#define PROGRESS_VACUUM_PHASE_TRUNCATE
Definition: progress.h:37
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:288
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL
Definition: vacuumlazy.c:80
#define VACUUM_TRUNCATE_LOCK_TIMEOUT
Definition: vacuumlazy.c:81
static BlockNumber count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
Definition: vacuumlazy.c:2688

References AccessExclusiveLock, LVRelState::blkno, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), DEBUG2, ereport, errmsg(), INFO, InvalidOffsetNumber, MyLatch, LVRelState::nonempty_pages, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelState::rel, LVRelState::rel_pages, RelationGetNumberOfBlocks, RelationTruncate(), LVRelState::relname, LVRelState::removed_pages, ResetLatch(), UnlockRelation(), update_vacuum_error_info(), VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_TRUNCATE_LOCK_TIMEOUT, VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL, LVRelState::verbose, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

Referenced by heap_vacuum_rel().
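The retry loop's patience follows directly from the two constants declared near the top of vacuumlazy.c: it re-attempts ConditionalLockRelation() every VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL milliseconds and gives up once VACUUM_TRUNCATE_LOCK_TIMEOUT worth of waiting has elapsed. The sketch below is illustrative arithmetic only.

#include <stdio.h>

int
main(void)
{
    const int   wait_interval_ms = 50;  /* VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL */
    const int   timeout_ms = 5000;      /* VACUUM_TRUNCATE_LOCK_TIMEOUT */

    /* 100 attempts, roughly five seconds, before truncation is abandoned */
    printf("lock attempts before giving up: %d (about %d seconds of waiting)\n",
           timeout_ms / wait_interval_ms, timeout_ms / 1000);
    return 0;
}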

◆ lazy_vacuum()

static void lazy_vacuum ( LVRelState vacrel)
static

Definition at line 1867 of file vacuumlazy.c.

1868 {
1869  bool bypass;
1870 
1871  /* Should not end up here with no indexes */
1872  Assert(vacrel->nindexes > 0);
1873  Assert(vacrel->lpdead_item_pages > 0);
1874 
1875  if (!vacrel->do_index_vacuuming)
1876  {
1877  Assert(!vacrel->do_index_cleanup);
1878  dead_items_reset(vacrel);
1879  return;
1880  }
1881 
1882  /*
1883  * Consider bypassing index vacuuming (and heap vacuuming) entirely.
1884  *
1885  * We currently only do this in cases where the number of LP_DEAD items
1886  * for the entire VACUUM operation is close to zero. This avoids sharp
1887  * discontinuities in the duration and overhead of successive VACUUM
1888  * operations that run against the same table with a fixed workload.
1889  * Ideally, successive VACUUM operations will behave as if there are
1890  * exactly zero LP_DEAD items in cases where there are close to zero.
1891  *
1892  * This is likely to be helpful with a table that is continually affected
1893  * by UPDATEs that can mostly apply the HOT optimization, but occasionally
1894  * have small aberrations that lead to just a few heap pages retaining
1895  * only one or two LP_DEAD items. This is pretty common; even when the
1896  * DBA goes out of their way to make UPDATEs use HOT, it is practically
1897  * impossible to predict whether HOT will be applied in 100% of cases.
1898  * It's far easier to ensure that 99%+ of all UPDATEs against a table use
1899  * HOT through careful tuning.
1900  */
1901  bypass = false;
1902  if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
1903  {
1904  BlockNumber threshold;
1905 
1906  Assert(vacrel->num_index_scans == 0);
1907  Assert(vacrel->lpdead_items == vacrel->dead_items_info->num_items);
1908  Assert(vacrel->do_index_vacuuming);
1909  Assert(vacrel->do_index_cleanup);
1910 
1911  /*
1912  * This crossover point at which we'll start to do index vacuuming is
1913  * expressed as a percentage of the total number of heap pages in the
1914  * table that are known to have at least one LP_DEAD item. This is
1915  * much more important than the total number of LP_DEAD items, since
1916  * it's a proxy for the number of heap pages whose visibility map bits
1917  * cannot be set on account of bypassing index and heap vacuuming.
1918  *
1919  * We apply one further precautionary test: the space currently used
1920  * to store the TIDs (TIDs that now all point to LP_DEAD items) must
1921  * not exceed 32MB. This limits the risk that we will bypass index
1922  * vacuuming again and again until eventually there is a VACUUM whose
1923  * dead_items space is not CPU cache resident.
1924  *
1925  * We don't take any special steps to remember the LP_DEAD items (such
1926  * as counting them in our final update to the stats system) when the
1927  * optimization is applied. Though the accounting used in analyze.c's
1928  * acquire_sample_rows() will recognize the same LP_DEAD items as dead
1929  * rows in its own stats report, that's okay. The discrepancy should
1930  * be negligible. If this optimization is ever expanded to cover more
1931  * cases then this may need to be reconsidered.
1932  */
1933  threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
1934  bypass = (vacrel->lpdead_item_pages < threshold &&
1935  (TidStoreMemoryUsage(vacrel->dead_items) < (32L * 1024L * 1024L)));
1936  }
1937 
1938  if (bypass)
1939  {
1940  /*
1941  * There are almost zero TIDs. Behave as if there were precisely
1942  * zero: bypass index vacuuming, but do index cleanup.
1943  *
1944  * We expect that the ongoing VACUUM operation will finish very
1945  * quickly, so there is no point in considering speeding up as a
1946  * failsafe against wraparound failure. (Index cleanup is expected to
1947  * finish very quickly in cases where there were no ambulkdelete()
1948  * calls.)
1949  */
1950  vacrel->do_index_vacuuming = false;
1951  }
1952  else if (lazy_vacuum_all_indexes(vacrel))
1953  {
1954  /*
1955  * We successfully completed a round of index vacuuming. Do related
1956  * heap vacuuming now.
1957  */
1958  lazy_vacuum_heap_rel(vacrel);
1959  }
1960  else
1961  {
1962  /*
1963  * Failsafe case.
1964  *
1965  * We attempted index vacuuming, but didn't finish a full round/full
1966  * index scan. This happens when relfrozenxid or relminmxid is too
1967  * far in the past.
1968  *
1969  * From this point on the VACUUM operation will do no further index
1970  * vacuuming or heap vacuuming. This VACUUM operation won't end up
1971  * back here again.
1972  */
1973  Assert(VacuumFailsafeActive);
1974  }
1975 
1976  /*
1977  * Forget the LP_DEAD items that we just vacuumed (or just decided to not
1978  * vacuum)
1979  */
1980  dead_items_reset(vacrel);
1981 }
static void dead_items_reset(LVRelState *vacrel)
Definition: vacuumlazy.c:2918
#define BYPASS_THRESHOLD_PAGES
Definition: vacuumlazy.c:87
static bool lazy_vacuum_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:1992
static void lazy_vacuum_heap_rel(LVRelState *vacrel)
Definition: vacuumlazy.c:2109

References Assert, BYPASS_THRESHOLD_PAGES, LVRelState::consider_bypass_optimization, LVRelState::dead_items, LVRelState::dead_items_info, dead_items_reset(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, lazy_vacuum_all_indexes(), lazy_vacuum_heap_rel(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, LVRelState::rel_pages, TidStoreMemoryUsage(), and VacuumFailsafeActive.

Referenced by lazy_scan_heap().
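
The bypass crossover described in the comments above can be made concrete with a small standalone sketch (plain C, not part of vacuumlazy.c). The table and memory figures below are invented for illustration; the 2% page threshold and the 32MB cap on dead-item storage are the limits named in the listing (BYPASS_THRESHOLD_PAGES and the TidStoreMemoryUsage() test).

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

int
main(void)
{
    uint32_t rel_pages = 100000;        /* heap pages in the table (illustrative) */
    uint32_t lpdead_item_pages = 1200;  /* pages with at least one LP_DEAD item */
    uint64_t dead_items_mem = 4UL * 1024 * 1024;    /* TID store usage, bytes */

    double  threshold = (double) rel_pages * 0.02;  /* BYPASS_THRESHOLD_PAGES */
    bool    bypass = lpdead_item_pages < threshold &&
                     dead_items_mem < 32UL * 1024 * 1024;

    printf("threshold = %.0f pages, bypass = %s\n",
           threshold, bypass ? "true" : "false");
    return 0;
}

With these inputs the bypass applies (1200 < 2000 pages and 4MB < 32MB), so the VACUUM would skip index and heap vacuuming but still run index cleanup.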

◆ lazy_vacuum_all_indexes()

static bool lazy_vacuum_all_indexes ( LVRelState * vacrel)
static

Definition at line 1992 of file vacuumlazy.c.

1993 {
1994  bool allindexes = true;
1995  double old_live_tuples = vacrel->rel->rd_rel->reltuples;
1996  const int progress_start_index[] = {
1997  PROGRESS_VACUUM_PHASE,
1998  PROGRESS_VACUUM_INDEXES_TOTAL
1999  };
2000  const int progress_end_index[] = {
2001  PROGRESS_VACUUM_PHASE,
2002  PROGRESS_VACUUM_INDEXES_TOTAL,
2003  PROGRESS_VACUUM_NUM_INDEX_VACUUMS
2004  };
2005  int64 progress_start_val[2];
2006  int64 progress_end_val[3];
2007 
2008  Assert(vacrel->nindexes > 0);
2009  Assert(vacrel->do_index_vacuuming);
2010  Assert(vacrel->do_index_cleanup);
2011 
2012  /* Precheck for XID wraparound emergencies */
2013  if (lazy_check_wraparound_failsafe(vacrel))
2014  {
2015  /* Wraparound emergency -- don't even start an index scan */
2016  return false;
2017  }
2018 
2019  /*
2020  * Report that we are now vacuuming indexes and the number of indexes to
2021  * vacuum.
2022  */
2023  progress_start_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_INDEX;
2024  progress_start_val[1] = vacrel->nindexes;
2025  pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
2026 
2027  if (!ParallelVacuumIsActive(vacrel))
2028  {
2029  for (int idx = 0; idx < vacrel->nindexes; idx++)
2030  {
2031  Relation indrel = vacrel->indrels[idx];
2032  IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2033 
2034  vacrel->indstats[idx] = lazy_vacuum_one_index(indrel, istat,
2035  old_live_tuples,
2036  vacrel);
2037 
2038  /* Report the number of indexes vacuumed */
2039  pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
2040  idx + 1);
2041 
2042  if (lazy_check_wraparound_failsafe(vacrel))
2043  {
2044  /* Wraparound emergency -- end current index scan */
2045  allindexes = false;
2046  break;
2047  }
2048  }
2049  }
2050  else
2051  {
2052  /* Outsource everything to parallel variant */
2053  parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples,
2054  vacrel->num_index_scans);
2055 
2056  /*
2057  * Do a postcheck to consider applying wraparound failsafe now. Note
2058  * that parallel VACUUM only gets the precheck and this postcheck.
2059  */
2060  if (lazy_check_wraparound_failsafe(vacrel))
2061  allindexes = false;
2062  }
2063 
2064  /*
2065  * We delete all LP_DEAD items from the first heap pass in all indexes on
2066  * each call here (except calls where we choose to do the failsafe). This
2067  * makes the next call to lazy_vacuum_heap_rel() safe (except in the event
2068  * of the failsafe triggering, which prevents the next call from taking
2069  * place).
2070  */
2071  Assert(vacrel->num_index_scans > 0 ||
2072  vacrel->dead_items_info->num_items == vacrel->lpdead_items);
2073  Assert(allindexes || VacuumFailsafeActive);
2074 
2075  /*
2076  * Increase and report the number of index scans. Also, we reset
2077  * PROGRESS_VACUUM_INDEXES_TOTAL and PROGRESS_VACUUM_INDEXES_PROCESSED.
2078  *
2079  * We deliberately include the case where we started a round of bulk
2080  * deletes that we weren't able to finish due to the failsafe triggering.
2081  */
2082  vacrel->num_index_scans++;
2083  progress_end_val[0] = 0;
2084  progress_end_val[1] = 0;
2085  progress_end_val[2] = vacrel->num_index_scans;
2086  pgstat_progress_update_multi_param(3, progress_end_index, progress_end_val);
2087 
2088  return allindexes;
2089 }
#define PROGRESS_VACUUM_NUM_INDEX_VACUUMS
Definition: progress.h:25
#define PROGRESS_VACUUM_PHASE_VACUUM_INDEX
Definition: progress.h:34
static IndexBulkDeleteResult * lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
Definition: vacuumlazy.c:2428
void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans)

References Assert, LVRelState::dead_items_info, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, idx(), LVRelState::indrels, LVRelState::indstats, lazy_check_wraparound_failsafe(), lazy_vacuum_one_index(), LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, parallel_vacuum_bulkdel_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, LVRelState::pvs, RelationData::rd_rel, LVRelState::rel, and VacuumFailsafeActive.

Referenced by lazy_vacuum().
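
A compressed sketch of the serial branch's control flow may help: each index is bulk-deleted in turn, the wraparound failsafe is rechecked after every index, and a firing failsafe abandons the rest of the round, which is what makes the function return false and lazy_vacuum() skip the second heap pass. In the standalone sketch below, check_failsafe() and bulkdel_index() are hypothetical stand-ins for lazy_check_wraparound_failsafe() and lazy_vacuum_one_index().

#include <stdbool.h>
#include <stdio.h>

static bool
check_failsafe(int indexes_done)
{
    /* stand-in: pretend the failsafe fires after the second index */
    return indexes_done >= 2;
}

static void
bulkdel_index(int idx)
{
    printf("bulk-deleting index %d\n", idx);
}

int
main(void)
{
    int     nindexes = 4;
    bool    allindexes = true;

    for (int idx = 0; idx < nindexes; idx++)
    {
        bulkdel_index(idx);
        if (check_failsafe(idx + 1))
        {
            /* wraparound emergency: stop the current round of index vacuuming */
            allindexes = false;
            break;
        }
    }
    printf("allindexes = %s\n", allindexes ? "true" : "false");
    return 0;
}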

◆ lazy_vacuum_heap_page()

static void lazy_vacuum_heap_page ( LVRelState * vacrel,
BlockNumber  blkno,
Buffer  buffer,
OffsetNumber * deadoffsets,
int  num_offsets,
Buffer  vmbuffer 
)
static

Definition at line 2202 of file vacuumlazy.c.

2205 {
2206  Page page = BufferGetPage(buffer);
2207  OffsetNumber unused[MaxHeapTuplesPerPage];
2208  int nunused = 0;
2209  TransactionId visibility_cutoff_xid;
2210  bool all_frozen;
2211  LVSavedErrInfo saved_err_info;
2212 
2213  Assert(vacrel->do_index_vacuuming);
2214 
2215  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
2216 
2217  /* Update error traceback information */
2218  update_vacuum_error_info(vacrel, &saved_err_info,
2219  VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
2220  InvalidOffsetNumber);
2221 
2222  START_CRIT_SECTION();
2223 
2224  for (int i = 0; i < num_offsets; i++)
2225  {
2226  ItemId itemid;
2227  OffsetNumber toff = deadoffsets[i];
2228 
2229  itemid = PageGetItemId(page, toff);
2230 
2231  Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
2232  ItemIdSetUnused(itemid);
2233  unused[nunused++] = toff;
2234  }
2235 
2236  Assert(nunused > 0);
2237 
2238  /* Attempt to truncate line pointer array now */
2239  PageTruncateLinePointerArray(page);
2240 
2241  /*
2242  * Mark buffer dirty before we write WAL.
2243  */
2244  MarkBufferDirty(buffer);
2245 
2246  /* XLOG stuff */
2247  if (RelationNeedsWAL(vacrel->rel))
2248  {
2249  log_heap_prune_and_freeze(vacrel->rel, buffer,
2250  InvalidTransactionId,
2251  false, /* no cleanup lock required */
2252  PRUNE_VACUUM_CLEANUP,
2253  NULL, 0, /* frozen */
2254  NULL, 0, /* redirected */
2255  NULL, 0, /* dead */
2256  unused, nunused);
2257  }
2258 
2259  /*
2260  * End critical section, so we safely can do visibility tests (which
2261  * possibly need to perform IO and allocate memory!). If we crash now the
2262  * page (including the corresponding vm bit) might not be marked all
2263  * visible, but that's fine. A later vacuum will fix that.
2264  */
2265  END_CRIT_SECTION();
2266 
2267  /*
2268  * Now that we have removed the LP_DEAD items from the page, once again
2269  * check if the page has become all-visible. The page is already marked
2270  * dirty, exclusively locked, and, if needed, a full page image has been
2271  * emitted.
2272  */
2273  Assert(!PageIsAllVisible(page));
2274  if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
2275  &all_frozen))
2276  {
2277  uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
2278 
2279  if (all_frozen)
2280  {
2281  Assert(!TransactionIdIsValid(visibility_cutoff_xid));
2282  flags |= VISIBILITYMAP_ALL_FROZEN;
2283  }
2284 
2285  PageSetAllVisible(page);
2286  visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr,
2287  vmbuffer, visibility_cutoff_xid, flags);
2288  }
2289 
2290  /* Revert to the previous phase information for error traceback */
2291  restore_vacuum_error_info(vacrel, &saved_err_info);
2292 }
void PageTruncateLinePointerArray(Page page)
Definition: bufpage.c:824
@ PRUNE_VACUUM_CLEANUP
Definition: heapam.h:272
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
void log_heap_prune_and_freeze(Relation relation, Buffer buffer, TransactionId conflict_xid, bool cleanup_lock, PruneReason reason, HeapTupleFreeze *frozen, int nfrozen, OffsetNumber *redirected, int nredirected, OffsetNumber *dead, int ndead, OffsetNumber *unused, int nunused)
Definition: pruneheap.c:2053

References Assert, BufferGetPage(), LVRelState::do_index_vacuuming, END_CRIT_SECTION, heap_page_is_all_visible(), i, InvalidOffsetNumber, InvalidTransactionId, InvalidXLogRecPtr, ItemIdHasStorage, ItemIdIsDead, ItemIdSetUnused, log_heap_prune_and_freeze(), MarkBufferDirty(), MaxHeapTuplesPerPage, PageGetItemId(), PageIsAllVisible(), PageSetAllVisible(), PageTruncateLinePointerArray(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PRUNE_VACUUM_CLEANUP, LVRelState::rel, RelationNeedsWAL, restore_vacuum_error_info(), START_CRIT_SECTION, TransactionIdIsValid, update_vacuum_error_info(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_set().

Referenced by lazy_vacuum_heap_rel().
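
The ordering constraints in the body above (modify the page and mark the buffer dirty inside a critical section, write WAL before leaving it, and only then run the all-visible check, which may allocate memory or do I/O) can be outlined as follows. The macros and helpers in this standalone sketch are printf stand-ins for the real START_CRIT_SECTION()/END_CRIT_SECTION(), MarkBufferDirty(), log_heap_prune_and_freeze() and heap_page_is_all_visible() calls; only the order of operations is meant to be illustrative.

#include <stdio.h>
#include <stdbool.h>

#define START_CRIT_SECTION()  printf("begin critical section\n")
#define END_CRIT_SECTION()    printf("end critical section\n")

static void mark_buffer_dirty(void)   { printf("mark buffer dirty\n"); }
static void emit_wal_record(void)     { printf("WAL: prune/freeze record\n"); }
static void set_items_unused(void)    { printf("LP_DEAD -> LP_UNUSED\n"); }
static bool page_is_all_visible(void) { return true; }
static void set_vm_bits(void)         { printf("set visibility map bits\n"); }

int
main(void)
{
    START_CRIT_SECTION();
    set_items_unused();       /* modify the page first ...                  */
    mark_buffer_dirty();      /* ... mark the buffer dirty before WAL ...   */
    emit_wal_record();        /* ... then log the change (if WAL is needed) */
    END_CRIT_SECTION();

    /* visibility tests may allocate or do I/O, so they run outside the
     * critical section */
    if (page_is_all_visible())
        set_vm_bits();
    return 0;
}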

◆ lazy_vacuum_heap_rel()

static void lazy_vacuum_heap_rel ( LVRelState * vacrel)
static

Definition at line 2109 of file vacuumlazy.c.

2110 {
2111  BlockNumber vacuumed_pages = 0;
2112  Buffer vmbuffer = InvalidBuffer;
2113  LVSavedErrInfo saved_err_info;
2114  TidStoreIter *iter;
2115  TidStoreIterResult *iter_result;
2116 
2117  Assert(vacrel->do_index_vacuuming);
2118  Assert(vacrel->do_index_cleanup);
2119  Assert(vacrel->num_index_scans > 0);
2120 
2121  /* Report that we are now vacuuming the heap */
2122  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2123  PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
2124 
2125  /* Update error traceback information */
2126  update_vacuum_error_info(vacrel, &saved_err_info,
2127  VACUUM_ERRCB_PHASE_VACUUM_HEAP,
2128  InvalidBlockNumber, InvalidOffsetNumber);
2129 
2130  iter = TidStoreBeginIterate(vacrel->dead_items);
2131  while ((iter_result = TidStoreIterateNext(iter)) != NULL)
2132  {
2133  BlockNumber blkno;
2134  Buffer buf;
2135  Page page;
2136  Size freespace;
2137  OffsetNumber offsets[MaxOffsetNumber];
2138  int num_offsets;
2139 
2140  vacuum_delay_point();
2141 
2142  blkno = iter_result->blkno;
2143  vacrel->blkno = blkno;
2144 
2145  num_offsets = TidStoreGetBlockOffsets(iter_result, offsets, lengthof(offsets));
2146  Assert(num_offsets <= lengthof(offsets));
2147 
2148  /*
2149  * Pin the visibility map page in case we need to mark the page
2150  * all-visible. In most cases this will be very cheap, because we'll
2151  * already have the correct page pinned anyway.
2152  */
2153  visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
2154 
2155  /* We need a non-cleanup exclusive lock to mark dead_items unused */
2156  buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
2157  vacrel->bstrategy);
2158  LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2159  lazy_vacuum_heap_page(vacrel, blkno, buf, offsets,
2160  num_offsets, vmbuffer);
2161 
2162  /* Now that we've vacuumed the page, record its available space */
2163  page = BufferGetPage(buf);
2164  freespace = PageGetHeapFreeSpace(page);
2165 
2166  UnlockReleaseBuffer(buf);
2167  RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
2168  vacuumed_pages++;
2169  }
2170  TidStoreEndIterate(iter);
2171 
2172  vacrel->blkno = InvalidBlockNumber;
2173  if (BufferIsValid(vmbuffer))
2174  ReleaseBuffer(vmbuffer);
2175 
2176  /*
2177  * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
2178  * the second heap pass. No more, no less.
2179  */
2180  Assert(vacrel->num_index_scans > 1 ||
2181  (vacrel->dead_items_info->num_items == vacrel->lpdead_items &&
2182  vacuumed_pages == vacrel->lpdead_item_pages));
2183 
2184  ereport(DEBUG2,
2185  (errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
2186  vacrel->relname, (long long) vacrel->dead_items_info->num_items,
2187  vacuumed_pages)));
2188 
2189  /* Revert to the previous phase information for error traceback */
2190  restore_vacuum_error_info(vacrel, &saved_err_info);
2191 }
#define lengthof(array)
Definition: c.h:767
#define MaxOffsetNumber
Definition: off.h:28
#define PROGRESS_VACUUM_PHASE_VACUUM_HEAP
Definition: progress.h:35
BlockNumber blkno
Definition: tidstore.h:29
void TidStoreEndIterate(TidStoreIter *iter)
Definition: tidstore.c:526
TidStoreIterResult * TidStoreIterateNext(TidStoreIter *iter)
Definition: tidstore.c:501
int TidStoreGetBlockOffsets(TidStoreIterResult *result, OffsetNumber *offsets, int max_offsets)
Definition: tidstore.c:574
TidStoreIter * TidStoreBeginIterate(TidStore *ts)
Definition: tidstore.c:479
static void lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
Definition: vacuumlazy.c:2202

References Assert, LVRelState::blkno, TidStoreIterResult::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_EXCLUSIVE, BufferGetPage(), BufferIsValid(), LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, ereport, errmsg(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_vacuum_heap_page(), lengthof, LockBuffer(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAIN_FORKNUM, MaxOffsetNumber, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, PageGetHeapFreeSpace(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, RBM_NORMAL, ReadBufferExtended(), RecordPageWithFreeSpace(), LVRelState::rel, ReleaseBuffer(), LVRelState::relname, restore_vacuum_error_info(), TidStoreBeginIterate(), TidStoreEndIterate(), TidStoreGetBlockOffsets(), TidStoreIterateNext(), UnlockReleaseBuffer(), update_vacuum_error_info(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, and visibilitymap_pin().

Referenced by lazy_vacuum().
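
The shape of the iteration may be easier to see in isolation: dead items are consumed one heap block at a time, as a block number plus a batch of offsets, which is what TidStoreIterateNext() and TidStoreGetBlockOffsets() hand back. In the standalone mimic below a plain array stands in for the TID store, and the block numbers and offsets are invented.

#include <stdio.h>
#include <stdint.h>

typedef struct
{
    uint32_t    blkno;          /* heap block with dead items */
    uint16_t    offsets[4];     /* offsets of LP_DEAD line pointers */
    int         num_offsets;
} DeadItemsBatch;

int
main(void)
{
    DeadItemsBatch batches[] = {
        {.blkno = 7,  .offsets = {2, 5, 9}, .num_offsets = 3},
        {.blkno = 42, .offsets = {1},       .num_offsets = 1},
    };

    for (size_t i = 0; i < sizeof(batches) / sizeof(batches[0]); i++)
    {
        DeadItemsBatch *b = &batches[i];

        /* per block: pin the vm page, lock the heap buffer, set the listed
         * items unused, then record the page's free space */
        printf("block %u: %d dead item(s)\n", (unsigned) b->blkno, b->num_offsets);
        for (int j = 0; j < b->num_offsets; j++)
            printf("  offset %u -> LP_UNUSED\n", (unsigned) b->offsets[j]);
    }
    return 0;
}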

◆ lazy_vacuum_one_index()

static IndexBulkDeleteResult * lazy_vacuum_one_index ( Relation  indrel,
IndexBulkDeleteResult * istat,
double  reltuples,
LVRelState * vacrel 
)
static

Definition at line 2428 of file vacuumlazy.c.

2430 {
2431  IndexVacuumInfo ivinfo;
2432  LVSavedErrInfo saved_err_info;
2433 
2434  ivinfo.index = indrel;
2435  ivinfo.heaprel = vacrel->rel;
2436  ivinfo.analyze_only = false;
2437  ivinfo.report_progress = false;
2438  ivinfo.estimated_count = true;
2439  ivinfo.message_level = DEBUG2;
2440  ivinfo.num_heap_tuples = reltuples;
2441  ivinfo.strategy = vacrel->bstrategy;
2442 
2443  /*
2444  * Update error traceback information.
2445  *
2446  * The index name is saved during this phase and restored immediately
2447  * after this phase. See vacuum_error_callback.
2448  */
2449  Assert(vacrel->indname == NULL);
2450  vacrel->indname = pstrdup(RelationGetRelationName(indrel));
2451  update_vacuum_error_info(vacrel, &saved_err_info,
2452  VACUUM_ERRCB_PHASE_VACUUM_INDEX,
2453  InvalidBlockNumber, InvalidOffsetNumber);
2454 
2455  /* Do bulk deletion */
2456  istat = vac_bulkdel_one_index(&ivinfo, istat, vacrel->dead_items,
2457  vacrel->dead_items_info);
2458 
2459  /* Revert to the previous phase information for error traceback */
2460  restore_vacuum_error_info(vacrel, &saved_err_info);
2461  pfree(vacrel->indname);
2462  vacrel->indname = NULL;
2463 
2464  return istat;
2465 }
IndexBulkDeleteResult * vac_bulkdel_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat, TidStore *dead_items, VacDeadItemsInfo *dead_items_info)
Definition: vacuum.c:2516

References IndexVacuumInfo::analyze_only, Assert, LVRelState::bstrategy, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_bulkdel_one_index(), and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by lazy_vacuum_all_indexes().

◆ restore_vacuum_error_info()

static void restore_vacuum_error_info ( LVRelState * vacrel,
const LVSavedErrInfo * saved_vacrel 
)
static

Definition at line 3199 of file vacuumlazy.c.

3201 {
3202  vacrel->blkno = saved_vacrel->blkno;
3203  vacrel->offnum = saved_vacrel->offnum;
3204  vacrel->phase = saved_vacrel->phase;
3205 }
BlockNumber blkno
Definition: vacuumlazy.c:222
VacErrPhase phase
Definition: vacuumlazy.c:224
OffsetNumber offnum
Definition: vacuumlazy.c:223

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().

◆ should_attempt_truncation()

static bool should_attempt_truncation ( LVRelState * vacrel)
static

Definition at line 2537 of file vacuumlazy.c.

2538 {
2539  BlockNumber possibly_freeable;
2540 
2541  if (!vacrel->do_rel_truncate || VacuumFailsafeActive)
2542  return false;
2543 
2544  possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
2545  if (possibly_freeable > 0 &&
2546  (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
2547  possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
2548  return true;
2549 
2550  return false;
2551 }
#define REL_TRUNCATE_MINIMUM
Definition: vacuumlazy.c:69
#define REL_TRUNCATE_FRACTION
Definition: vacuumlazy.c:70

References LVRelState::do_rel_truncate, LVRelState::nonempty_pages, LVRelState::rel_pages, REL_TRUNCATE_FRACTION, REL_TRUNCATE_MINIMUM, and VacuumFailsafeActive.

Referenced by heap_vacuum_rel().
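
The heuristic reduces to simple arithmetic: truncation is only attempted when the prospective tail (rel_pages - nonempty_pages) is non-empty and reaches either the absolute REL_TRUNCATE_MINIMUM cutoff (1000 pages in the current source) or the relative REL_TRUNCATE_FRACTION cutoff (1/16 of the table). The standalone sketch below uses invented page counts.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

int
main(void)
{
    uint32_t rel_pages = 8192;       /* current size of the heap (illustrative) */
    uint32_t nonempty_pages = 7900;  /* first block that cannot be truncated away */

    uint32_t possibly_freeable = rel_pages - nonempty_pages;
    bool     attempt = possibly_freeable > 0 &&
        (possibly_freeable >= 1000 ||            /* REL_TRUNCATE_MINIMUM */
         possibly_freeable >= rel_pages / 16);   /* REL_TRUNCATE_FRACTION */

    printf("possibly_freeable = %u, attempt truncation = %s\n",
           (unsigned) possibly_freeable, attempt ? "yes" : "no");
    return 0;
}

Here only 292 pages would be freed, which is below both cutoffs (1000 and 8192/16 = 512), so the truncation attempt and its AccessExclusiveLock are skipped.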

◆ update_relstats_all_indexes()

static void update_relstats_all_indexes ( LVRelState * vacrel)
static

Definition at line 3081 of file vacuumlazy.c.

3082 {
3083  Relation *indrels = vacrel->indrels;
3084  int nindexes = vacrel->nindexes;
3085  IndexBulkDeleteResult **indstats = vacrel->indstats;
3086 
3087  Assert(vacrel->do_index_cleanup);
3088 
3089  for (int idx = 0; idx < nindexes; idx++)
3090  {
3091  Relation indrel = indrels[idx];
3092  IndexBulkDeleteResult *istat = indstats[idx];
3093 
3094  if (istat == NULL || istat->estimated_count)
3095  continue;
3096 
3097  /* Update index statistics */
3098  vac_update_relstats(indrel,
3099  istat->num_pages,
3100  istat->num_index_tuples,
3101  0,
3102  false,
3103  InvalidTransactionId,
3104  InvalidMultiXactId,
3105  NULL, NULL, false);
3106  }
3107 }
bool estimated_count
Definition: genam.h:78
double num_index_tuples
Definition: genam.h:79

References Assert, LVRelState::do_index_cleanup, IndexBulkDeleteResult::estimated_count, idx(), LVRelState::indrels, LVRelState::indstats, InvalidMultiXactId, InvalidTransactionId, LVRelState::nindexes, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, and vac_update_relstats().

Referenced by heap_vacuum_rel().

◆ update_vacuum_error_info()

static void update_vacuum_error_info ( LVRelState * vacrel,
LVSavedErrInfo * saved_vacrel,
int  phase,
BlockNumber  blkno,
OffsetNumber  offnum 
)
static

Definition at line 3180 of file vacuumlazy.c.

3182 {
3183  if (saved_vacrel)
3184  {
3185  saved_vacrel->offnum = vacrel->offnum;
3186  saved_vacrel->blkno = vacrel->blkno;
3187  saved_vacrel->phase = vacrel->phase;
3188  }
3189 
3190  vacrel->blkno = blkno;
3191  vacrel->offnum = offnum;
3192  vacrel->phase = phase;
3193 }

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_scan_heap(), lazy_truncate_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().
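
Together with restore_vacuum_error_info() this implements a save/overwrite/restore pattern around each nested phase: the caller stashes the current block/offset/phase in a local LVSavedErrInfo, points the error context at the new phase, and puts the old position back when the phase ends. The standalone sketch below uses simplified stand-in types to show the pattern; it is not the real definition.

#include <stdio.h>
#include <stdint.h>

typedef struct
{
    uint32_t    blkno;
    uint16_t    offnum;
    int         phase;
} ErrPos;

static ErrPos current;          /* stands in for the error fields of LVRelState */

static void
update_err_info(ErrPos *saved, int phase, uint32_t blkno, uint16_t offnum)
{
    if (saved)
        *saved = current;       /* remember where we were */
    current.phase = phase;
    current.blkno = blkno;
    current.offnum = offnum;
}

static void
restore_err_info(const ErrPos *saved)
{
    current = *saved;           /* revert to the caller's phase information */
}

int
main(void)
{
    ErrPos  saved;

    current.phase = 1;                  /* e.g. "scan heap"                 */
    update_err_info(&saved, 3, 42, 7);  /* e.g. "vacuum heap", block 42     */
    printf("during: phase %d, block %u\n", current.phase, (unsigned) current.blkno);
    restore_err_info(&saved);
    printf("after:  phase %d, block %u\n", current.phase, (unsigned) current.blkno);
    return 0;
}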

◆ vacuum_error_callback()

static void vacuum_error_callback ( void *  arg)
static

Definition at line 3116 of file vacuumlazy.c.

3117 {
3118  LVRelState *errinfo = arg;
3119 
3120  switch (errinfo->phase)
3121  {
3122  case VACUUM_ERRCB_PHASE_SCAN_HEAP:
3123  if (BlockNumberIsValid(errinfo->blkno))
3124  {
3125  if (OffsetNumberIsValid(errinfo->offnum))
3126  errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
3127  errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3128  else
3129  errcontext("while scanning block %u of relation \"%s.%s\"",
3130  errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3131  }
3132  else
3133  errcontext("while scanning relation \"%s.%s\"",
3134  errinfo->relnamespace, errinfo->relname);
3135  break;
3136 
3137  case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
3138  if (BlockNumberIsValid(errinfo->blkno))
3139  {
3140  if (OffsetNumberIsValid(errinfo->offnum))
3141  errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
3142  errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3143  else
3144  errcontext("while vacuuming block %u of relation \"%s.%s\"",
3145  errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3146  }
3147  else
3148  errcontext("while vacuuming relation \"%s.%s\"",
3149  errinfo->relnamespace, errinfo->relname);
3150  break;
3151 
3152  case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
3153  errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
3154  errinfo->indname, errinfo->relnamespace, errinfo->relname);
3155  break;
3156 
3157  case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
3158  errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
3159  errinfo->indname, errinfo->relnamespace, errinfo->relname);
3160  break;
3161 
3162  case VACUUM_ERRCB_PHASE_TRUNCATE:
3163  if (BlockNumberIsValid(errinfo->blkno))
3164  errcontext("while truncating relation \"%s.%s\" to %u blocks",
3165  errinfo->relnamespace, errinfo->relname, errinfo->blkno);
3166  break;
3167 
3168  case VACUUM_ERRCB_PHASE_UNKNOWN:
3169  default:
3170  return; /* do nothing; the errinfo may not be
3171  * initialized */
3172  }
3173 }
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
#define errcontext
Definition: elog.h:196
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
void * arg

References arg, LVRelState::blkno, BlockNumberIsValid(), errcontext, LVRelState::indname, LVRelState::offnum, OffsetNumberIsValid, LVRelState::phase, LVRelState::relname, LVRelState::relnamespace, VACUUM_ERRCB_PHASE_INDEX_CLEANUP, VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_ERRCB_PHASE_UNKNOWN, VACUUM_ERRCB_PHASE_VACUUM_HEAP, and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by heap_vacuum_rel().
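
For context, a callback like this only fires if it has been pushed onto error_context_stack; heap_vacuum_rel() does that for the whole VACUUM operation. The fragment below is a sketch of the usual ErrorContextCallback hookup in backend code (it assumes postgres.h and the surrounding vacuumlazy.c definitions such as LVRelState and vacuum_error_callback); vacuum_with_error_context() is a hypothetical wrapper, not a function in this file.

#include "postgres.h"

static void
vacuum_with_error_context(LVRelState *vacrel)
{
    ErrorContextCallback errcallback;

    /* push our callback onto the error context stack */
    errcallback.callback = vacuum_error_callback;
    errcallback.arg = vacrel;
    errcallback.previous = error_context_stack;
    error_context_stack = &errcallback;

    /*
     * ... scan, vacuum and truncate phases run here, updating
     * vacrel->phase, vacrel->blkno and vacrel->offnum as they go ...
     */

    /* pop before returning, so later errors don't report stale context */
    error_context_stack = errcallback.previous;
}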