PostgreSQL Source Code (git master)
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/genam.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/tidstore.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "common/int.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "utils/lsyscache.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"

Go to the source code of this file.

Data Structures

struct  LVRelState
 
struct  LVSavedErrInfo
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */
 
#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define ParallelVacuumIsActive(vacrel)   ((vacrel)->pvs != NULL)
 

Typedefs

typedef struct LVRelState LVRelState
 
typedef struct LVSavedErrInfo LVSavedErrInfo
 

Enumerations

enum  VacErrPhase {
  VACUUM_ERRCB_PHASE_UNKNOWN , VACUUM_ERRCB_PHASE_SCAN_HEAP , VACUUM_ERRCB_PHASE_VACUUM_INDEX , VACUUM_ERRCB_PHASE_VACUUM_HEAP ,
  VACUUM_ERRCB_PHASE_INDEX_CLEANUP , VACUUM_ERRCB_PHASE_TRUNCATE
}
 

Functions

static void lazy_scan_heap (LVRelState *vacrel)
 
static bool heap_vac_scan_next_block (LVRelState *vacrel, BlockNumber *blkno, bool *all_visible_according_to_vm)
 
static void find_next_unskippable_block (LVRelState *vacrel, bool *skipsallvis)
 
static bool lazy_scan_new_or_empty (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
 
static void lazy_scan_prune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool all_visible_according_to_vm, bool *has_lpdead_items)
 
static bool lazy_scan_noprune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
 
static void lazy_vacuum (LVRelState *vacrel)
 
static bool lazy_vacuum_all_indexes (LVRelState *vacrel)
 
static void lazy_vacuum_heap_rel (LVRelState *vacrel)
 
static void lazy_vacuum_heap_page (LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
 
static bool lazy_check_wraparound_failsafe (LVRelState *vacrel)
 
static void lazy_cleanup_all_indexes (LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_vacuum_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_cleanup_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
 
static bool should_attempt_truncation (LVRelState *vacrel)
 
static void lazy_truncate_heap (LVRelState *vacrel)
 
static BlockNumber count_nondeletable_pages (LVRelState *vacrel, bool *lock_waiter_detected)
 
static void dead_items_alloc (LVRelState *vacrel, int nworkers)
 
static void dead_items_add (LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
 
static void dead_items_reset (LVRelState *vacrel)
 
static void dead_items_cleanup (LVRelState *vacrel)
 
static bool heap_page_is_all_visible (LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
 
static void update_relstats_all_indexes (LVRelState *vacrel)
 
static void vacuum_error_callback (void *arg)
 
static void update_vacuum_error_info (LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
 
static void restore_vacuum_error_info (LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
 
void heap_vacuum_rel (Relation rel, VacuumParams *params, BufferAccessStrategy bstrategy)
 
static int cmpOffsetNumbers (const void *a, const void *b)
 

Macro Definition Documentation

◆ BYPASS_THRESHOLD_PAGES

#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */

Definition at line 141 of file vacuumlazy.c.

◆ FAILSAFE_EVERY_PAGES

#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 147 of file vacuumlazy.c.
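
Both scan-cadence macros are a byte budget divided by the block size. As a standalone illustration (not part of vacuumlazy.c, and assuming the common default BLCKSZ of 8192), the program below evaluates FAILSAFE_EVERY_PAGES and VACUUM_FSM_EVERY_PAGES, and shows what the BYPASS_THRESHOLD_PAGES fraction documented above means for a hypothetical 100,000-block table:

#include <stdio.h>
#include <stdint.h>

/* Assumed default block size; PostgreSQL's BLCKSZ is a compile-time option. */
#define BLCKSZ 8192

/* Re-declared copies of the vacuumlazy.c thresholds, for illustration only. */
#define BYPASS_THRESHOLD_PAGES 0.02    /* i.e. 2% of rel_pages */
#define FAILSAFE_EVERY_PAGES   ((uint32_t) (((uint64_t) 4 * 1024 * 1024 * 1024) / BLCKSZ))
#define VACUUM_FSM_EVERY_PAGES ((uint32_t) (((uint64_t) 8 * 1024 * 1024 * 1024) / BLCKSZ))

int
main(void)
{
	uint32_t rel_pages = 100000;	/* hypothetical table of 100,000 blocks (~800MB) */

	printf("failsafe recheck every %u blocks (~4GB of heap)\n", FAILSAFE_EVERY_PAGES);	/* 524288 */
	printf("FSM vacuumed every %u blocks (~8GB of heap)\n", VACUUM_FSM_EVERY_PAGES);	/* 1048576 */
	printf("index vacuuming may be bypassed below %.0f pages with LP_DEAD items\n",
		   rel_pages * BYPASS_THRESHOLD_PAGES);	/* 2000 */
	return 0;
}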

◆ ParallelVacuumIsActive

#define ParallelVacuumIsActive(vacrel)   ((vacrel)->pvs != NULL)

Definition at line 175 of file vacuumlazy.c.

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 169 of file vacuumlazy.c.

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 124 of file vacuumlazy.c.

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 123 of file vacuumlazy.c.
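
REL_TRUNCATE_MINIMUM and REL_TRUNCATE_FRACTION together gate whether lazy_truncate_heap() is worth attempting. should_attempt_truncation() is only listed in this excerpt, so the following is a hedged, self-contained sketch of how the two constants are typically combined (the hypothetical truncation_looks_worthwhile() omits the do_rel_truncate and failsafe checks the real function also applies, and the BlockNumber typedef stands in for block.h):

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

typedef uint32_t BlockNumber;	/* stand-in for PostgreSQL's block.h typedef */

#define REL_TRUNCATE_MINIMUM  1000
#define REL_TRUNCATE_FRACTION 16

/* Sketch only: truncation must promise at least REL_TRUNCATE_MINIMUM blocks,
 * or at least 1/REL_TRUNCATE_FRACTION of the table, before taking the lock. */
static bool
truncation_looks_worthwhile(BlockNumber rel_pages, BlockNumber nonempty_pages)
{
	BlockNumber possibly_freeable = rel_pages - nonempty_pages;

	return possibly_freeable > 0 &&
		(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
		 possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);
}

int
main(void)
{
	/* 10,000-block table, 500 trailing empty blocks: 500 < 1000 and < 625 -> no */
	printf("%d\n", truncation_looks_worthwhile(10000, 9500));
	/* same table, 1,200 trailing empty blocks: passes REL_TRUNCATE_MINIMUM -> yes */
	printf("%d\n", truncation_looks_worthwhile(10000, 8800));
	return 0;
}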

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 163 of file vacuumlazy.c.

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 156 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 133 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 135 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 134 of file vacuumlazy.c.

Typedef Documentation

◆ LVRelState

typedef struct LVRelState LVRelState

◆ LVSavedErrInfo

typedef struct LVSavedErrInfo LVSavedErrInfo

Enumeration Type Documentation

◆ VacErrPhase

Enumerator
VACUUM_ERRCB_PHASE_UNKNOWN 
VACUUM_ERRCB_PHASE_SCAN_HEAP 
VACUUM_ERRCB_PHASE_VACUUM_INDEX 
VACUUM_ERRCB_PHASE_VACUUM_HEAP 
VACUUM_ERRCB_PHASE_INDEX_CLEANUP 
VACUUM_ERRCB_PHASE_TRUNCATE 

Definition at line 178 of file vacuumlazy.c.

179 {
180   VACUUM_ERRCB_PHASE_UNKNOWN,
181   VACUUM_ERRCB_PHASE_SCAN_HEAP,
182   VACUUM_ERRCB_PHASE_VACUUM_INDEX,
183   VACUUM_ERRCB_PHASE_VACUUM_HEAP,
184   VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
185   VACUUM_ERRCB_PHASE_TRUNCATE
186 } VacErrPhase;
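
These phases exist so vacuum_error_callback() can report what VACUUM was doing when an error was raised. The standalone sketch below (the phase_label() helper and its strings are illustrative only, not the real errcontext() messages) shows the kind of phase-to-description mapping such a callback performs:

#include <stdio.h>

/* Local copy of the phases, re-declared so this illustration compiles on its own. */
typedef enum VacErrPhase
{
	VACUUM_ERRCB_PHASE_UNKNOWN,
	VACUUM_ERRCB_PHASE_SCAN_HEAP,
	VACUUM_ERRCB_PHASE_VACUUM_INDEX,
	VACUUM_ERRCB_PHASE_VACUUM_HEAP,
	VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
	VACUUM_ERRCB_PHASE_TRUNCATE
} VacErrPhase;

/* Hypothetical labels; the real callback builds its context lines with errcontext(). */
static const char *
phase_label(VacErrPhase phase)
{
	switch (phase)
	{
		case VACUUM_ERRCB_PHASE_SCAN_HEAP:     return "while scanning the heap";
		case VACUUM_ERRCB_PHASE_VACUUM_INDEX:  return "while vacuuming an index";
		case VACUUM_ERRCB_PHASE_VACUUM_HEAP:   return "while vacuuming the heap";
		case VACUUM_ERRCB_PHASE_INDEX_CLEANUP: return "while cleaning up indexes";
		case VACUUM_ERRCB_PHASE_TRUNCATE:      return "while truncating the heap";
		default:                               return "in an unknown phase";
	}
}

int
main(void)
{
	printf("error context: %s\n", phase_label(VACUUM_ERRCB_PHASE_VACUUM_HEAP));
	return 0;
}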

Function Documentation

◆ cmpOffsetNumbers()

static int cmpOffsetNumbers (const void *a, const void *b)

Definition at line 1490 of file vacuumlazy.c.

1491{
1492 return pg_cmp_u16(*(const OffsetNumber *) a, *(const OffsetNumber *) b);
1493}

References a, b, and pg_cmp_u16().

Referenced by lazy_scan_prune().
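
cmpOffsetNumbers() is an ordinary qsort()-style comparator over OffsetNumber values. Below is a self-contained sketch of the kind of use lazy_scan_prune() makes of it, sorting a page's dead item offsets into ascending order; the local OffsetNumber typedef and the inlined comparison stand in for PostgreSQL's off.h and pg_cmp_u16():

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

typedef uint16_t OffsetNumber;	/* stand-in for PostgreSQL's off.h typedef */

/* Same shape as the comparator above: compare two OffsetNumbers for qsort(). */
static int
cmpOffsetNumbers(const void *a, const void *b)
{
	OffsetNumber off_a = *(const OffsetNumber *) a;
	OffsetNumber off_b = *(const OffsetNumber *) b;

	return (off_a > off_b) - (off_a < off_b);
}

int
main(void)
{
	OffsetNumber deadoffsets[] = {17, 3, 250, 3, 42};
	int			ndead = sizeof(deadoffsets) / sizeof(deadoffsets[0]);

	qsort(deadoffsets, ndead, sizeof(OffsetNumber), cmpOffsetNumbers);

	for (int i = 0; i < ndead; i++)
		printf("%u\n", (unsigned) deadoffsets[i]);	/* 3 3 17 42 250 */
	return 0;
}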

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages (LVRelState *vacrel, bool *lock_waiter_detected)

Definition at line 2841 of file vacuumlazy.c.

2842{
2843 BlockNumber blkno;
2844 BlockNumber prefetchedUntil;
2845 instr_time starttime;
2846
2847 /* Initialize the starttime if we check for conflicting lock requests */
2848 INSTR_TIME_SET_CURRENT(starttime);
2849
2850 /*
2851 * Start checking blocks at what we believe relation end to be and move
2852 * backwards. (Strange coding of loop control is needed because blkno is
2853 * unsigned.) To make the scan faster, we prefetch a few blocks at a time
2854 * in forward direction, so that OS-level readahead can kick in.
2855 */
2856 blkno = vacrel->rel_pages;
2857 StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
2858 "prefetch size must be power of 2");
2859 prefetchedUntil = InvalidBlockNumber;
2860 while (blkno > vacrel->nonempty_pages)
2861 {
2862 Buffer buf;
2863 Page page;
2864 OffsetNumber offnum,
2865 maxoff;
2866 bool hastup;
2867
2868 /*
2869 * Check if another process requests a lock on our relation. We are
2870 * holding an AccessExclusiveLock here, so they will be waiting. We
2871 * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
2872 * only check if that interval has elapsed once every 32 blocks to
2873 * keep the number of system calls and actual shared lock table
2874 * lookups to a minimum.
2875 */
2876 if ((blkno % 32) == 0)
2877 {
2878 instr_time currenttime;
2879 instr_time elapsed;
2880
2881 INSTR_TIME_SET_CURRENT(currenttime);
2882 elapsed = currenttime;
2883 INSTR_TIME_SUBTRACT(elapsed, starttime);
2884 if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
2885 >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
2886 {
2887 if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
2888 {
2889 ereport(vacrel->verbose ? INFO : DEBUG2,
2890 (errmsg("table \"%s\": suspending truncate due to conflicting lock request",
2891 vacrel->relname)));
2892
2893 *lock_waiter_detected = true;
2894 return blkno;
2895 }
2896 starttime = currenttime;
2897 }
2898 }
2899
2900 /*
2901 * We don't insert a vacuum delay point here, because we have an
2902 * exclusive lock on the table which we want to hold for as short a
2903 * time as possible. We still need to check for interrupts however.
2904 */
2905 CHECK_FOR_INTERRUPTS();
2906
2907 blkno--;
2908
2909 /* If we haven't prefetched this lot yet, do so now. */
2910 if (prefetchedUntil > blkno)
2911 {
2912 BlockNumber prefetchStart;
2913 BlockNumber pblkno;
2914
2915 prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
2916 for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
2917 {
2918 PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
2919 CHECK_FOR_INTERRUPTS();
2920 }
2921 prefetchedUntil = prefetchStart;
2922 }
2923
2924 buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
2925 vacrel->bstrategy);
2926
2927 /* In this phase we only need shared access to the buffer */
2928 LockBuffer(buf, BUFFER_LOCK_SHARE);
2929
2930 page = BufferGetPage(buf);
2931
2932 if (PageIsNew(page) || PageIsEmpty(page))
2933 {
2934 UnlockReleaseBuffer(buf);
2935 continue;
2936 }
2937
2938 hastup = false;
2939 maxoff = PageGetMaxOffsetNumber(page);
2940 for (offnum = FirstOffsetNumber;
2941 offnum <= maxoff;
2942 offnum = OffsetNumberNext(offnum))
2943 {
2944 ItemId itemid;
2945
2946 itemid = PageGetItemId(page, offnum);
2947
2948 /*
2949 * Note: any non-unused item should be taken as a reason to keep
2950 * this page. Even an LP_DEAD item makes truncation unsafe, since
2951 * we must not have cleaned out its index entries.
2952 */
2953 if (ItemIdIsUsed(itemid))
2954 {
2955 hastup = true;
2956 break; /* can stop scanning */
2957 }
2958 } /* scan along page */
2959
2960 UnlockReleaseBuffer(buf);
2961
2962 /* Done scanning if we found a tuple here */
2963 if (hastup)
2964 return blkno + 1;
2965 }
2966
2967 /*
2968 * If we fall out of the loop, all the previously-thought-to-be-empty
2969 * pages still are; we need not bother to look at the last known-nonempty
2970 * page.
2971 */
2972 return vacrel->nonempty_pages;
2973}

References AccessExclusiveLock, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BufferGetPage(), CHECK_FOR_INTERRUPTS, DEBUG2, ereport, errmsg(), FirstOffsetNumber, INFO, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelState::nonempty_pages, OffsetNumberNext, PageGetItemId(), PageGetMaxOffsetNumber(), PageIsEmpty(), PageIsNew(), PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelState::rel, LVRelState::rel_pages, LVRelState::relname, StaticAssertStmt, UnlockReleaseBuffer(), VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and LVRelState::verbose.

Referenced by lazy_truncate_heap().

◆ dead_items_add()

static void dead_items_add (LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)

Definition at line 3048 of file vacuumlazy.c.

3050{
3051 const int prog_index[2] = {
3052 PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS,
3053 PROGRESS_VACUUM_DEAD_TUPLE_BYTES
3054 };
3055 int64 prog_val[2];
3056
3057 TidStoreSetBlockOffsets(vacrel->dead_items, blkno, offsets, num_offsets);
3058 vacrel->dead_items_info->num_items += num_offsets;
3059
3060 /* update the progress information */
3061 prog_val[0] = vacrel->dead_items_info->num_items;
3062 prog_val[1] = TidStoreMemoryUsage(vacrel->dead_items);
3063 pgstat_progress_update_multi_param(2, prog_index, prog_val);
3064}

References LVRelState::dead_items, LVRelState::dead_items_info, VacDeadItemsInfo::num_items, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS, TidStoreMemoryUsage(), and TidStoreSetBlockOffsets().

Referenced by lazy_scan_noprune(), and lazy_scan_prune().

◆ dead_items_alloc()

static void dead_items_alloc (LVRelState *vacrel, int nworkers)

Definition at line 2983 of file vacuumlazy.c.

2984{
2985 VacDeadItemsInfo *dead_items_info;
2986 int vac_work_mem = AmAutoVacuumWorkerProcess() &&
2987 autovacuum_work_mem != -1 ?
2988 autovacuum_work_mem : maintenance_work_mem;
2989
2990 /*
2991 * Initialize state for a parallel vacuum. As of now, only one worker can
2992 * be used for an index, so we invoke parallelism only if there are at
2993 * least two indexes on a table.
2994 */
2995 if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)
2996 {
2997 /*
2998 * Since parallel workers cannot access data in temporary tables, we
2999 * can't perform parallel vacuum on them.
3000 */
3001 if (RelationUsesLocalBuffers(vacrel->rel))
3002 {
3003 /*
3004 * Give warning only if the user explicitly tries to perform a
3005 * parallel vacuum on the temporary table.
3006 */
3007 if (nworkers > 0)
3008 ereport(WARNING,
3009 (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
3010 vacrel->relname)));
3011 }
3012 else
3013 vacrel->pvs = parallel_vacuum_init(vacrel->rel, vacrel->indrels,
3014 vacrel->nindexes, nworkers,
3015 vac_work_mem,
3016 vacrel->verbose ? INFO : DEBUG2,
3017 vacrel->bstrategy);
3018
3019 /*
3020 * If parallel mode started, dead_items and dead_items_info spaces are
3021 * allocated in DSM.
3022 */
3023 if (ParallelVacuumIsActive(vacrel))
3024 {
3025 vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs,
3026 &vacrel->dead_items_info);
3027 return;
3028 }
3029 }
3030
3031 /*
3032 * Serial VACUUM case. Allocate both dead_items and dead_items_info
3033 * locally.
3034 */
3035
3036 dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));
3037 dead_items_info->max_bytes = vac_work_mem * 1024L;
3038 dead_items_info->num_items = 0;
3039 vacrel->dead_items_info = dead_items_info;
3040
3041 vacrel->dead_items = TidStoreCreateLocal(dead_items_info->max_bytes, true);
3042}

References AmAutoVacuumWorkerProcess, autovacuum_work_mem, LVRelState::bstrategy, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_vacuuming, ereport, errmsg(), LVRelState::indrels, INFO, maintenance_work_mem, VacDeadItemsInfo::max_bytes, LVRelState::nindexes, VacDeadItemsInfo::num_items, palloc(), parallel_vacuum_get_dead_items(), parallel_vacuum_init(), ParallelVacuumIsActive, LVRelState::pvs, LVRelState::rel, RelationUsesLocalBuffers, LVRelState::relname, TidStoreCreateLocal(), LVRelState::verbose, and WARNING.

Referenced by heap_vacuum_rel().

◆ dead_items_cleanup()

static void dead_items_cleanup (LVRelState *vacrel)

Definition at line 3090 of file vacuumlazy.c.

3091{
3092 if (!ParallelVacuumIsActive(vacrel))
3093 {
3094 /* Don't bother with pfree here */
3095 return;
3096 }
3097
3098 /* End parallel mode */
3099 parallel_vacuum_end(vacrel->pvs, vacrel->indstats);
3100 vacrel->pvs = NULL;
3101}

References LVRelState::indstats, parallel_vacuum_end(), ParallelVacuumIsActive, and LVRelState::pvs.

Referenced by heap_vacuum_rel().

◆ dead_items_reset()

static void dead_items_reset (LVRelState *vacrel)

Definition at line 3070 of file vacuumlazy.c.

3071{
3072 if (ParallelVacuumIsActive(vacrel))
3073 {
3074 parallel_vacuum_reset_dead_items(vacrel->pvs);
3075 return;
3076 }
3077
3078 /* Recreate the tidstore with the same max_bytes limitation */
3079 TidStoreDestroy(vacrel->dead_items);
3080 vacrel->dead_items = TidStoreCreateLocal(vacrel->dead_items_info->max_bytes, true);
3081
3082 /* Reset the counter */
3083 vacrel->dead_items_info->num_items = 0;
3084}

References LVRelState::dead_items, LVRelState::dead_items_info, VacDeadItemsInfo::max_bytes, VacDeadItemsInfo::num_items, parallel_vacuum_reset_dead_items(), ParallelVacuumIsActive, LVRelState::pvs, TidStoreCreateLocal(), and TidStoreDestroy().

Referenced by lazy_vacuum().

◆ find_next_unskippable_block()

static void find_next_unskippable_block (LVRelState *vacrel, bool *skipsallvis)

Definition at line 1271 of file vacuumlazy.c.

1272{
1273 BlockNumber rel_pages = vacrel->rel_pages;
1274 BlockNumber next_unskippable_block = vacrel->next_unskippable_block + 1;
1275 Buffer next_unskippable_vmbuffer = vacrel->next_unskippable_vmbuffer;
1276 bool next_unskippable_allvis;
1277
1278 *skipsallvis = false;
1279
1280 for (;;)
1281 {
1282 uint8 mapbits = visibilitymap_get_status(vacrel->rel,
1283 next_unskippable_block,
1284 &next_unskippable_vmbuffer);
1285
1286 next_unskippable_allvis = (mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0;
1287
1288 /*
1289 * A block is unskippable if it is not all visible according to the
1290 * visibility map.
1291 */
1292 if (!next_unskippable_allvis)
1293 {
1294 Assert((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0);
1295 break;
1296 }
1297
1298 /*
1299 * Caller must scan the last page to determine whether it has tuples
1300 * (caller must have the opportunity to set vacrel->nonempty_pages).
1301 * This rule avoids having lazy_truncate_heap() take access-exclusive
1302 * lock on rel to attempt a truncation that fails anyway, just because
1303 * there are tuples on the last page (it is likely that there will be
1304 * tuples on other nearby pages as well, but those can be skipped).
1305 *
1306 * Implement this by always treating the last block as unsafe to skip.
1307 */
1308 if (next_unskippable_block == rel_pages - 1)
1309 break;
1310
1311 /* DISABLE_PAGE_SKIPPING makes all skipping unsafe */
1312 if (!vacrel->skipwithvm)
1313 break;
1314
1315 /*
1316 * Aggressive VACUUM caller can't skip pages just because they are
1317 * all-visible. They may still skip all-frozen pages, which can't
1318 * contain XIDs < OldestXmin (XIDs that aren't already frozen by now).
1319 */
1320 if ((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0)
1321 {
1322 if (vacrel->aggressive)
1323 break;
1324
1325 /*
1326 * All-visible block is safe to skip in non-aggressive case. But
1327 * remember that the final range contains such a block for later.
1328 */
1329 *skipsallvis = true;
1330 }
1331
1332 next_unskippable_block++;
1333 }
1334
1335 /* write the local variables back to vacrel */
1336 vacrel->next_unskippable_block = next_unskippable_block;
1337 vacrel->next_unskippable_allvis = next_unskippable_allvis;
1338 vacrel->next_unskippable_vmbuffer = next_unskippable_vmbuffer;
1339}

References LVRelState::aggressive, Assert, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_vmbuffer, LVRelState::rel, LVRelState::rel_pages, LVRelState::skipwithvm, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_get_status().

Referenced by heap_vac_scan_next_block().

◆ heap_page_is_all_visible()

static bool heap_page_is_all_visible (LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)

Definition at line 3115 of file vacuumlazy.c.

3118{
3119 Page page = BufferGetPage(buf);
3120 BlockNumber blockno = BufferGetBlockNumber(buf);
3121 OffsetNumber offnum,
3122 maxoff;
3123 bool all_visible = true;
3124
3125 *visibility_cutoff_xid = InvalidTransactionId;
3126 *all_frozen = true;
3127
3128 maxoff = PageGetMaxOffsetNumber(page);
3129 for (offnum = FirstOffsetNumber;
3130 offnum <= maxoff && all_visible;
3131 offnum = OffsetNumberNext(offnum))
3132 {
3133 ItemId itemid;
3134 HeapTupleData tuple;
3135
3136 /*
3137 * Set the offset number so that we can display it along with any
3138 * error that occurred while processing this tuple.
3139 */
3140 vacrel->offnum = offnum;
3141 itemid = PageGetItemId(page, offnum);
3142
3143 /* Unused or redirect line pointers are of no interest */
3144 if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
3145 continue;
3146
3147 ItemPointerSet(&(tuple.t_self), blockno, offnum);
3148
3149 /*
3150 * Dead line pointers can have index pointers pointing to them. So
3151 * they can't be treated as visible
3152 */
3153 if (ItemIdIsDead(itemid))
3154 {
3155 all_visible = false;
3156 *all_frozen = false;
3157 break;
3158 }
3159
3160 Assert(ItemIdIsNormal(itemid));
3161
3162 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
3163 tuple.t_len = ItemIdGetLength(itemid);
3164 tuple.t_tableOid = RelationGetRelid(vacrel->rel);
3165
3166 switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
3167 buf))
3168 {
3169 case HEAPTUPLE_LIVE:
3170 {
3171 TransactionId xmin;
3172
3173 /* Check comments in lazy_scan_prune. */
3174 if (!HeapTupleHeaderXminCommitted(tuple.t_data))
3175 {
3176 all_visible = false;
3177 *all_frozen = false;
3178 break;
3179 }
3180
3181 /*
3182 * The inserter definitely committed. But is it old enough
3183 * that everyone sees it as committed?
3184 */
3185 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
3186 if (!TransactionIdPrecedes(xmin,
3187 vacrel->cutoffs.OldestXmin))
3188 {
3189 all_visible = false;
3190 *all_frozen = false;
3191 break;
3192 }
3193
3194 /* Track newest xmin on page. */
3195 if (TransactionIdFollows(xmin, *visibility_cutoff_xid) &&
3196 TransactionIdIsNormal(xmin))
3197 *visibility_cutoff_xid = xmin;
3198
3199 /* Check whether this tuple is already frozen or not */
3200 if (all_visible && *all_frozen &&
3201 heap_tuple_needs_eventual_freeze(tuple.t_data))
3202 *all_frozen = false;
3203 }
3204 break;
3205
3206 case HEAPTUPLE_DEAD:
3207 case HEAPTUPLE_RECENTLY_DEAD:
3208 case HEAPTUPLE_DELETE_IN_PROGRESS:
3209 case HEAPTUPLE_INSERT_IN_PROGRESS:
3210 {
3211 all_visible = false;
3212 *all_frozen = false;
3213 break;
3214 }
3215 default:
3216 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
3217 break;
3218 }
3219 } /* scan along page */
3220
3221 /* Clear the offset information once we have processed the given page. */
3222 vacrel->offnum = InvalidOffsetNumber;
3223
3224 return all_visible;
3225}

References Assert, buf, BufferGetBlockNumber(), BufferGetPage(), LVRelState::cutoffs, elog, ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdIsNormal, and TransactionIdPrecedes().

Referenced by lazy_scan_prune(), and lazy_vacuum_heap_page().

◆ heap_vac_scan_next_block()

static bool heap_vac_scan_next_block (LVRelState *vacrel, BlockNumber *blkno, bool *all_visible_according_to_vm)

Definition at line 1173 of file vacuumlazy.c.

1175{
1176 BlockNumber next_block;
1177
1178 /* relies on InvalidBlockNumber + 1 overflowing to 0 on first call */
1179 next_block = vacrel->current_block + 1;
1180
1181 /* Have we reached the end of the relation? */
1182 if (next_block >= vacrel->rel_pages)
1183 {
1184 if (BufferIsValid(vacrel->next_unskippable_vmbuffer))
1185 {
1186 ReleaseBuffer(vacrel->next_unskippable_vmbuffer);
1187 vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1188 }
1189 *blkno = vacrel->rel_pages;
1190 return false;
1191 }
1192
1193 /*
1194 * We must be in one of the three following states:
1195 */
1196 if (next_block > vacrel->next_unskippable_block ||
1197 vacrel->next_unskippable_vmbuffer == InvalidBuffer)
1198 {
1199 /*
1200 * 1. We have just processed an unskippable block (or we're at the
1201 * beginning of the scan). Find the next unskippable block using the
1202 * visibility map.
1203 */
1204 bool skipsallvis;
1205
1206 find_next_unskippable_block(vacrel, &skipsallvis);
1207
1208 /*
1209 * We now know the next block that we must process. It can be the
1210 * next block after the one we just processed, or something further
1211 * ahead. If it's further ahead, we can jump to it, but we choose to
1212 * do so only if we can skip at least SKIP_PAGES_THRESHOLD consecutive
1213 * pages. Since we're reading sequentially, the OS should be doing
1214 * readahead for us, so there's no gain in skipping a page now and
1215 * then. Skipping such a range might even discourage sequential
1216 * detection.
1217 *
1218 * This test also enables more frequent relfrozenxid advancement
1219 * during non-aggressive VACUUMs. If the range has any all-visible
1220 * pages then skipping makes updating relfrozenxid unsafe, which is a
1221 * real downside.
1222 */
1223 if (vacrel->next_unskippable_block - next_block >= SKIP_PAGES_THRESHOLD)
1224 {
1225 next_block = vacrel->next_unskippable_block;
1226 if (skipsallvis)
1227 vacrel->skippedallvis = true;
1228 }
1229 }
1230
1231 /* Now we must be in one of the two remaining states: */
1232 if (next_block < vacrel->next_unskippable_block)
1233 {
1234 /*
1235 * 2. We are processing a range of blocks that we could have skipped
1236 * but chose not to. We know that they are all-visible in the VM,
1237 * otherwise they would've been unskippable.
1238 */
1239 *blkno = vacrel->current_block = next_block;
1240 *all_visible_according_to_vm = true;
1241 return true;
1242 }
1243 else
1244 {
1245 /*
1246 * 3. We reached the next unskippable block. Process it. On next
1247 * iteration, we will be back in state 1.
1248 */
1249 Assert(next_block == vacrel->next_unskippable_block);
1250
1251 *blkno = vacrel->current_block = next_block;
1252 *all_visible_according_to_vm = vacrel->next_unskippable_allvis;
1253 return true;
1254 }
1255}

References Assert, BufferIsValid(), LVRelState::current_block, find_next_unskippable_block(), InvalidBlockNumber, InvalidBuffer, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_vmbuffer, LVRelState::rel_pages, ReleaseBuffer(), SKIP_PAGES_THRESHOLD, and LVRelState::skippedallvis.

Referenced by lazy_scan_heap().
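
The comments above describe the skipping policy: a run of all-visible blocks is only jumped over when it is at least SKIP_PAGES_THRESHOLD blocks long, and shorter runs are read anyway to keep the scan sequential. The toy, self-contained model below (the simulate_scan() helper is hypothetical) illustrates that policy only, not the function's real visibility-map state machine:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define SKIP_PAGES_THRESHOLD 32		/* same value as vacuumlazy.c */

/*
 * Toy model: given a per-block all-visible flag, print which blocks a
 * non-aggressive VACUUM would actually read.  All-visible runs shorter than
 * SKIP_PAGES_THRESHOLD are read anyway; the last block is never skipped.
 */
static void
simulate_scan(const bool *all_visible, uint32_t rel_pages)
{
	uint32_t	blkno = 0;

	while (blkno < rel_pages)
	{
		uint32_t	next_unskippable = blkno;

		/* find the next block that must be processed */
		while (next_unskippable < rel_pages - 1 && all_visible[next_unskippable])
			next_unskippable++;

		if (next_unskippable - blkno >= SKIP_PAGES_THRESHOLD)
			blkno = next_unskippable;	/* long run: jump straight to it */

		for (; blkno <= next_unskippable; blkno++)
			printf("scan block %u\n", blkno);
	}
}

int
main(void)
{
	bool		all_visible[100] = {false};

	for (int i = 10; i < 60; i++)	/* a 50-block all-visible run: skipped */
		all_visible[i] = true;
	for (int i = 70; i < 75; i++)	/* a 5-block run: read anyway */
		all_visible[i] = true;

	simulate_scan(all_visible, 100);
	return 0;
}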

◆ heap_vacuum_rel()

void heap_vacuum_rel (Relation rel, VacuumParams *params, BufferAccessStrategy bstrategy)

Definition at line 362 of file vacuumlazy.c.

364{
365 LVRelState *vacrel;
366 bool verbose,
367 instrument,
368 skipwithvm,
369 frozenxid_updated,
370 minmulti_updated;
371 BlockNumber orig_rel_pages,
372 new_rel_pages,
373 new_rel_allvisible;
374 PGRUsage ru0;
375 TimestampTz starttime = 0;
376 PgStat_Counter startreadtime = 0,
377 startwritetime = 0;
378 WalUsage startwalusage = pgWalUsage;
379 BufferUsage startbufferusage = pgBufferUsage;
380 ErrorContextCallback errcallback;
381 char **indnames = NULL;
382
383 verbose = (params->options & VACOPT_VERBOSE) != 0;
384 instrument = (verbose || (AmAutoVacuumWorkerProcess() &&
385 params->log_min_duration >= 0));
386 if (instrument)
387 {
388 pg_rusage_init(&ru0);
389 starttime = GetCurrentTimestamp();
390 if (track_io_timing)
391 {
392 startreadtime = pgStatBlockReadTime;
393 startwritetime = pgStatBlockWriteTime;
394 }
395 }
396
397 pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
398 RelationGetRelid(rel));
399
400 /*
401 * Setup error traceback support for ereport() first. The idea is to set
402 * up an error context callback to display additional information on any
403 * error during a vacuum. During different phases of vacuum, we update
404 * the state so that the error context callback always display current
405 * information.
406 *
407 * Copy the names of heap rel into local memory for error reporting
408 * purposes, too. It isn't always safe to assume that we can get the name
409 * of each rel. It's convenient for code in lazy_scan_heap to always use
410 * these temp copies.
411 */
412 vacrel = (LVRelState *) palloc0(sizeof(LVRelState));
413 vacrel->dbname = get_database_name(MyDatabaseId);
414 vacrel->relnamespace = get_namespace_name(RelationGetNamespace(rel));
415 vacrel->relname = pstrdup(RelationGetRelationName(rel));
416 vacrel->indname = NULL;
417 vacrel->phase = VACUUM_ERRCB_PHASE_UNKNOWN;
418 vacrel->verbose = verbose;
419 errcallback.callback = vacuum_error_callback;
420 errcallback.arg = vacrel;
421 errcallback.previous = error_context_stack;
422 error_context_stack = &errcallback;
423
424 /* Set up high level stuff about rel and its indexes */
425 vacrel->rel = rel;
426 vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes,
427 &vacrel->indrels);
428 vacrel->bstrategy = bstrategy;
429 if (instrument && vacrel->nindexes > 0)
430 {
431 /* Copy index names used by instrumentation (not error reporting) */
432 indnames = palloc(sizeof(char *) * vacrel->nindexes);
433 for (int i = 0; i < vacrel->nindexes; i++)
434 indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
435 }
436
437 /*
438 * The index_cleanup param either disables index vacuuming and cleanup or
439 * forces it to go ahead when we would otherwise apply the index bypass
440 * optimization. The default is 'auto', which leaves the final decision
441 * up to lazy_vacuum().
442 *
443 * The truncate param allows user to avoid attempting relation truncation,
444 * though it can't force truncation to happen.
445 */
446 Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED);
447 Assert(params->truncate != VACOPTVALUE_UNSPECIFIED &&
448 params->truncate != VACOPTVALUE_AUTO);
449
450 /*
451 * While VacuumFailSafeActive is reset to false before calling this, we
452 * still need to reset it here due to recursive calls.
453 */
454 VacuumFailsafeActive = false;
455 vacrel->consider_bypass_optimization = true;
456 vacrel->do_index_vacuuming = true;
457 vacrel->do_index_cleanup = true;
458 vacrel->do_rel_truncate = (params->truncate != VACOPTVALUE_DISABLED);
459 if (params->index_cleanup == VACOPTVALUE_DISABLED)
460 {
461 /* Force disable index vacuuming up-front */
462 vacrel->do_index_vacuuming = false;
463 vacrel->do_index_cleanup = false;
464 }
465 else if (params->index_cleanup == VACOPTVALUE_ENABLED)
466 {
467 /* Force index vacuuming. Note that failsafe can still bypass. */
468 vacrel->consider_bypass_optimization = false;
469 }
470 else
471 {
472 /* Default/auto, make all decisions dynamically */
473 Assert(params->index_cleanup == VACOPTVALUE_AUTO);
474 }
475
476 /* Initialize page counters explicitly (be tidy) */
477 vacrel->scanned_pages = 0;
478 vacrel->removed_pages = 0;
479 vacrel->new_frozen_tuple_pages = 0;
480 vacrel->lpdead_item_pages = 0;
481 vacrel->missed_dead_pages = 0;
482 vacrel->nonempty_pages = 0;
483 /* dead_items_alloc allocates vacrel->dead_items later on */
484
485 /* Allocate/initialize output statistics state */
486 vacrel->new_rel_tuples = 0;
487 vacrel->new_live_tuples = 0;
488 vacrel->indstats = (IndexBulkDeleteResult **)
489 palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
490
491 /* Initialize remaining counters (be tidy) */
492 vacrel->num_index_scans = 0;
493 vacrel->tuples_deleted = 0;
494 vacrel->tuples_frozen = 0;
495 vacrel->lpdead_items = 0;
496 vacrel->live_tuples = 0;
497 vacrel->recently_dead_tuples = 0;
498 vacrel->missed_dead_tuples = 0;
499
500 vacrel->vm_new_visible_pages = 0;
501 vacrel->vm_new_visible_frozen_pages = 0;
502 vacrel->vm_new_frozen_pages = 0;
503
504 /*
505 * Get cutoffs that determine which deleted tuples are considered DEAD,
506 * not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze. Then determine
507 * the extent of the blocks that we'll scan in lazy_scan_heap. It has to
508 * happen in this order to ensure that the OldestXmin cutoff field works
509 * as an upper bound on the XIDs stored in the pages we'll actually scan
510 * (NewRelfrozenXid tracking must never be allowed to miss unfrozen XIDs).
511 *
512 * Next acquire vistest, a related cutoff that's used in pruning. We use
513 * vistest in combination with OldestXmin to ensure that
514 * heap_page_prune_and_freeze() always removes any deleted tuple whose
515 * xmax is < OldestXmin. lazy_scan_prune must never become confused about
516 * whether a tuple should be frozen or removed. (In the future we might
517 * want to teach lazy_scan_prune to recompute vistest from time to time,
518 * to increase the number of dead tuples it can prune away.)
519 */
520 vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
521 vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
522 vacrel->vistest = GlobalVisTestFor(rel);
523 /* Initialize state used to track oldest extant XID/MXID */
524 vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin;
525 vacrel->NewRelminMxid = vacrel->cutoffs.OldestMxact;
526 vacrel->skippedallvis = false;
527 skipwithvm = true;
528 if (params->options & VACOPT_DISABLE_PAGE_SKIPPING)
529 {
530 /*
531 * Force aggressive mode, and disable skipping blocks using the
532 * visibility map (even those set all-frozen)
533 */
534 vacrel->aggressive = true;
535 skipwithvm = false;
536 }
537
538 vacrel->skipwithvm = skipwithvm;
539
540 if (verbose)
541 {
542 if (vacrel->aggressive)
543 ereport(INFO,
544 (errmsg("aggressively vacuuming \"%s.%s.%s\"",
545 vacrel->dbname, vacrel->relnamespace,
546 vacrel->relname)));
547 else
548 ereport(INFO,
549 (errmsg("vacuuming \"%s.%s.%s\"",
550 vacrel->dbname, vacrel->relnamespace,
551 vacrel->relname)));
552 }
553
554 /*
555 * Allocate dead_items memory using dead_items_alloc. This handles
556 * parallel VACUUM initialization as part of allocating shared memory
557 * space used for dead_items. (But do a failsafe precheck first, to
558 * ensure that parallel VACUUM won't be attempted at all when relfrozenxid
559 * is already dangerously old.)
560 */
561 lazy_check_wraparound_failsafe(vacrel);
562 dead_items_alloc(vacrel, params->nworkers);
563
564 /*
565 * Call lazy_scan_heap to perform all required heap pruning, index
566 * vacuuming, and heap vacuuming (plus related processing)
567 */
568 lazy_scan_heap(vacrel);
569
570 /*
571 * Free resources managed by dead_items_alloc. This ends parallel mode in
572 * passing when necessary.
573 */
574 dead_items_cleanup(vacrel);
575 Assert(!IsInParallelMode());
576
577 /*
578 * Update pg_class entries for each of rel's indexes where appropriate.
579 *
580 * Unlike the later update to rel's pg_class entry, this is not critical.
581 * Maintains relpages/reltuples statistics used by the planner only.
582 */
583 if (vacrel->do_index_cleanup)
584 update_relstats_all_indexes(vacrel);
585
586 /* Done with rel's indexes */
587 vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
588
589 /* Optionally truncate rel */
590 if (should_attempt_truncation(vacrel))
591 lazy_truncate_heap(vacrel);
592
593 /* Pop the error context stack */
594 error_context_stack = errcallback.previous;
595
596 /* Report that we are now doing final cleanup */
597 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
598 PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
599
600 /*
601 * Prepare to update rel's pg_class entry.
602 *
603 * Aggressive VACUUMs must always be able to advance relfrozenxid to a
604 * value >= FreezeLimit, and relminmxid to a value >= MultiXactCutoff.
605 * Non-aggressive VACUUMs may advance them by any amount, or not at all.
606 */
607 Assert(vacrel->NewRelfrozenXid == vacrel->cutoffs.OldestXmin ||
608 TransactionIdPrecedesOrEquals(vacrel->aggressive ? vacrel->cutoffs.FreezeLimit :
609 vacrel->cutoffs.relfrozenxid,
610 vacrel->NewRelfrozenXid));
611 Assert(vacrel->NewRelminMxid == vacrel->cutoffs.OldestMxact ||
612 MultiXactIdPrecedesOrEquals(vacrel->aggressive ? vacrel->cutoffs.MultiXactCutoff :
613 vacrel->cutoffs.relminmxid,
614 vacrel->NewRelminMxid));
615 if (vacrel->skippedallvis)
616 {
617 /*
618 * Must keep original relfrozenxid in a non-aggressive VACUUM that
619 * chose to skip an all-visible page range. The state that tracks new
620 * values will have missed unfrozen XIDs from the pages we skipped.
621 */
622 Assert(!vacrel->aggressive);
623 vacrel->NewRelfrozenXid = InvalidTransactionId;
624 vacrel->NewRelminMxid = InvalidMultiXactId;
625 }
626
627 /*
628 * For safety, clamp relallvisible to be not more than what we're setting
629 * pg_class.relpages to
630 */
631 new_rel_pages = vacrel->rel_pages; /* After possible rel truncation */
632 visibilitymap_count(rel, &new_rel_allvisible, NULL);
633 if (new_rel_allvisible > new_rel_pages)
634 new_rel_allvisible = new_rel_pages;
635
636 /*
637 * Now actually update rel's pg_class entry.
638 *
639 * In principle new_live_tuples could be -1 indicating that we (still)
640 * don't know the tuple count. In practice that can't happen, since we
641 * scan every page that isn't skipped using the visibility map.
642 */
643 vac_update_relstats(rel, new_rel_pages, vacrel->new_live_tuples,
644 new_rel_allvisible, vacrel->nindexes > 0,
645 vacrel->NewRelfrozenXid, vacrel->NewRelminMxid,
646 &frozenxid_updated, &minmulti_updated, false);
647
648 /*
649 * Report results to the cumulative stats system, too.
650 *
651 * Deliberately avoid telling the stats system about LP_DEAD items that
652 * remain in the table due to VACUUM bypassing index and heap vacuuming.
653 * ANALYZE will consider the remaining LP_DEAD items to be dead "tuples".
654 * It seems like a good idea to err on the side of not vacuuming again too
655 * soon in cases where the failsafe prevented significant amounts of heap
656 * vacuuming.
657 */
658 pgstat_report_vacuum(RelationGetRelid(rel),
659 rel->rd_rel->relisshared,
660 Max(vacrel->new_live_tuples, 0),
661 vacrel->recently_dead_tuples +
662 vacrel->missed_dead_tuples);
663 pgstat_progress_end_command();
664
665 if (instrument)
666 {
667 TimestampTz endtime = GetCurrentTimestamp();
668
669 if (verbose || params->log_min_duration == 0 ||
670 TimestampDifferenceExceeds(starttime, endtime,
671 params->log_min_duration))
672 {
673 long secs_dur;
674 int usecs_dur;
675 WalUsage walusage;
676 BufferUsage bufferusage;
677 StringInfoData buf;
678 char *msgfmt;
679 int32 diff;
680 double read_rate = 0,
681 write_rate = 0;
682 int64 total_blks_hit;
683 int64 total_blks_read;
684 int64 total_blks_dirtied;
685
686 TimestampDifference(starttime, endtime, &secs_dur, &usecs_dur);
687 memset(&walusage, 0, sizeof(WalUsage));
688 WalUsageAccumDiff(&walusage, &pgWalUsage, &startwalusage);
689 memset(&bufferusage, 0, sizeof(BufferUsage));
690 BufferUsageAccumDiff(&bufferusage, &pgBufferUsage, &startbufferusage);
691
692 total_blks_hit = bufferusage.shared_blks_hit +
693 bufferusage.local_blks_hit;
694 total_blks_read = bufferusage.shared_blks_read +
695 bufferusage.local_blks_read;
696 total_blks_dirtied = bufferusage.shared_blks_dirtied +
697 bufferusage.local_blks_dirtied;
698
699 initStringInfo(&buf);
700 if (verbose)
701 {
702 /*
703 * Aggressiveness already reported earlier, in dedicated
704 * VACUUM VERBOSE ereport
705 */
706 Assert(!params->is_wraparound);
707 msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
708 }
709 else if (params->is_wraparound)
710 {
711 /*
712 * While it's possible for a VACUUM to be both is_wraparound
713 * and !aggressive, that's just a corner-case -- is_wraparound
714 * implies aggressive. Produce distinct output for the corner
715 * case all the same, just in case.
716 */
717 if (vacrel->aggressive)
718 msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
719 else
720 msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
721 }
722 else
723 {
724 if (vacrel->aggressive)
725 msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
726 else
727 msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
728 }
729 appendStringInfo(&buf, msgfmt,
730 vacrel->dbname,
731 vacrel->relnamespace,
732 vacrel->relname,
733 vacrel->num_index_scans);
734 appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total)\n"),
735 vacrel->removed_pages,
736 new_rel_pages,
737 vacrel->scanned_pages,
738 orig_rel_pages == 0 ? 100.0 :
739 100.0 * vacrel->scanned_pages / orig_rel_pages);
740 appendStringInfo(&buf,
741 _("tuples: %lld removed, %lld remain, %lld are dead but not yet removable\n"),
742 (long long) vacrel->tuples_deleted,
743 (long long) vacrel->new_rel_tuples,
744 (long long) vacrel->recently_dead_tuples);
745 if (vacrel->missed_dead_tuples > 0)
746 appendStringInfo(&buf,
747 _("tuples missed: %lld dead from %u pages not removed due to cleanup lock contention\n"),
748 (long long) vacrel->missed_dead_tuples,
749 vacrel->missed_dead_pages);
750 diff = (int32) (ReadNextTransactionId() -
751 vacrel->cutoffs.OldestXmin);
752 appendStringInfo(&buf,
753 _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
754 vacrel->cutoffs.OldestXmin, diff);
755 if (frozenxid_updated)
756 {
757 diff = (int32) (vacrel->NewRelfrozenXid -
758 vacrel->cutoffs.relfrozenxid);
759 appendStringInfo(&buf,
760 _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
761 vacrel->NewRelfrozenXid, diff);
762 }
763 if (minmulti_updated)
764 {
765 diff = (int32) (vacrel->NewRelminMxid -
766 vacrel->cutoffs.relminmxid);
767 appendStringInfo(&buf,
768 _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
769 vacrel->NewRelminMxid, diff);
770 }
771 appendStringInfo(&buf, _("frozen: %u pages from table (%.2f%% of total) had %lld tuples frozen\n"),
772 vacrel->new_frozen_tuple_pages,
773 orig_rel_pages == 0 ? 100.0 :
774 100.0 * vacrel->new_frozen_tuple_pages /
775 orig_rel_pages,
776 (long long) vacrel->tuples_frozen);
777
778 appendStringInfo(&buf,
779 _("visibility map: %u pages set all-visible, %u pages set all-frozen (%u were all-visible)\n"),
780 vacrel->vm_new_visible_pages,
781 vacrel->vm_new_visible_frozen_pages +
782 vacrel->vm_new_frozen_pages,
783 vacrel->vm_new_frozen_pages);
784 if (vacrel->do_index_vacuuming)
785 {
786 if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
787 appendStringInfoString(&buf, _("index scan not needed: "));
788 else
789 appendStringInfoString(&buf, _("index scan needed: "));
790
791 msgfmt = _("%u pages from table (%.2f%% of total) had %lld dead item identifiers removed\n");
792 }
793 else
794 {
795 if (!VacuumFailsafeActive)
796 appendStringInfoString(&buf, _("index scan bypassed: "));
797 else
798 appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
799
800 msgfmt = _("%u pages from table (%.2f%% of total) have %lld dead item identifiers\n");
801 }
802 appendStringInfo(&buf, msgfmt,
803 vacrel->lpdead_item_pages,
804 orig_rel_pages == 0 ? 100.0 :
805 100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
806 (long long) vacrel->lpdead_items);
807 for (int i = 0; i < vacrel->nindexes; i++)
808 {
809 IndexBulkDeleteResult *istat = vacrel->indstats[i];
810
811 if (!istat)
812 continue;
813
814 appendStringInfo(&buf,
815 _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
816 indnames[i],
817 istat->num_pages,
818 istat->pages_newly_deleted,
819 istat->pages_deleted,
820 istat->pages_free);
821 }
822 if (track_io_timing)
823 {
824 double read_ms = (double) (pgStatBlockReadTime - startreadtime) / 1000;
825 double write_ms = (double) (pgStatBlockWriteTime - startwritetime) / 1000;
826
827 appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
828 read_ms, write_ms);
829 }
830 if (secs_dur > 0 || usecs_dur > 0)
831 {
832 read_rate = (double) BLCKSZ * total_blks_read /
833 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
834 write_rate = (double) BLCKSZ * total_blks_dirtied /
835 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
836 }
837 appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
838 read_rate, write_rate);
839 appendStringInfo(&buf,
840 _("buffer usage: %lld hits, %lld reads, %lld dirtied\n"),
841 (long long) total_blks_hit,
842 (long long) total_blks_read,
843 (long long) total_blks_dirtied);
844 appendStringInfo(&buf,
845 _("WAL usage: %lld records, %lld full page images, %llu bytes\n"),
846 (long long) walusage.wal_records,
847 (long long) walusage.wal_fpi,
848 (unsigned long long) walusage.wal_bytes);
849 appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
850
851 ereport(verbose ? INFO : LOG,
852 (errmsg_internal("%s", buf.data)));
853 pfree(buf.data);
854 }
855 }
856
857 /* Cleanup index statistics and index names */
858 for (int i = 0; i < vacrel->nindexes; i++)
859 {
860 if (vacrel->indstats[i])
861 pfree(vacrel->indstats[i]);
862
863 if (instrument)
864 pfree(indnames[i]);
865 }
866}

References _, LVRelState::aggressive, AmAutoVacuumWorkerProcess, appendStringInfo(), appendStringInfoString(), ErrorContextCallback::arg, Assert, LVRelState::bstrategy, buf, BufferUsageAccumDiff(), ErrorContextCallback::callback, LVRelState::consider_bypass_optimization, LVRelState::cutoffs, LVRelState::dbname, dead_items_alloc(), dead_items_cleanup(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errmsg(), errmsg_internal(), error_context_stack, VacuumCutoffs::FreezeLimit, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), GlobalVisTestFor(), i, VacuumParams::index_cleanup, LVRelState::indname, LVRelState::indrels, LVRelState::indstats, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, VacuumParams::is_wraparound, IsInParallelMode(), lazy_check_wraparound_failsafe(), lazy_scan_heap(), lazy_truncate_heap(), LVRelState::live_tuples, BufferUsage::local_blks_dirtied, BufferUsage::local_blks_hit, BufferUsage::local_blks_read, LOG, VacuumParams::log_min_duration, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, Max, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, VacuumCutoffs::MultiXactCutoff, MultiXactIdPrecedesOrEquals(), MyDatabaseId, LVRelState::new_frozen_tuple_pages, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, NoLock, LVRelState::nonempty_pages, LVRelState::num_index_scans, IndexBulkDeleteResult::num_pages, VacuumParams::nworkers, VacuumCutoffs::OldestMxact, VacuumCutoffs::OldestXmin, VacuumParams::options, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, IndexBulkDeleteResult::pages_newly_deleted, palloc(), palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgBufferUsage, pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), pgStatBlockReadTime, pgStatBlockWriteTime, pgWalUsage, LVRelState::phase, ErrorContextCallback::previous, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, pstrdup(), RelationData::rd_rel, ReadNextTransactionId(), LVRelState::recently_dead_tuples, LVRelState::rel, LVRelState::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, VacuumCutoffs::relfrozenxid, VacuumCutoffs::relminmxid, LVRelState::relname, LVRelState::relnamespace, LVRelState::removed_pages, RowExclusiveLock, LVRelState::scanned_pages, BufferUsage::shared_blks_dirtied, BufferUsage::shared_blks_hit, BufferUsage::shared_blks_read, should_attempt_truncation(), LVRelState::skippedallvis, LVRelState::skipwithvm, TimestampDifference(), TimestampDifferenceExceeds(), track_io_timing, TransactionIdPrecedesOrEquals(), VacuumParams::truncate, LVRelState::tuples_deleted, LVRelState::tuples_frozen, update_relstats_all_indexes(), vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_VERBOSE, VACOPTVALUE_AUTO, VACOPTVALUE_DISABLED, VACOPTVALUE_ENABLED, VACOPTVALUE_UNSPECIFIED, VACUUM_ERRCB_PHASE_UNKNOWN, vacuum_error_callback(), vacuum_get_cutoffs(), VacuumFailsafeActive, LVRelState::verbose, verbose, visibilitymap_count(), LVRelState::vistest, LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, LVRelState::vm_new_visible_pages, WalUsage::wal_bytes, WalUsage::wal_fpi, WalUsage::wal_records, and WalUsageAccumDiff().

◆ lazy_check_wraparound_failsafe()

static bool lazy_check_wraparound_failsafe ( LVRelState * vacrel)
static

Definition at line 2460 of file vacuumlazy.c.

2461{
2462 /* Don't warn more than once per VACUUM */
2463 if (VacuumFailsafeActive)
2464 return true;
2465
2466 if (unlikely(vacuum_xid_failsafe_check(&vacrel->cutoffs)))
2467 {
2468 const int progress_index[] = {
2469 PROGRESS_VACUUM_INDEXES_TOTAL,
2470 PROGRESS_VACUUM_INDEXES_PROCESSED
2471 };
2472 int64 progress_val[2] = {0, 0};
2473
2474 VacuumFailsafeActive = true;
2475
2476 /*
2477 * Abandon use of a buffer access strategy to allow use of all of
2478 * shared buffers. We assume the caller who allocated the memory for
2479 * the BufferAccessStrategy will free it.
2480 */
2481 vacrel->bstrategy = NULL;
2482
2483 /* Disable index vacuuming, index cleanup, and heap rel truncation */
2484 vacrel->do_index_vacuuming = false;
2485 vacrel->do_index_cleanup = false;
2486 vacrel->do_rel_truncate = false;
2487
2488 /* Reset the progress counters */
2489 pgstat_progress_update_multi_param(2, progress_index, progress_val);
2490
2491 ereport(WARNING,
2492 (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
2493 vacrel->dbname, vacrel->relnamespace, vacrel->relname,
2494 vacrel->num_index_scans),
2495 errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
2496 errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
2497 "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
2498
2499 /* Stop applying cost limits from this point on */
2500 VacuumCostActive = false;
2501 VacuumCostBalance = 0;
2502
2503 return true;
2504 }
2505
2506 return false;
2507}
#define unlikely(x)
Definition: c.h:333
int errdetail(const char *fmt,...)
Definition: elog.c:1203
int errhint(const char *fmt,...)
Definition: elog.c:1317
bool VacuumCostActive
Definition: globals.c:157
int VacuumCostBalance
Definition: globals.c:156
#define PROGRESS_VACUUM_INDEXES_PROCESSED
Definition: progress.h:30
#define PROGRESS_VACUUM_INDEXES_TOTAL
Definition: progress.h:29
bool vacuum_xid_failsafe_check(const struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:1252

References LVRelState::bstrategy, LVRelState::cutoffs, LVRelState::dbname, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errdetail(), errhint(), errmsg(), LVRelState::num_index_scans, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, LVRelState::relname, LVRelState::relnamespace, unlikely, vacuum_xid_failsafe_check(), VacuumCostActive, VacuumCostBalance, VacuumFailsafeActive, and WARNING.

Referenced by heap_vacuum_rel(), lazy_scan_heap(), and lazy_vacuum_all_indexes().
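
The failsafe is deliberately one-shot: once VacuumFailsafeActive is set, later calls return true immediately, and the rest of the VACUUM skips index vacuuming, index cleanup, heap truncation, and cost-based delays so that relfrozenxid/relminmxid can be advanced as quickly as possible. A minimal standalone C sketch of that shape (not PostgreSQL code; the struct, flag, and helper below are hypothetical stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for LVRelState fields and globals */
static bool failsafe_active = false;

struct vac_state
{
    bool do_index_vacuuming;
    bool do_index_cleanup;
    bool do_rel_truncate;
    int  num_index_scans;
};

/* Returns true when the failsafe is (or already was) active */
static bool
check_failsafe(struct vac_state *vac, bool xid_age_is_dangerous)
{
    if (failsafe_active)
        return true;            /* warn and downgrade only once per VACUUM */

    if (xid_age_is_dangerous)
    {
        failsafe_active = true;

        /* skip everything that is not needed to advance relfrozenxid */
        vac->do_index_vacuuming = false;
        vac->do_index_cleanup = false;
        vac->do_rel_truncate = false;

        fprintf(stderr, "failsafe triggered after %d index scans\n",
                vac->num_index_scans);
        return true;
    }

    return false;
}

int
main(void)
{
    struct vac_state vac = {true, true, true, 0};

    printf("%d\n", check_failsafe(&vac, false));    /* 0: not triggered */
    printf("%d\n", check_failsafe(&vac, true));     /* 1: triggered now */
    printf("%d\n", check_failsafe(&vac, true));     /* 1: already active */
    return 0;
}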

◆ lazy_cleanup_all_indexes()

static void lazy_cleanup_all_indexes ( LVRelState * vacrel)
static

Definition at line 2513 of file vacuumlazy.c.

2514{
2515 double reltuples = vacrel->new_rel_tuples;
2516 bool estimated_count = vacrel->scanned_pages < vacrel->rel_pages;
2517 const int progress_start_index[] = {
2518 PROGRESS_VACUUM_PHASE,
2519 PROGRESS_VACUUM_INDEXES_TOTAL
2520 };
2521 const int progress_end_index[] = {
2522 PROGRESS_VACUUM_INDEXES_TOTAL,
2523 PROGRESS_VACUUM_INDEXES_PROCESSED
2524 };
2525 int64 progress_start_val[2];
2526 int64 progress_end_val[2] = {0, 0};
2527
2528 Assert(vacrel->do_index_cleanup);
2529 Assert(vacrel->nindexes > 0);
2530
2531 /*
2532 * Report that we are now cleaning up indexes and the number of indexes to
2533 * cleanup.
2534 */
2535 progress_start_val[0] = PROGRESS_VACUUM_PHASE_INDEX_CLEANUP;
2536 progress_start_val[1] = vacrel->nindexes;
2537 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
2538
2539 if (!ParallelVacuumIsActive(vacrel))
2540 {
2541 for (int idx = 0; idx < vacrel->nindexes; idx++)
2542 {
2543 Relation indrel = vacrel->indrels[idx];
2544 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2545
2546 vacrel->indstats[idx] =
2547 lazy_cleanup_one_index(indrel, istat, reltuples,
2548 estimated_count, vacrel);
2549
2550 /* Report the number of indexes cleaned up */
2551 pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
2552 idx + 1);
2553 }
2554 }
2555 else
2556 {
2557 /* Outsource everything to parallel variant */
2558 parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
2559 vacrel->num_index_scans,
2560 estimated_count);
2561 }
2562
2563 /* Reset the progress counters */
2564 pgstat_progress_update_multi_param(2, progress_end_index, progress_end_val);
2565}
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
#define PROGRESS_VACUUM_PHASE_INDEX_CLEANUP
Definition: progress.h:36
static IndexBulkDeleteResult * lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
Definition: vacuumlazy.c:2630
void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, bool estimated_count)

References Assert, LVRelState::do_index_cleanup, idx(), LVRelState::indrels, LVRelState::indstats, lazy_cleanup_one_index(), LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::num_index_scans, parallel_vacuum_cleanup_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, LVRelState::pvs, LVRelState::rel_pages, and LVRelState::scanned_pages.

Referenced by lazy_scan_heap().
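
When no parallel workers are in use, index cleanup is a plain serial loop over the indexes with a per-index progress update, and reltuples is flagged as an estimate whenever some heap pages were skipped. A standalone sketch of that control flow (not PostgreSQL code; cleanup_one_index and the sample values are hypothetical stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for one index's amvacuumcleanup call */
static void
cleanup_one_index(int index_no, double reltuples, bool estimated_count)
{
    printf("cleanup index %d (reltuples=%.0f, estimated=%d)\n",
           index_no, reltuples, estimated_count);
}

int
main(void)
{
    int      nindexes = 3;
    double   new_rel_tuples = 12345.0;
    unsigned scanned_pages = 80, rel_pages = 100;

    /* reltuples is only an estimate when some heap pages were skipped */
    bool estimated_count = scanned_pages < rel_pages;

    for (int idx = 0; idx < nindexes; idx++)
    {
        cleanup_one_index(idx, new_rel_tuples, estimated_count);
        /* the real loop reports idx + 1 indexes processed here */
    }
    return 0;
}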

◆ lazy_cleanup_one_index()

static IndexBulkDeleteResult * lazy_cleanup_one_index ( Relation  indrel,
IndexBulkDeleteResult *  istat,
double  reltuples,
bool  estimated_count,
LVRelState *  vacrel 
)
static

Definition at line 2630 of file vacuumlazy.c.

2633{
2634 IndexVacuumInfo ivinfo;
2635 LVSavedErrInfo saved_err_info;
2636
2637 ivinfo.index = indrel;
2638 ivinfo.heaprel = vacrel->rel;
2639 ivinfo.analyze_only = false;
2640 ivinfo.report_progress = false;
2641 ivinfo.estimated_count = estimated_count;
2642 ivinfo.message_level = DEBUG2;
2643
2644 ivinfo.num_heap_tuples = reltuples;
2645 ivinfo.strategy = vacrel->bstrategy;
2646
2647 /*
2648 * Update error traceback information.
2649 *
2650 * The index name is saved during this phase and restored immediately
2651 * after this phase. See vacuum_error_callback.
2652 */
2653 Assert(vacrel->indname == NULL);
2654 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
2655 update_vacuum_error_info(vacrel, &saved_err_info,
2656 VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
2657 InvalidBlockNumber, InvalidOffsetNumber);
2658
2659 istat = vac_cleanup_one_index(&ivinfo, istat);
2660
2661 /* Revert to the previous phase information for error traceback */
2662 restore_vacuum_error_info(vacrel, &saved_err_info);
2663 pfree(vacrel->indname);
2664 vacrel->indname = NULL;
2665
2666 return istat;
2667}
Relation index
Definition: genam.h:48
double num_heap_tuples
Definition: genam.h:54
bool analyze_only
Definition: genam.h:50
BufferAccessStrategy strategy
Definition: genam.h:55
Relation heaprel
Definition: genam.h:49
bool report_progress
Definition: genam.h:51
int message_level
Definition: genam.h:53
bool estimated_count
Definition: genam.h:52
IndexBulkDeleteResult * vac_cleanup_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat)
Definition: vacuum.c:2536
static void restore_vacuum_error_info(LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
Definition: vacuumlazy.c:3349
static void update_vacuum_error_info(LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
Definition: vacuumlazy.c:3330

References IndexVacuumInfo::analyze_only, Assert, LVRelState::bstrategy, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_cleanup_one_index(), and VACUUM_ERRCB_PHASE_INDEX_CLEANUP.

Referenced by lazy_cleanup_all_indexes().
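
The only subtlety here is the save/restore discipline for the error-context state: the current phase and index name are stashed before calling into the index AM and restored afterwards, so an ERROR raised inside amvacuumcleanup reports the right index. A minimal standalone sketch of that pattern, using simplified stand-in types (not the real LVRelState/LVSavedErrInfo):

#include <stdio.h>

/* Simplified stand-ins for the error-context fields in LVRelState */
enum phase { PHASE_UNKNOWN, PHASE_INDEX_CLEANUP };

struct err_info
{
    enum phase  phase;
    const char *indname;
};

static struct err_info current = {PHASE_UNKNOWN, NULL};

static void
update_err_info(struct err_info *saved, enum phase new_phase, const char *indname)
{
    *saved = current;           /* remember the caller's phase */
    current.phase = new_phase;
    current.indname = indname;
}

static void
restore_err_info(const struct err_info *saved)
{
    current = *saved;           /* revert once the per-index work is done */
}

int
main(void)
{
    struct err_info saved;

    update_err_info(&saved, PHASE_INDEX_CLEANUP, "some_index");
    /* ... index cleanup runs here; an error report would name some_index ... */
    restore_err_info(&saved);

    printf("phase=%d indname=%s\n", (int) current.phase,
           current.indname ? current.indname : "(none)");
    return 0;
}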

◆ lazy_scan_heap()

static void lazy_scan_heap ( LVRelState * vacrel)
static

Definition at line 905 of file vacuumlazy.c.

906{
907 BlockNumber rel_pages = vacrel->rel_pages,
908 blkno,
909 next_fsm_block_to_vacuum = 0;
910 bool all_visible_according_to_vm;
911
912 Buffer vmbuffer = InvalidBuffer;
913 const int initprog_index[] = {
917 };
918 int64 initprog_val[3];
919
920 /* Report that we're scanning the heap, advertising total # of blocks */
921 initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
922 initprog_val[1] = rel_pages;
923 initprog_val[2] = vacrel->dead_items_info->max_bytes;
924 pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
925
926 /* Initialize for the first heap_vac_scan_next_block() call */
929 vacrel->next_unskippable_allvis = false;
931
932 while (heap_vac_scan_next_block(vacrel, &blkno, &all_visible_according_to_vm))
933 {
934 Buffer buf;
935 Page page;
936 bool has_lpdead_items;
937 bool got_cleanup_lock = false;
938
939 vacrel->scanned_pages++;
940
941 /* Report as block scanned, update error traceback information */
944 blkno, InvalidOffsetNumber);
945
947
948 /*
949 * Regularly check if wraparound failsafe should trigger.
950 *
951 * There is a similar check inside lazy_vacuum_all_indexes(), but
952 * relfrozenxid might start to look dangerously old before we reach
953 * that point. This check also provides failsafe coverage for the
954 * one-pass strategy, and the two-pass strategy with the index_cleanup
955 * param set to 'off'.
956 */
957 if (vacrel->scanned_pages % FAILSAFE_EVERY_PAGES == 0)
959
960 /*
961 * Consider if we definitely have enough space to process TIDs on page
962 * already. If we are close to overrunning the available space for
963 * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
964 * this page.
965 */
967 {
968 /*
969 * Before beginning index vacuuming, we release any pin we may
970 * hold on the visibility map page. This isn't necessary for
971 * correctness, but we do it anyway to avoid holding the pin
972 * across a lengthy, unrelated operation.
973 */
974 if (BufferIsValid(vmbuffer))
975 {
976 ReleaseBuffer(vmbuffer);
977 vmbuffer = InvalidBuffer;
978 }
979
980 /* Perform a round of index and heap vacuuming */
981 vacrel->consider_bypass_optimization = false;
982 lazy_vacuum(vacrel);
983
984 /*
985 * Vacuum the Free Space Map to make newly-freed space visible on
986 * upper-level FSM pages. Note we have not yet processed blkno.
987 */
988 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
989 blkno);
990 next_fsm_block_to_vacuum = blkno;
991
992 /* Report that we are once again scanning the heap */
995 }
996
997 /*
998 * Pin the visibility map page in case we need to mark the page
999 * all-visible. In most cases this will be very cheap, because we'll
1000 * already have the correct page pinned anyway.
1001 */
1002 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
1003
1005 vacrel->bstrategy);
1006 page = BufferGetPage(buf);
1007
1008 /*
1009 * We need a buffer cleanup lock to prune HOT chains and defragment
1010 * the page in lazy_scan_prune. But when it's not possible to acquire
1011 * a cleanup lock right away, we may be able to settle for reduced
1012 * processing using lazy_scan_noprune.
1013 */
1014 got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
1015
1016 if (!got_cleanup_lock)
1018
1019 /* Check for new or empty pages before lazy_scan_[no]prune call */
1020 if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock,
1021 vmbuffer))
1022 {
1023 /* Processed as new/empty page (lock and pin released) */
1024 continue;
1025 }
1026
1027 /*
1028 * If we didn't get the cleanup lock, we can still collect LP_DEAD
1029 * items in the dead_items area for later vacuuming, count live and
1030 * recently dead tuples for vacuum logging, and determine if this
1031 * block could later be truncated. If we encounter any xid/mxids that
1032 * require advancing the relfrozenxid/relminxid, we'll have to wait
1033 * for a cleanup lock and call lazy_scan_prune().
1034 */
1035 if (!got_cleanup_lock &&
1036 !lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
1037 {
1038 /*
1039 * lazy_scan_noprune could not do all required processing. Wait
1040 * for a cleanup lock, and call lazy_scan_prune in the usual way.
1041 */
1042 Assert(vacrel->aggressive);
1045 got_cleanup_lock = true;
1046 }
1047
1048 /*
1049 * If we have a cleanup lock, we must now prune, freeze, and count
1050 * tuples. We may have acquired the cleanup lock originally, or we may
1051 * have gone back and acquired it after lazy_scan_noprune() returned
1052 * false. Either way, the page hasn't been processed yet.
1053 *
1054 * Like lazy_scan_noprune(), lazy_scan_prune() will count
1055 * recently_dead_tuples and live tuples for vacuum logging, determine
1056 * if the block can later be truncated, and accumulate the details of
1057 * remaining LP_DEAD line pointers on the page into dead_items. These
1058 * dead items include those pruned by lazy_scan_prune() as well as
1059 * line pointers previously marked LP_DEAD.
1060 */
1061 if (got_cleanup_lock)
1062 lazy_scan_prune(vacrel, buf, blkno, page,
1063 vmbuffer, all_visible_according_to_vm,
1064 &has_lpdead_items);
1065
1066 /*
1067 * Now drop the buffer lock and, potentially, update the FSM.
1068 *
1069 * Our goal is to update the freespace map the last time we touch the
1070 * page. If we'll process a block in the second pass, we may free up
1071 * additional space on the page, so it is better to update the FSM
1072 * after the second pass. If the relation has no indexes, or if index
1073 * vacuuming is disabled, there will be no second heap pass; if this
1074 * particular page has no dead items, the second heap pass will not
1075 * touch this page. So, in those cases, update the FSM now.
1076 *
1077 * Note: In corner cases, it's possible to miss updating the FSM
1078 * entirely. If index vacuuming is currently enabled, we'll skip the
1079 * FSM update now. But if failsafe mode is later activated, or there
1080 * are so few dead tuples that index vacuuming is bypassed, there will
1081 * also be no opportunity to update the FSM later, because we'll never
1082 * revisit this page. Since updating the FSM is desirable but not
1083 * absolutely required, that's OK.
1084 */
1085 if (vacrel->nindexes == 0
1086 || !vacrel->do_index_vacuuming
1087 || !has_lpdead_items)
1088 {
1089 Size freespace = PageGetHeapFreeSpace(page);
1090
1092 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1093
1094 /*
1095 * Periodically perform FSM vacuuming to make newly-freed space
1096 * visible on upper FSM pages. This is done after vacuuming if the
1097 * table has indexes. There will only be newly-freed space if we
1098 * held the cleanup lock and lazy_scan_prune() was called.
1099 */
1100 if (got_cleanup_lock && vacrel->nindexes == 0 && has_lpdead_items &&
1101 blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1102 {
1103 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1104 blkno);
1105 next_fsm_block_to_vacuum = blkno;
1106 }
1107 }
1108 else
1110 }
1111
1112 vacrel->blkno = InvalidBlockNumber;
1113 if (BufferIsValid(vmbuffer))
1114 ReleaseBuffer(vmbuffer);
1115
1116 /* report that everything is now scanned */
1118
1119 /* now we can compute the new value for pg_class.reltuples */
1120 vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, rel_pages,
1121 vacrel->scanned_pages,
1122 vacrel->live_tuples);
1123
1124 /*
1125 * Also compute the total number of surviving heap entries. In the
1126 * (unlikely) scenario that new_live_tuples is -1, take it as zero.
1127 */
1128 vacrel->new_rel_tuples =
1129 Max(vacrel->new_live_tuples, 0) + vacrel->recently_dead_tuples +
1130 vacrel->missed_dead_tuples;
1131
1132 /*
1133 * Do index vacuuming (call each index's ambulkdelete routine), then do
1134 * related heap vacuuming
1135 */
1136 if (vacrel->dead_items_info->num_items > 0)
1137 lazy_vacuum(vacrel);
1138
1139 /*
1140 * Vacuum the remainder of the Free Space Map. We must do this whether or
1141 * not there were indexes, and whether or not we bypassed index vacuuming.
1142 */
1143 if (blkno > next_fsm_block_to_vacuum)
1144 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno);
1145
1146 /* report all blocks vacuumed */
1148
1149 /* Do final index cleanup (call each index's amvacuumcleanup routine) */
1150 if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
1152}
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5180
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5341
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:189
Size PageGetHeapFreeSpace(const PageData *page)
Definition: bufpage.c:980
size_t Size
Definition: c.h:562
void FreeSpaceMapVacuumRange(Relation rel, BlockNumber start, BlockNumber end)
Definition: freespace.c:377
void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
Definition: freespace.c:194
#define PROGRESS_VACUUM_PHASE_SCAN_HEAP
Definition: progress.h:33
#define PROGRESS_VACUUM_TOTAL_HEAP_BLKS
Definition: progress.h:22
#define PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES
Definition: progress.h:26
#define PROGRESS_VACUUM_HEAP_BLKS_SCANNED
Definition: progress.h:23
#define PROGRESS_VACUUM_HEAP_BLKS_VACUUMED
Definition: progress.h:24
BlockNumber blkno
Definition: vacuumlazy.c:224
void vacuum_delay_point(void)
Definition: vacuum.c:2361
double vac_estimate_reltuples(Relation relation, BlockNumber total_pages, BlockNumber scanned_pages, double scanned_tuples)
Definition: vacuum.c:1314
static void lazy_vacuum(LVRelState *vacrel)
Definition: vacuumlazy.c:2002
static void lazy_cleanup_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2513
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
Definition: vacuumlazy.c:1791
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
Definition: vacuumlazy.c:1370
static bool heap_vac_scan_next_block(LVRelState *vacrel, BlockNumber *blkno, bool *all_visible_according_to_vm)
Definition: vacuumlazy.c:1173
static void lazy_scan_prune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool all_visible_according_to_vm, bool *has_lpdead_items)
Definition: vacuumlazy.c:1509
#define FAILSAFE_EVERY_PAGES
Definition: vacuumlazy.c:147
#define VACUUM_FSM_EVERY_PAGES
Definition: vacuumlazy.c:156
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)

References LVRelState::aggressive, Assert, LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage(), BufferIsValid(), ConditionalLockBufferForCleanup(), LVRelState::consider_bypass_optimization, LVRelState::current_block, LVRelState::dead_items, LVRelState::dead_items_info, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, FAILSAFE_EVERY_PAGES, FreeSpaceMapVacuumRange(), heap_vac_scan_next_block(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_check_wraparound_failsafe(), lazy_cleanup_all_indexes(), lazy_scan_new_or_empty(), lazy_scan_noprune(), lazy_scan_prune(), lazy_vacuum(), LVRelState::live_tuples, LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, Max, VacDeadItemsInfo::max_bytes, LVRelState::missed_dead_tuples, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_vmbuffer, LVRelState::nindexes, VacDeadItemsInfo::num_items, PageGetHeapFreeSpace(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, RBM_NORMAL, ReadBufferExtended(), LVRelState::recently_dead_tuples, RecordPageWithFreeSpace(), LVRelState::rel, LVRelState::rel_pages, ReleaseBuffer(), LVRelState::scanned_pages, TidStoreMemoryUsage(), UnlockReleaseBuffer(), update_vacuum_error_info(), vac_estimate_reltuples(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_FSM_EVERY_PAGES, and visibilitymap_pin().

Referenced by heap_vacuum_rel().
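
Two periodic triggers drive the scan loop above: the wraparound failsafe is re-checked every FAILSAFE_EVERY_PAGES blocks, and a full round of index plus heap vacuuming is run whenever the dead_items memory is close to full. A standalone sketch of that cadence (not PostgreSQL code; dead_items_nearly_full, check_failsafe, and vacuum_indexes_and_heap are hypothetical stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLCKSZ 8192
#define FAILSAFE_EVERY_PAGES \
    ((uint32_t) (((uint64_t) 4 * 1024 * 1024 * 1024) / BLCKSZ))

/* Hypothetical stand-ins for the real routines */
static bool
dead_items_nearly_full(uint32_t scanned_pages)
{
    return scanned_pages == 700000;     /* pretend memory fills up once */
}

static void check_failsafe(void)          { puts("failsafe re-check"); }
static void vacuum_indexes_and_heap(void) { puts("index+heap vacuum cycle"); }

int
main(void)
{
    uint32_t rel_pages = 1000000;
    uint32_t scanned_pages = 0;

    for (uint32_t blkno = 0; blkno < rel_pages; blkno++)
    {
        scanned_pages++;

        /* re-check the wraparound failsafe at a fixed page interval */
        if (scanned_pages % FAILSAFE_EVERY_PAGES == 0)
            check_failsafe();

        /* when dead-item memory is nearly exhausted, pause the scan and do
         * a full round of index and heap vacuuming before continuing */
        if (dead_items_nearly_full(scanned_pages))
            vacuum_indexes_and_heap();

        /* ... per-page pruning/freezing would happen here ... */
    }

    printf("scanned %u pages\n", scanned_pages);
    return 0;
}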

◆ lazy_scan_new_or_empty()

static bool lazy_scan_new_or_empty ( LVRelState * vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool  sharelock,
Buffer  vmbuffer 
)
static

Definition at line 1370 of file vacuumlazy.c.

1372{
1373 Size freespace;
1374
1375 if (PageIsNew(page))
1376 {
1377 /*
1378 * All-zeroes pages can be left over if either a backend extends the
1379 * relation by a single page, but crashes before the newly initialized
1380 * page has been written out, or when bulk-extending the relation
1381 * (which creates a number of empty pages at the tail end of the
1382 * relation), and then enters them into the FSM.
1383 *
1384 * Note we do not enter the page into the visibilitymap. That has the
1385 * downside that we repeatedly visit this page in subsequent vacuums,
1386 * but otherwise we'll never discover the space on a promoted standby.
1387 * The harm of repeated checking ought to normally not be too bad. The
1388 * space usually should be used at some point, otherwise there
1389 * wouldn't be any regular vacuums.
1390 *
1391 * Make sure these pages are in the FSM, to ensure they can be reused.
1392 * Do that by testing if there's any space recorded for the page. If
1393 * not, enter it. We do so after releasing the lock on the heap page,
1394 * the FSM is approximate, after all.
1395 */
1397
1398 if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
1399 {
1400 freespace = BLCKSZ - SizeOfPageHeaderData;
1401
1402 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1403 }
1404
1405 return true;
1406 }
1407
1408 if (PageIsEmpty(page))
1409 {
1410 /*
1411 * It seems likely that caller will always be able to get a cleanup
1412 * lock on an empty page. But don't take any chances -- escalate to
1413 * an exclusive lock (still don't need a cleanup lock, though).
1414 */
1415 if (sharelock)
1416 {
1419
1420 if (!PageIsEmpty(page))
1421 {
1422 /* page isn't new or empty -- keep lock and pin for now */
1423 return false;
1424 }
1425 }
1426 else
1427 {
1428 /* Already have a full cleanup lock (which is more than enough) */
1429 }
1430
1431 /*
1432 * Unlike new pages, empty pages are always set all-visible and
1433 * all-frozen.
1434 */
1435 if (!PageIsAllVisible(page))
1436 {
1437 uint8 old_vmbits;
1438
1440
1441 /* mark buffer dirty before writing a WAL record */
1443
1444 /*
1445 * It's possible that another backend has extended the heap,
1446 * initialized the page, and then failed to WAL-log the page due
1447 * to an ERROR. Since heap extension is not WAL-logged, recovery
1448 * might try to replay our record setting the page all-visible and
1449 * find that the page isn't initialized, which will cause a PANIC.
1450 * To prevent that, check whether the page has been previously
1451 * WAL-logged, and if not, do that now.
1452 */
1453 if (RelationNeedsWAL(vacrel->rel) &&
1455 log_newpage_buffer(buf, true);
1456
1457 PageSetAllVisible(page);
1458 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
1460 vmbuffer, InvalidTransactionId,
1464
1465 /*
1466 * If the page wasn't already set all-visible and/or all-frozen in
1467 * the VM, count it as newly set for logging.
1468 */
1469 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
1470 {
1471 vacrel->vm_new_visible_pages++;
1473 }
1474 else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0)
1475 vacrel->vm_new_frozen_pages++;
1476 }
1477
1478 freespace = PageGetHeapFreeSpace(page);
1480 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1481 return true;
1482 }
1483
1484 /* page isn't new or empty -- keep lock and pin */
1485 return false;
1486}
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2532
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:191
static bool PageIsAllVisible(const PageData *page)
Definition: bufpage.h:429
#define SizeOfPageHeaderData
Definition: bufpage.h:217
static void PageSetAllVisible(Page page)
Definition: bufpage.h:434
static XLogRecPtr PageGetLSN(const PageData *page)
Definition: bufpage.h:386
Size GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk)
Definition: freespace.c:244
#define START_CRIT_SECTION()
Definition: miscadmin.h:149
#define END_CRIT_SECTION()
Definition: miscadmin.h:151
#define RelationNeedsWAL(relation)
Definition: rel.h:628
uint8 visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
Definition: xloginsert.c:1237

References buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, END_CRIT_SECTION, GetRecordedFreeSpace(), InvalidTransactionId, InvalidXLogRecPtr, LockBuffer(), log_newpage_buffer(), MarkBufferDirty(), PageGetHeapFreeSpace(), PageGetLSN(), PageIsAllVisible(), PageIsEmpty(), PageIsNew(), PageSetAllVisible(), RecordPageWithFreeSpace(), LVRelState::rel, RelationNeedsWAL, SizeOfPageHeaderData, START_CRIT_SECTION, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, and LVRelState::vm_new_visible_pages.

Referenced by lazy_scan_heap().
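
For an all-zeroes (PageIsNew) page the function only makes sure the FSM knows about the page; the amount recorded is simply the block size minus the fixed page header. A tiny standalone sketch of that arithmetic (BLCKSZ 8192 and a 24-byte header are the usual defaults, assumed here):

#include <stddef.h>
#include <stdio.h>

/* Assumed defaults: 8kB blocks and a 24-byte fixed page header */
#define BLCKSZ 8192
#define SIZE_OF_PAGE_HEADER_DATA 24

int
main(void)
{
    /* an all-zeroes (PageIsNew) page is recorded in the FSM with all of
     * its space free except the header it will get when first used */
    size_t freespace = BLCKSZ - SIZE_OF_PAGE_HEADER_DATA;

    printf("recorded free space for a new page: %zu bytes\n", freespace);
    return 0;
}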

◆ lazy_scan_noprune()

static bool lazy_scan_noprune ( LVRelState * vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool *  has_lpdead_items 
)
static

Definition at line 1791 of file vacuumlazy.c.

1796{
1797 OffsetNumber offnum,
1798 maxoff;
1799 int lpdead_items,
1800 live_tuples,
1801 recently_dead_tuples,
1802 missed_dead_tuples;
1803 bool hastup;
1804 HeapTupleHeader tupleheader;
1805 TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
1806 MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid;
1807 OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
1808
1809 Assert(BufferGetBlockNumber(buf) == blkno);
1810
1811 hastup = false; /* for now */
1812
1813 lpdead_items = 0;
1814 live_tuples = 0;
1815 recently_dead_tuples = 0;
1816 missed_dead_tuples = 0;
1817
1818 maxoff = PageGetMaxOffsetNumber(page);
1819 for (offnum = FirstOffsetNumber;
1820 offnum <= maxoff;
1821 offnum = OffsetNumberNext(offnum))
1822 {
1823 ItemId itemid;
1824 HeapTupleData tuple;
1825
1826 vacrel->offnum = offnum;
1827 itemid = PageGetItemId(page, offnum);
1828
1829 if (!ItemIdIsUsed(itemid))
1830 continue;
1831
1832 if (ItemIdIsRedirected(itemid))
1833 {
1834 hastup = true;
1835 continue;
1836 }
1837
1838 if (ItemIdIsDead(itemid))
1839 {
1840 /*
1841 * Deliberately don't set hastup=true here. See same point in
1842 * lazy_scan_prune for an explanation.
1843 */
1844 deadoffsets[lpdead_items++] = offnum;
1845 continue;
1846 }
1847
1848 hastup = true; /* page prevents rel truncation */
1849 tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
1850 if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
1851 &NoFreezePageRelfrozenXid,
1852 &NoFreezePageRelminMxid))
1853 {
1854 /* Tuple with XID < FreezeLimit (or MXID < MultiXactCutoff) */
1855 if (vacrel->aggressive)
1856 {
1857 /*
1858 * Aggressive VACUUMs must always be able to advance rel's
1859 * relfrozenxid to a value >= FreezeLimit (and be able to
1860 * advance rel's relminmxid to a value >= MultiXactCutoff).
1861 * The ongoing aggressive VACUUM won't be able to do that
1862 * unless it can freeze an XID (or MXID) from this tuple now.
1863 *
1864 * The only safe option is to have caller perform processing
1865 * of this page using lazy_scan_prune. Caller might have to
1866 * wait a while for a cleanup lock, but it can't be helped.
1867 */
1868 vacrel->offnum = InvalidOffsetNumber;
1869 return false;
1870 }
1871
1872 /*
1873 * Non-aggressive VACUUMs are under no obligation to advance
1874 * relfrozenxid (even by one XID). We can be much laxer here.
1875 *
1876 * Currently we always just accept an older final relfrozenxid
1877 * and/or relminmxid value. We never make caller wait or work a
1878 * little harder, even when it likely makes sense to do so.
1879 */
1880 }
1881
1882 ItemPointerSet(&(tuple.t_self), blkno, offnum);
1883 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
1884 tuple.t_len = ItemIdGetLength(itemid);
1885 tuple.t_tableOid = RelationGetRelid(vacrel->rel);
1886
1887 switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
1888 buf))
1889 {
1890 case HEAPTUPLE_DELETE_IN_PROGRESS:
1891 case HEAPTUPLE_LIVE:
1892
1893 /*
1894 * Count both cases as live, just like lazy_scan_prune
1895 */
1896 live_tuples++;
1897
1898 break;
1899 case HEAPTUPLE_DEAD:
1900
1901 /*
1902 * There is some useful work for pruning to do, that won't be
1903 * done due to failure to get a cleanup lock.
1904 */
1905 missed_dead_tuples++;
1906 break;
1907 case HEAPTUPLE_RECENTLY_DEAD:
1908
1909 /*
1910 * Count in recently_dead_tuples, just like lazy_scan_prune
1911 */
1912 recently_dead_tuples++;
1913 break;
1914 case HEAPTUPLE_INSERT_IN_PROGRESS:
1915
1916 /*
1917 * Do not count these rows as live, just like lazy_scan_prune
1918 */
1919 break;
1920 default:
1921 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1922 break;
1923 }
1924 }
1925
1926 vacrel->offnum = InvalidOffsetNumber;
1927
1928 /*
1929 * By here we know for sure that caller can put off freezing and pruning
1930 * this particular page until the next VACUUM. Remember its details now.
1931 * (lazy_scan_prune expects a clean slate, so we have to do this last.)
1932 */
1933 vacrel->NewRelfrozenXid = NoFreezePageRelfrozenXid;
1934 vacrel->NewRelminMxid = NoFreezePageRelminMxid;
1935
1936 /* Save any LP_DEAD items found on the page in dead_items */
1937 if (vacrel->nindexes == 0)
1938 {
1939 /* Using one-pass strategy (since table has no indexes) */
1940 if (lpdead_items > 0)
1941 {
1942 /*
1943 * Perfunctory handling for the corner case where a single pass
1944 * strategy VACUUM cannot get a cleanup lock, and it turns out
1945 * that there is one or more LP_DEAD items: just count the LP_DEAD
1946 * items as missed_dead_tuples instead. (This is a bit dishonest,
1947 * but it beats having to maintain specialized heap vacuuming code
1948 * forever, for vanishingly little benefit.)
1949 */
1950 hastup = true;
1951 missed_dead_tuples += lpdead_items;
1952 }
1953 }
1954 else if (lpdead_items > 0)
1955 {
1956 /*
1957 * Page has LP_DEAD items, and so any references/TIDs that remain in
1958 * indexes will be deleted during index vacuuming (and then marked
1959 * LP_UNUSED in the heap)
1960 */
1961 vacrel->lpdead_item_pages++;
1962
1963 dead_items_add(vacrel, blkno, deadoffsets, lpdead_items);
1964
1965 vacrel->lpdead_items += lpdead_items;
1966 }
1967
1968 /*
1969 * Finally, add relevant page-local counts to whole-VACUUM counts
1970 */
1971 vacrel->live_tuples += live_tuples;
1972 vacrel->recently_dead_tuples += recently_dead_tuples;
1973 vacrel->missed_dead_tuples += missed_dead_tuples;
1974 if (missed_dead_tuples > 0)
1975 vacrel->missed_dead_pages++;
1976
1977 /* Can't truncate this page */
1978 if (hastup)
1979 vacrel->nonempty_pages = blkno + 1;
1980
1981 /* Did we find LP_DEAD items? */
1982 *has_lpdead_items = (lpdead_items > 0);
1983
1984 /* Caller won't need to call lazy_scan_prune with same page */
1985 return true;
1986}
TransactionId MultiXactId
Definition: c.h:619
bool heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid)
Definition: heapam.c:7724
#define MaxHeapTuplesPerPage
Definition: htup_details.h:572
static void dead_items_add(LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
Definition: vacuumlazy.c:3048

References LVRelState::aggressive, Assert, buf, BufferGetBlockNumber(), LVRelState::cutoffs, dead_items_add(), elog, ERROR, FirstOffsetNumber, heap_tuple_should_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, ItemIdGetLength, ItemIdIsDead, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MaxHeapTuplesPerPage, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::recently_dead_tuples, LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by lazy_scan_heap().
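
The heart of the function is the HeapTupleSatisfiesVacuum() switch: DELETE_IN_PROGRESS and LIVE are both counted as live, DEAD becomes missed_dead (pruning would have removed it, but there was no cleanup lock), RECENTLY_DEAD is counted separately, and INSERT_IN_PROGRESS is not counted at all. A standalone sketch of that classification, using a simplified stand-in enum rather than the real HTSV_Result:

#include <stdio.h>

/* Simplified stand-in for the HTSV_Result classification */
enum tuple_state
{
    TUPLE_LIVE,
    TUPLE_DEAD,
    TUPLE_RECENTLY_DEAD,
    TUPLE_INSERT_IN_PROGRESS,
    TUPLE_DELETE_IN_PROGRESS
};

int
main(void)
{
    enum tuple_state page[] = {TUPLE_LIVE, TUPLE_DEAD, TUPLE_RECENTLY_DEAD,
                               TUPLE_DELETE_IN_PROGRESS, TUPLE_INSERT_IN_PROGRESS};
    int live = 0, missed_dead = 0, recently_dead = 0;

    for (int i = 0; i < 5; i++)
    {
        switch (page[i])
        {
            case TUPLE_DELETE_IN_PROGRESS:
            case TUPLE_LIVE:
                live++;         /* both counted as live, as in lazy_scan_noprune */
                break;
            case TUPLE_DEAD:
                missed_dead++;  /* pruning would remove it, but no cleanup lock */
                break;
            case TUPLE_RECENTLY_DEAD:
                recently_dead++;
                break;
            case TUPLE_INSERT_IN_PROGRESS:
                break;          /* deliberately not counted as live */
        }
    }

    printf("live=%d missed_dead=%d recently_dead=%d\n",
           live, missed_dead, recently_dead);
    return 0;
}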

◆ lazy_scan_prune()

static void lazy_scan_prune ( LVRelState * vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
Buffer  vmbuffer,
bool  all_visible_according_to_vm,
bool *  has_lpdead_items 
)
static

Definition at line 1509 of file vacuumlazy.c.

1516{
1517 Relation rel = vacrel->rel;
1518 PruneFreezeResult presult;
1519 int prune_options = 0;
1520
1521 Assert(BufferGetBlockNumber(buf) == blkno);
1522
1523 /*
1524 * Prune all HOT-update chains and potentially freeze tuples on this page.
1525 *
1526 * If the relation has no indexes, we can immediately mark would-be dead
1527 * items LP_UNUSED.
1528 *
1529 * The number of tuples removed from the page is returned in
1530 * presult.ndeleted. It should not be confused with presult.lpdead_items;
1531 * presult.lpdead_items's final value can be thought of as the number of
1532 * tuples that were deleted from indexes.
1533 *
1534 * We will update the VM after collecting LP_DEAD items and freezing
1535 * tuples. Pruning will have determined whether or not the page is
1536 * all-visible.
1537 */
1538 prune_options = HEAP_PAGE_PRUNE_FREEZE;
1539 if (vacrel->nindexes == 0)
1540 prune_options |= HEAP_PAGE_PRUNE_MARK_UNUSED_NOW;
1541
1542 heap_page_prune_and_freeze(rel, buf, vacrel->vistest, prune_options,
1543 &vacrel->cutoffs, &presult, PRUNE_VACUUM_SCAN,
1544 &vacrel->offnum,
1545 &vacrel->NewRelfrozenXid, &vacrel->NewRelminMxid);
1546
1549
1550 if (presult.nfrozen > 0)
1551 {
1552 /*
1553 * We don't increment the new_frozen_tuple_pages instrumentation
1554 * counter when nfrozen == 0, since it only counts pages with newly
1555 * frozen tuples (don't confuse that with pages newly set all-frozen
1556 * in VM).
1557 */
1558 vacrel->new_frozen_tuple_pages++;
1559 }
1560
1561 /*
1562 * VACUUM will call heap_page_is_all_visible() during the second pass over
1563 * the heap to determine all_visible and all_frozen for the page -- this
1564 * is a specialized version of the logic from this function. Now that
1565 * we've finished pruning and freezing, make sure that we're in total
1566 * agreement with heap_page_is_all_visible() using an assertion.
1567 */
1568#ifdef USE_ASSERT_CHECKING
1569 /* Note that all_frozen value does not matter when !all_visible */
1570 if (presult.all_visible)
1571 {
1572 TransactionId debug_cutoff;
1573 bool debug_all_frozen;
1574
1575 Assert(presult.lpdead_items == 0);
1576
1577 if (!heap_page_is_all_visible(vacrel, buf,
1578 &debug_cutoff, &debug_all_frozen))
1579 Assert(false);
1580
1581 Assert(presult.all_frozen == debug_all_frozen);
1582
1583 Assert(!TransactionIdIsValid(debug_cutoff) ||
1584 debug_cutoff == presult.vm_conflict_horizon);
1585 }
1586#endif
1587
1588 /*
1589 * Now save details of the LP_DEAD items from the page in vacrel
1590 */
1591 if (presult.lpdead_items > 0)
1592 {
1593 vacrel->lpdead_item_pages++;
1594
1595 /*
1596 * deadoffsets are collected incrementally in
1597 * heap_page_prune_and_freeze() as each dead line pointer is recorded,
1598 * with an indeterminate order, but dead_items_add requires them to be
1599 * sorted.
1600 */
1601 qsort(presult.deadoffsets, presult.lpdead_items, sizeof(OffsetNumber),
1603
1604 dead_items_add(vacrel, blkno, presult.deadoffsets, presult.lpdead_items);
1605 }
1606
1607 /* Finally, add page-local counts to whole-VACUUM counts */
1608 vacrel->tuples_deleted += presult.ndeleted;
1609 vacrel->tuples_frozen += presult.nfrozen;
1610 vacrel->lpdead_items += presult.lpdead_items;
1611 vacrel->live_tuples += presult.live_tuples;
1612 vacrel->recently_dead_tuples += presult.recently_dead_tuples;
1613
1614 /* Can't truncate this page */
1615 if (presult.hastup)
1616 vacrel->nonempty_pages = blkno + 1;
1617
1618 /* Did we find LP_DEAD items? */
1619 *has_lpdead_items = (presult.lpdead_items > 0);
1620
1621 Assert(!presult.all_visible || !(*has_lpdead_items));
1622
1623 /*
1624 * Handle setting visibility map bit based on information from the VM (as
1625 * of last heap_vac_scan_next_block() call), and from all_visible and
1626 * all_frozen variables
1627 */
1628 if (!all_visible_according_to_vm && presult.all_visible)
1629 {
1630 uint8 old_vmbits;
1632
1633 if (presult.all_frozen)
1634 {
1636 flags |= VISIBILITYMAP_ALL_FROZEN;
1637 }
1638
1639 /*
1640 * It should never be the case that the visibility map page is set
1641 * while the page-level bit is clear, but the reverse is allowed (if
1642 * checksums are not enabled). Regardless, set both bits so that we
1643 * get back in sync.
1644 *
1645 * NB: If the heap page is all-visible but the VM bit is not set, we
1646 * don't need to dirty the heap page. However, if checksums are
1647 * enabled, we do need to make sure that the heap page is dirtied
1648 * before passing it to visibilitymap_set(), because it may be logged.
1649 * Given that this situation should only happen in rare cases after a
1650 * crash, it is not worth optimizing.
1651 */
1652 PageSetAllVisible(page);
1654 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
1656 vmbuffer, presult.vm_conflict_horizon,
1657 flags);
1658
1659 /*
1660 * If the page wasn't already set all-visible and/or all-frozen in the
1661 * VM, count it as newly set for logging.
1662 */
1663 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
1664 {
1665 vacrel->vm_new_visible_pages++;
1666 if (presult.all_frozen)
1668 }
1669 else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
1670 presult.all_frozen)
1671 vacrel->vm_new_frozen_pages++;
1672 }
1673
1674 /*
1675 * As of PostgreSQL 9.2, the visibility map bit should never be set if the
1676 * page-level bit is clear. However, it's possible that the bit got
1677 * cleared after heap_vac_scan_next_block() was called, so we must recheck
1678 * with buffer lock before concluding that the VM is corrupt.
1679 */
1680 else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
1681 visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
1682 {
1683 elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
1684 vacrel->relname, blkno);
1685 visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
1687 }
1688
1689 /*
1690 * It's possible for the value returned by
1691 * GetOldestNonRemovableTransactionId() to move backwards, so it's not
1692 * wrong for us to see tuples that appear to not be visible to everyone
1693 * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value
1694 * never moves backwards, but GetOldestNonRemovableTransactionId() is
1695 * conservative and sometimes returns a value that's unnecessarily small,
1696 * so if we see that contradiction it just means that the tuples that we
1697 * think are not visible to everyone yet actually are, and the
1698 * PD_ALL_VISIBLE flag is correct.
1699 *
1700 * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
1701 * however.
1702 */
1703 else if (presult.lpdead_items > 0 && PageIsAllVisible(page))
1704 {
1705 elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
1706 vacrel->relname, blkno);
1707 PageClearAllVisible(page);
1709 visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
1711 }
1712
1713 /*
1714 * If the all-visible page is all-frozen but not marked as such yet, mark
1715 * it as all-frozen. Note that all_frozen is only valid if all_visible is
1716 * true, so we must check both all_visible and all_frozen.
1717 */
1718 else if (all_visible_according_to_vm && presult.all_visible &&
1719 presult.all_frozen && !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
1720 {
1721 uint8 old_vmbits;
1722
1723 /*
1724 * Avoid relying on all_visible_according_to_vm as a proxy for the
1725 * page-level PD_ALL_VISIBLE bit being set, since it might have become
1726 * stale -- even when all_visible is set
1727 */
1728 if (!PageIsAllVisible(page))
1729 {
1730 PageSetAllVisible(page);
1732 }
1733
1734 /*
1735 * Set the page all-frozen (and all-visible) in the VM.
1736 *
1737 * We can pass InvalidTransactionId as our cutoff_xid, since a
1738 * snapshotConflictHorizon sufficient to make everything safe for REDO
1739 * was logged when the page's tuples were frozen.
1740 */
1742 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
1744 vmbuffer, InvalidTransactionId,
1747
1748 /*
1749 * The page was likely already set all-visible in the VM. However,
1750 * there is a small chance that it was modified sometime between
1751 * setting all_visible_according_to_vm and checking the visibility
1752 * during pruning. Check the return value of old_vmbits anyway to
1753 * ensure the visibility map counters used for logging are accurate.
1754 */
1755 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
1756 {
1757 vacrel->vm_new_visible_pages++;
1759 }
1760
1761 /*
1762 * We already checked that the page was not set all-frozen in the VM
1763 * above, so we don't need to test the value of old_vmbits.
1764 */
1765 else
1766 vacrel->vm_new_frozen_pages++;
1767 }
1768}
static void PageClearAllVisible(Page page)
Definition: bufpage.h:439
#define HEAP_PAGE_PRUNE_FREEZE
Definition: heapam.h:43
@ PRUNE_VACUUM_SCAN
Definition: heapam.h:279
#define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW
Definition: heapam.h:42
#define MultiXactIdIsValid(multi)
Definition: multixact.h:28
#define qsort(a, b, c, d)
Definition: port.h:474
void heap_page_prune_and_freeze(Relation relation, Buffer buffer, GlobalVisState *vistest, int options, struct VacuumCutoffs *cutoffs, PruneFreezeResult *presult, PruneReason reason, OffsetNumber *off_loc, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
Definition: pruneheap.c:350
int recently_dead_tuples
Definition: heapam.h:243
TransactionId vm_conflict_horizon
Definition: heapam.h:258
OffsetNumber deadoffsets[MaxHeapTuplesPerPage]
Definition: heapam.h:272
bool all_visible
Definition: heapam.h:256
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
Definition: vacuumlazy.c:3115
static int cmpOffsetNumbers(const void *a, const void *b)
Definition: vacuumlazy.c:1490
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
#define VM_ALL_FROZEN(r, b, v)
Definition: visibilitymap.h:26
#define VISIBILITYMAP_VALID_BITS

References PruneFreezeResult::all_frozen, PruneFreezeResult::all_visible, Assert, buf, BufferGetBlockNumber(), cmpOffsetNumbers(), LVRelState::cutoffs, dead_items_add(), PruneFreezeResult::deadoffsets, elog, PruneFreezeResult::hastup, heap_page_is_all_visible(), heap_page_prune_and_freeze(), HEAP_PAGE_PRUNE_FREEZE, HEAP_PAGE_PRUNE_MARK_UNUSED_NOW, InvalidTransactionId, InvalidXLogRecPtr, LVRelState::live_tuples, PruneFreezeResult::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, PruneFreezeResult::lpdead_items, MarkBufferDirty(), MultiXactIdIsValid, PruneFreezeResult::ndeleted, LVRelState::new_frozen_tuple_pages, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, PruneFreezeResult::nfrozen, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::offnum, PageClearAllVisible(), PageIsAllVisible(), PageSetAllVisible(), PRUNE_VACUUM_SCAN, qsort, LVRelState::recently_dead_tuples, PruneFreezeResult::recently_dead_tuples, LVRelState::rel, LVRelState::relname, TransactionIdIsValid, LVRelState::tuples_deleted, LVRelState::tuples_frozen, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, LVRelState::vistest, VM_ALL_FROZEN, PruneFreezeResult::vm_conflict_horizon, LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, LVRelState::vm_new_visible_pages, and WARNING.

Referenced by lazy_scan_heap().
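
One detail worth calling out is the qsort() on presult.deadoffsets: pruning reports LP_DEAD offsets in an indeterminate order, but dead_items_add() expects them sorted within each block. A standalone sketch of an equivalent comparator and sort (OffsetNumber is modelled as a plain uint16_t here; this is not the source's cmpOffsetNumbers(), just the same idea):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint16_t OffsetNumber;  /* same width as the real OffsetNumber */

/* ascending-order comparator, equivalent to vacuumlazy.c's cmpOffsetNumbers() */
static int
cmp_offset_numbers(const void *a, const void *b)
{
    OffsetNumber na = *(const OffsetNumber *) a;
    OffsetNumber nb = *(const OffsetNumber *) b;

    return (na > nb) - (na < nb);
}

int
main(void)
{
    /* pruning reports dead offsets in an indeterminate order ... */
    OffsetNumber deadoffsets[] = {7, 2, 12, 3};
    int          lpdead_items = 4;

    /* ... but the per-block entry handed to the TID store must be sorted */
    qsort(deadoffsets, lpdead_items, sizeof(OffsetNumber), cmp_offset_numbers);

    for (int i = 0; i < lpdead_items; i++)
        printf("%u ", (unsigned) deadoffsets[i]);
    printf("\n");
    return 0;
}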

◆ lazy_truncate_heap()

static void lazy_truncate_heap ( LVRelState * vacrel)
static

Definition at line 2710 of file vacuumlazy.c.

2711{
2712 BlockNumber orig_rel_pages = vacrel->rel_pages;
2713 BlockNumber new_rel_pages;
2714 bool lock_waiter_detected;
2715 int lock_retry;
2716
2717 /* Report that we are now truncating */
2720
2721 /* Update error traceback information one last time */
2724
2725 /*
2726 * Loop until no more truncating can be done.
2727 */
2728 do
2729 {
2730 /*
2731 * We need full exclusive lock on the relation in order to do
2732 * truncation. If we can't get it, give up rather than waiting --- we
2733 * don't want to block other backends, and we don't want to deadlock
2734 * (which is quite possible considering we already hold a lower-grade
2735 * lock).
2736 */
2737 lock_waiter_detected = false;
2738 lock_retry = 0;
2739 while (true)
2740 {
2741 if (ConditionalLockRelation(vacrel->rel, AccessExclusiveLock))
2742 break;
2743
2744 /*
2745 * Check for interrupts while trying to (re-)acquire the exclusive
2746 * lock.
2747 */
2748 CHECK_FOR_INTERRUPTS();
2749
2750 if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
2752 {
2753 /*
2754 * We failed to establish the lock in the specified number of
2755 * retries. This means we give up truncating.
2756 */
2757 ereport(vacrel->verbose ? INFO : DEBUG2,
2758 (errmsg("\"%s\": stopping truncate due to conflicting lock request",
2759 vacrel->relname)));
2760 return;
2761 }
2762
2763 (void) WaitLatch(MyLatch,
2766 WAIT_EVENT_VACUUM_TRUNCATE);
2768 }
2769
2770 /*
2771 * Now that we have exclusive lock, look to see if the rel has grown
2772 * whilst we were vacuuming with non-exclusive lock. If so, give up;
2773 * the newly added pages presumably contain non-deletable tuples.
2774 */
2775 new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
2776 if (new_rel_pages != orig_rel_pages)
2777 {
2778 /*
2779 * Note: we intentionally don't update vacrel->rel_pages with the
2780 * new rel size here. If we did, it would amount to assuming that
2781 * the new pages are empty, which is unlikely. Leaving the numbers
2782 * alone amounts to assuming that the new pages have the same
2783 * tuple density as existing ones, which is less unlikely.
2784 */
2786 return;
2787 }
2788
2789 /*
2790 * Scan backwards from the end to verify that the end pages actually
2791 * contain no tuples. This is *necessary*, not optional, because
2792 * other backends could have added tuples to these pages whilst we
2793 * were vacuuming.
2794 */
2795 new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
2796 vacrel->blkno = new_rel_pages;
2797
2798 if (new_rel_pages >= orig_rel_pages)
2799 {
2800 /* can't do anything after all */
2802 return;
2803 }
2804
2805 /*
2806 * Okay to truncate.
2807 */
2808 RelationTruncate(vacrel->rel, new_rel_pages);
2809
2810 /*
2811 * We can release the exclusive lock as soon as we have truncated.
2812 * Other backends can't safely access the relation until they have
2813 * processed the smgr invalidation that smgrtruncate sent out ... but
2814 * that should happen as part of standard invalidation processing once
2815 * they acquire lock on the relation.
2816 */
2818
2819 /*
2820 * Update statistics. Here, it *is* correct to adjust rel_pages
2821 * without also touching reltuples, since the tuple count wasn't
2822 * changed by the truncation.
2823 */
2824 vacrel->removed_pages += orig_rel_pages - new_rel_pages;
2825 vacrel->rel_pages = new_rel_pages;
2826
2827 ereport(vacrel->verbose ? INFO : DEBUG2,
2828 (errmsg("table \"%s\": truncated %u to %u pages",
2829 vacrel->relname,
2830 orig_rel_pages, new_rel_pages)));
2831 orig_rel_pages = new_rel_pages;
2832 } while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
2833}
struct Latch * MyLatch
Definition: globals.c:62
void ResetLatch(Latch *latch)
Definition: latch.c:724
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:517
#define WL_TIMEOUT
Definition: latch.h:130
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:132
#define WL_LATCH_SET
Definition: latch.h:127
void UnlockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:309
bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:274
#define PROGRESS_VACUUM_PHASE_TRUNCATE
Definition: progress.h:37
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:288
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL
Definition: vacuumlazy.c:134
#define VACUUM_TRUNCATE_LOCK_TIMEOUT
Definition: vacuumlazy.c:135
static BlockNumber count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
Definition: vacuumlazy.c:2841

References AccessExclusiveLock, LVRelState::blkno, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), DEBUG2, ereport, errmsg(), INFO, InvalidOffsetNumber, MyLatch, LVRelState::nonempty_pages, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelState::rel, LVRelState::rel_pages, RelationGetNumberOfBlocks, RelationTruncate(), LVRelState::relname, LVRelState::removed_pages, ResetLatch(), UnlockRelation(), update_vacuum_error_info(), VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_TRUNCATE_LOCK_TIMEOUT, VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL, LVRelState::verbose, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

Referenced by heap_vacuum_rel().
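
The truncation path never waits indefinitely for the AccessExclusiveLock: it polls with ConditionalLockRelation(), sleeping VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL milliseconds between attempts, and gives up once the accumulated retries reach VACUUM_TRUNCATE_LOCK_TIMEOUT. A standalone sketch of that bounded retry loop (try_exclusive_lock is a hypothetical stand-in for the lock attempt; no actual sleeping is done):

#include <stdbool.h>
#include <stdio.h>

/* Same timing constants as vacuumlazy.c */
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50   /* ms */
#define VACUUM_TRUNCATE_LOCK_TIMEOUT       5000 /* ms */

/* hypothetical stand-in: pretend the lock frees up on the 5th attempt */
static bool
try_exclusive_lock(int attempt)
{
    return attempt >= 5;
}

int
main(void)
{
    int lock_retry = 0;

    while (true)
    {
        if (try_exclusive_lock(lock_retry))
        {
            printf("got AccessExclusiveLock after %d retries\n", lock_retry);
            break;
        }

        if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
                            VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
        {
            /* give up truncating rather than blocking other backends */
            printf("stopping truncate due to conflicting lock request\n");
            return 0;
        }

        /* the real code sleeps VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL ms here */
    }

    return 0;
}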

◆ lazy_vacuum()

static void lazy_vacuum ( LVRelState * vacrel)
static

Definition at line 2002 of file vacuumlazy.c.

2003{
2004 bool bypass;
2005
2006 /* Should not end up here with no indexes */
2007 Assert(vacrel->nindexes > 0);
2008 Assert(vacrel->lpdead_item_pages > 0);
2009
2010 if (!vacrel->do_index_vacuuming)
2011 {
2012 Assert(!vacrel->do_index_cleanup);
2013 dead_items_reset(vacrel);
2014 return;
2015 }
2016
2017 /*
2018 * Consider bypassing index vacuuming (and heap vacuuming) entirely.
2019 *
2020 * We currently only do this in cases where the number of LP_DEAD items
2021 * for the entire VACUUM operation is close to zero. This avoids sharp
2022 * discontinuities in the duration and overhead of successive VACUUM
2023 * operations that run against the same table with a fixed workload.
2024 * Ideally, successive VACUUM operations will behave as if there are
2025 * exactly zero LP_DEAD items in cases where there are close to zero.
2026 *
2027 * This is likely to be helpful with a table that is continually affected
2028 * by UPDATEs that can mostly apply the HOT optimization, but occasionally
2029 * have small aberrations that lead to just a few heap pages retaining
2030 * only one or two LP_DEAD items. This is pretty common; even when the
2031 * DBA goes out of their way to make UPDATEs use HOT, it is practically
2032 * impossible to predict whether HOT will be applied in 100% of cases.
2033 * It's far easier to ensure that 99%+ of all UPDATEs against a table use
2034 * HOT through careful tuning.
2035 */
2036 bypass = false;
2037 if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
2038 {
2039 BlockNumber threshold;
2040
2041 Assert(vacrel->num_index_scans == 0);
2042 Assert(vacrel->lpdead_items == vacrel->dead_items_info->num_items);
2043 Assert(vacrel->do_index_vacuuming);
2044 Assert(vacrel->do_index_cleanup);
2045
2046 /*
2047 * This crossover point at which we'll start to do index vacuuming is
2048 * expressed as a percentage of the total number of heap pages in the
2049 * table that are known to have at least one LP_DEAD item. This is
2050 * much more important than the total number of LP_DEAD items, since
2051 * it's a proxy for the number of heap pages whose visibility map bits
2052 * cannot be set on account of bypassing index and heap vacuuming.
2053 *
2054 * We apply one further precautionary test: the space currently used
2055 * to store the TIDs (TIDs that now all point to LP_DEAD items) must
2056 * not exceed 32MB. This limits the risk that we will bypass index
2057 * vacuuming again and again until eventually there is a VACUUM whose
2058 * dead_items space is not CPU cache resident.
2059 *
2060 * We don't take any special steps to remember the LP_DEAD items (such
2061 * as counting them in our final update to the stats system) when the
2062 * optimization is applied. Though the accounting used in analyze.c's
2063 * acquire_sample_rows() will recognize the same LP_DEAD items as dead
2064 * rows in its own stats report, that's okay. The discrepancy should
2065 * be negligible. If this optimization is ever expanded to cover more
2066 * cases then this may need to be reconsidered.
2067 */
2068 threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
2069 bypass = (vacrel->lpdead_item_pages < threshold &&
2070 (TidStoreMemoryUsage(vacrel->dead_items) < (32L * 1024L * 1024L)));
2071 }
2072
2073 if (bypass)
2074 {
2075 /*
2076 * There are almost zero TIDs. Behave as if there were precisely
2077 * zero: bypass index vacuuming, but do index cleanup.
2078 *
2079 * We expect that the ongoing VACUUM operation will finish very
2080 * quickly, so there is no point in considering speeding up as a
2081 * failsafe against wraparound failure. (Index cleanup is expected to
2082 * finish very quickly in cases where there were no ambulkdelete()
2083 * calls.)
2084 */
2085 vacrel->do_index_vacuuming = false;
2086 }
2087 else if (lazy_vacuum_all_indexes(vacrel))
2088 {
2089 /*
2090 * We successfully completed a round of index vacuuming. Do related
2091 * heap vacuuming now.
2092 */
2093 lazy_vacuum_heap_rel(vacrel);
2094 }
2095 else
2096 {
2097 /*
2098 * Failsafe case.
2099 *
2100 * We attempted index vacuuming, but didn't finish a full round/full
2101 * index scan. This happens when relfrozenxid or relminmxid is too
2102 * far in the past.
2103 *
2104 * From this point on the VACUUM operation will do no further index
2105 * vacuuming or heap vacuuming. This VACUUM operation won't end up
2106 * back here again.
2107 */
2108 Assert(VacuumFailsafeActive);
2109 }
2110
2111 /*
2112 * Forget the LP_DEAD items that we just vacuumed (or just decided to not
2113 * vacuum)
2114 */
2115 dead_items_reset(vacrel);
2116}
static void dead_items_reset(LVRelState *vacrel)
Definition: vacuumlazy.c:3070
#define BYPASS_THRESHOLD_PAGES
Definition: vacuumlazy.c:141
static bool lazy_vacuum_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2127
static void lazy_vacuum_heap_rel(LVRelState *vacrel)
Definition: vacuumlazy.c:2244

References Assert, BYPASS_THRESHOLD_PAGES, LVRelState::consider_bypass_optimization, LVRelState::dead_items, LVRelState::dead_items_info, dead_items_reset(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, lazy_vacuum_all_indexes(), lazy_vacuum_heap_rel(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, LVRelState::rel_pages, TidStoreMemoryUsage(), and VacuumFailsafeActive.

Referenced by lazy_scan_heap().
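
The bypass decision above reduces to two comparisons. The following standalone sketch (plain C, not backend code; the 2% factor mirrors BYPASS_THRESHOLD_PAGES, the 32MB cap mirrors the comment, and the figures in main() are invented) shows the same arithmetic:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone sketch of the bypass test in lazy_vacuum(); not backend code. */
static bool
would_bypass_index_vacuuming(uint32_t rel_pages,
                             uint32_t lpdead_item_pages,
                             uint64_t dead_items_mem_bytes)
{
    /* 2% of rel_pages, mirroring BYPASS_THRESHOLD_PAGES */
    double      threshold = (double) rel_pages * 0.02;

    return lpdead_item_pages < threshold &&
        dead_items_mem_bytes < UINT64_C(32) * 1024 * 1024;
}

int
main(void)
{
    /* hypothetical table: 100,000 pages, 50 of them with LP_DEAD items */
    printf("bypass: %d\n",
           would_bypass_index_vacuuming(100000, 50, 4 * 1024 * 1024));
    return 0;
}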

◆ lazy_vacuum_all_indexes()

static bool lazy_vacuum_all_indexes ( LVRelState * vacrel)
static

Definition at line 2127 of file vacuumlazy.c.

2128{
2129 bool allindexes = true;
2130 double old_live_tuples = vacrel->rel->rd_rel->reltuples;
2131 const int progress_start_index[] = {
2132 PROGRESS_VACUUM_PHASE,
2133 PROGRESS_VACUUM_INDEXES_TOTAL
2134 };
2135 const int progress_end_index[] = {
2136 PROGRESS_VACUUM_INDEXES_TOTAL,
2137 PROGRESS_VACUUM_INDEXES_PROCESSED,
2138 PROGRESS_VACUUM_NUM_INDEX_VACUUMS
2139 };
2140 int64 progress_start_val[2];
2141 int64 progress_end_val[3];
2142
2143 Assert(vacrel->nindexes > 0);
2144 Assert(vacrel->do_index_vacuuming);
2145 Assert(vacrel->do_index_cleanup);
2146
2147 /* Precheck for XID wraparound emergencies */
2148 if (lazy_check_wraparound_failsafe(vacrel))
2149 {
2150 /* Wraparound emergency -- don't even start an index scan */
2151 return false;
2152 }
2153
2154 /*
2155 * Report that we are now vacuuming indexes and the number of indexes to
2156 * vacuum.
2157 */
2158 progress_start_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_INDEX;
2159 progress_start_val[1] = vacrel->nindexes;
2160 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
2161
2162 if (!ParallelVacuumIsActive(vacrel))
2163 {
2164 for (int idx = 0; idx < vacrel->nindexes; idx++)
2165 {
2166 Relation indrel = vacrel->indrels[idx];
2167 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2168
2169 vacrel->indstats[idx] = lazy_vacuum_one_index(indrel, istat,
2170 old_live_tuples,
2171 vacrel);
2172
2173 /* Report the number of indexes vacuumed */
2174 pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
2175 idx + 1);
2176
2177 if (lazy_check_wraparound_failsafe(vacrel))
2178 {
2179 /* Wraparound emergency -- end current index scan */
2180 allindexes = false;
2181 break;
2182 }
2183 }
2184 }
2185 else
2186 {
2187 /* Outsource everything to parallel variant */
2188 parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples,
2189 vacrel->num_index_scans);
2190
2191 /*
2192 * Do a postcheck to consider applying wraparound failsafe now. Note
2193 * that parallel VACUUM only gets the precheck and this postcheck.
2194 */
2195 if (lazy_check_wraparound_failsafe(vacrel))
2196 allindexes = false;
2197 }
2198
2199 /*
2200 * We delete all LP_DEAD items from the first heap pass in all indexes on
2201 * each call here (except calls where we choose to do the failsafe). This
2202 * makes the next call to lazy_vacuum_heap_rel() safe (except in the event
2203 * of the failsafe triggering, which prevents the next call from taking
2204 * place).
2205 */
2206 Assert(vacrel->num_index_scans > 0 ||
2207 vacrel->dead_items_info->num_items == vacrel->lpdead_items);
2208 Assert(allindexes || VacuumFailsafeActive);
2209
2210 /*
2211 * Increase and report the number of index scans. Also, we reset
2212 * PROGRESS_VACUUM_INDEXES_TOTAL and PROGRESS_VACUUM_INDEXES_PROCESSED.
2213 *
2214 * We deliberately include the case where we started a round of bulk
2215 * deletes that we weren't able to finish due to the failsafe triggering.
2216 */
2217 vacrel->num_index_scans++;
2218 progress_end_val[0] = 0;
2219 progress_end_val[1] = 0;
2220 progress_end_val[2] = vacrel->num_index_scans;
2221 pgstat_progress_update_multi_param(3, progress_end_index, progress_end_val);
2222
2223 return allindexes;
2224}
#define PROGRESS_VACUUM_NUM_INDEX_VACUUMS
Definition: progress.h:25
#define PROGRESS_VACUUM_PHASE_VACUUM_INDEX
Definition: progress.h:34
static IndexBulkDeleteResult * lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
Definition: vacuumlazy.c:2581
void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans)

References Assert, LVRelState::dead_items_info, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, idx(), LVRelState::indrels, LVRelState::indstats, lazy_check_wraparound_failsafe(), lazy_vacuum_one_index(), LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, parallel_vacuum_bulkdel_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, LVRelState::pvs, RelationData::rd_rel, LVRelState::rel, and VacuumFailsafeActive.

Referenced by lazy_vacuum().
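
The serial path above interleaves the failsafe check with the per-index work: one precheck before the first bulk delete and one postcheck after each index, ending the round early if the failsafe fires. A standalone control-flow sketch of that pattern (stub functions only, not the backend API; the stubs are hypothetical stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for the backend facilities */
static bool failsafe_triggered(void) { return false; }
static void bulkdelete_one_index(int idx) { printf("bulkdelete index %d\n", idx); }

/*
 * Sketch of the serial loop in lazy_vacuum_all_indexes(): precheck once,
 * then recheck after each index, reporting whether a full round finished.
 */
static bool
vacuum_all_indexes_sketch(int nindexes)
{
    bool        allindexes = true;

    if (failsafe_triggered())
        return false;           /* don't even start an index scan */

    for (int idx = 0; idx < nindexes; idx++)
    {
        bulkdelete_one_index(idx);
        if (failsafe_triggered())
        {
            allindexes = false; /* end the current round early */
            break;
        }
    }
    return allindexes;
}

int
main(void)
{
    printf("completed full round: %d\n", vacuum_all_indexes_sketch(3));
    return 0;
}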

◆ lazy_vacuum_heap_page()

static void lazy_vacuum_heap_page ( LVRelState * vacrel,
BlockNumber  blkno,
Buffer  buffer,
OffsetNumber * deadoffsets,
int  num_offsets,
Buffer  vmbuffer 
)
static

Definition at line 2337 of file vacuumlazy.c.

2340{
2341 Page page = BufferGetPage(buffer);
2342 OffsetNumber unused[MaxHeapTuplesPerPage];
2343 int nunused = 0;
2344 TransactionId visibility_cutoff_xid;
2345 bool all_frozen;
2346 LVSavedErrInfo saved_err_info;
2347
2348 Assert(vacrel->do_index_vacuuming);
2349
2350 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
2351
2352 /* Update error traceback information */
2353 update_vacuum_error_info(vacrel, &saved_err_info,
2354 VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
2355 InvalidOffsetNumber);
2356
2357 START_CRIT_SECTION();
2358
2359 for (int i = 0; i < num_offsets; i++)
2360 {
2361 ItemId itemid;
2362 OffsetNumber toff = deadoffsets[i];
2363
2364 itemid = PageGetItemId(page, toff);
2365
2366 Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
2367 ItemIdSetUnused(itemid);
2368 unused[nunused++] = toff;
2369 }
2370
2371 Assert(nunused > 0);
2372
2373 /* Attempt to truncate line pointer array now */
2374 PageTruncateLinePointerArray(page);
2375
2376 /*
2377 * Mark buffer dirty before we write WAL.
2378 */
2379 MarkBufferDirty(buffer);
2380
2381 /* XLOG stuff */
2382 if (RelationNeedsWAL(vacrel->rel))
2383 {
2384 log_heap_prune_and_freeze(vacrel->rel, buffer,
2385 InvalidTransactionId,
2386 false, /* no cleanup lock required */
2387 PRUNE_VACUUM_CLEANUP,
2388 NULL, 0, /* frozen */
2389 NULL, 0, /* redirected */
2390 NULL, 0, /* dead */
2391 unused, nunused);
2392 }
2393
2394 /*
2395 * End critical section, so we safely can do visibility tests (which
2396 * possibly need to perform IO and allocate memory!). If we crash now the
2397 * page (including the corresponding vm bit) might not be marked all
2398 * visible, but that's fine. A later vacuum will fix that.
2399 */
2400 END_CRIT_SECTION();
2401
2402 /*
2403 * Now that we have removed the LP_DEAD items from the page, once again
2404 * check if the page has become all-visible. The page is already marked
2405 * dirty, exclusively locked, and, if needed, a full page image has been
2406 * emitted.
2407 */
2408 Assert(!PageIsAllVisible(page));
2409 if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
2410 &all_frozen))
2411 {
2412 uint8 old_vmbits;
2413 uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
2414
2415 if (all_frozen)
2416 {
2417 Assert(!TransactionIdIsValid(visibility_cutoff_xid));
2418 flags |= VISIBILITYMAP_ALL_FROZEN;
2419 }
2420
2421 PageSetAllVisible(page);
2422 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buffer,
2423 InvalidXLogRecPtr,
2424 vmbuffer, visibility_cutoff_xid,
2425 flags);
2426
2427 /*
2428 * If the page wasn't already set all-visible and/or all-frozen in the
2429 * VM, count it as newly set for logging.
2430 */
2431 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2432 {
2433 vacrel->vm_new_visible_pages++;
2434 if (all_frozen)
2435 vacrel->vm_new_visible_frozen_pages++;
2436 }
2437
2438 else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
2439 all_frozen)
2440 vacrel->vm_new_frozen_pages++;
2441 }
2442
2443 /* Revert to the previous phase information for error traceback */
2444 restore_vacuum_error_info(vacrel, &saved_err_info);
2445}
void PageTruncateLinePointerArray(Page page)
Definition: bufpage.c:824
@ PRUNE_VACUUM_CLEANUP
Definition: heapam.h:280
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
void log_heap_prune_and_freeze(Relation relation, Buffer buffer, TransactionId conflict_xid, bool cleanup_lock, PruneReason reason, HeapTupleFreeze *frozen, int nfrozen, OffsetNumber *redirected, int nredirected, OffsetNumber *dead, int ndead, OffsetNumber *unused, int nunused)
Definition: pruneheap.c:2053

References Assert, BufferGetPage(), LVRelState::do_index_vacuuming, END_CRIT_SECTION, heap_page_is_all_visible(), i, InvalidOffsetNumber, InvalidTransactionId, InvalidXLogRecPtr, ItemIdHasStorage, ItemIdIsDead, ItemIdSetUnused, log_heap_prune_and_freeze(), MarkBufferDirty(), MaxHeapTuplesPerPage, PageGetItemId(), PageIsAllVisible(), PageSetAllVisible(), PageTruncateLinePointerArray(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PRUNE_VACUUM_CLEANUP, LVRelState::rel, RelationNeedsWAL, restore_vacuum_error_info(), START_CRIT_SECTION, TransactionIdIsValid, update_vacuum_error_info(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, and LVRelState::vm_new_visible_pages.

Referenced by lazy_vacuum_heap_rel().
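
The counter updates at the end of the listing depend only on the previous VM bits and the all_frozen flag. A standalone sketch of that bookkeeping (plain C; the bit values are illustrative stand-ins for VISIBILITYMAP_ALL_VISIBLE and VISIBILITYMAP_ALL_FROZEN, and the counter names are hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* illustrative stand-ins for the visibility map flag bits */
#define SKETCH_ALL_VISIBLE 0x01
#define SKETCH_ALL_FROZEN  0x02

/*
 * Mirror of the logging bookkeeping above: a page counts as newly
 * all-visible when the old VM bits had ALL_VISIBLE clear, and as newly
 * frozen when ALL_FROZEN was clear and the page is now all-frozen.
 */
void
count_vm_changes(uint8_t old_vmbits, bool all_frozen,
                 uint64_t *new_visible, uint64_t *new_visible_frozen,
                 uint64_t *new_frozen)
{
    if ((old_vmbits & SKETCH_ALL_VISIBLE) == 0)
    {
        (*new_visible)++;
        if (all_frozen)
            (*new_visible_frozen)++;
    }
    else if ((old_vmbits & SKETCH_ALL_FROZEN) == 0 && all_frozen)
        (*new_frozen)++;
}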

◆ lazy_vacuum_heap_rel()

static void lazy_vacuum_heap_rel ( LVRelState * vacrel)
static

Definition at line 2244 of file vacuumlazy.c.

2245{
2246 BlockNumber vacuumed_pages = 0;
2247 Buffer vmbuffer = InvalidBuffer;
2248 LVSavedErrInfo saved_err_info;
2249 TidStoreIter *iter;
2250 TidStoreIterResult *iter_result;
2251
2252 Assert(vacrel->do_index_vacuuming);
2253 Assert(vacrel->do_index_cleanup);
2254 Assert(vacrel->num_index_scans > 0);
2255
2256 /* Report that we are now vacuuming the heap */
2257 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2258 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
2259
2260 /* Update error traceback information */
2261 update_vacuum_error_info(vacrel, &saved_err_info,
2262 VACUUM_ERRCB_PHASE_VACUUM_HEAP,
2263 InvalidBlockNumber, InvalidOffsetNumber);
2264
2265 iter = TidStoreBeginIterate(vacrel->dead_items);
2266 while ((iter_result = TidStoreIterateNext(iter)) != NULL)
2267 {
2268 BlockNumber blkno;
2269 Buffer buf;
2270 Page page;
2271 Size freespace;
2272 OffsetNumber offsets[MaxOffsetNumber];
2273 int num_offsets;
2274
2276
2277 blkno = iter_result->blkno;
2278 vacrel->blkno = blkno;
2279
2280 num_offsets = TidStoreGetBlockOffsets(iter_result, offsets, lengthof(offsets));
2281 Assert(num_offsets <= lengthof(offsets));
2282
2283 /*
2284 * Pin the visibility map page in case we need to mark the page
2285 * all-visible. In most cases this will be very cheap, because we'll
2286 * already have the correct page pinned anyway.
2287 */
2288 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
2289
2290 /* We need a non-cleanup exclusive lock to mark dead_items unused */
2291 buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
2292 vacrel->bstrategy);
2293 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2294 lazy_vacuum_heap_page(vacrel, blkno, buf, offsets,
2295 num_offsets, vmbuffer);
2296
2297 /* Now that we've vacuumed the page, record its available space */
2298 page = BufferGetPage(buf);
2299 freespace = PageGetHeapFreeSpace(page);
2300
2301 UnlockReleaseBuffer(buf);
2302 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
2303 vacuumed_pages++;
2304 }
2305 TidStoreEndIterate(iter);
2306
2307 vacrel->blkno = InvalidBlockNumber;
2308 if (BufferIsValid(vmbuffer))
2309 ReleaseBuffer(vmbuffer);
2310
2311 /*
2312 * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
2313 * the second heap pass. No more, no less.
2314 */
2315 Assert(vacrel->num_index_scans > 1 ||
2316 (vacrel->dead_items_info->num_items == vacrel->lpdead_items &&
2317 vacuumed_pages == vacrel->lpdead_item_pages));
2318
2319 ereport(DEBUG2,
2320 (errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
2321 vacrel->relname, (long long) vacrel->dead_items_info->num_items,
2322 vacuumed_pages)));
2323
2324 /* Revert to the previous phase information for error traceback */
2325 restore_vacuum_error_info(vacrel, &saved_err_info);
2326}
#define lengthof(array)
Definition: c.h:745
#define MaxOffsetNumber
Definition: off.h:28
#define PROGRESS_VACUUM_PHASE_VACUUM_HEAP
Definition: progress.h:35
BlockNumber blkno
Definition: tidstore.h:29
TidStoreIter * TidStoreBeginIterate(TidStore *ts)
Definition: tidstore.c:471
void TidStoreEndIterate(TidStoreIter *iter)
Definition: tidstore.c:518
TidStoreIterResult * TidStoreIterateNext(TidStoreIter *iter)
Definition: tidstore.c:493
int TidStoreGetBlockOffsets(TidStoreIterResult *result, OffsetNumber *offsets, int max_offsets)
Definition: tidstore.c:566
static void lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
Definition: vacuumlazy.c:2337

References Assert, LVRelState::blkno, TidStoreIterResult::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_EXCLUSIVE, BufferGetPage(), BufferIsValid(), LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, ereport, errmsg(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_vacuum_heap_page(), lengthof, LockBuffer(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAIN_FORKNUM, MaxOffsetNumber, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, PageGetHeapFreeSpace(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, RBM_NORMAL, ReadBufferExtended(), RecordPageWithFreeSpace(), LVRelState::rel, ReleaseBuffer(), LVRelState::relname, restore_vacuum_error_info(), TidStoreBeginIterate(), TidStoreEndIterate(), TidStoreGetBlockOffsets(), TidStoreIterateNext(), UnlockReleaseBuffer(), update_vacuum_error_info(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, and visibilitymap_pin().

Referenced by lazy_vacuum().
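
The block-at-a-time loop above is driven entirely by the TidStore API shown in the listing (TidStoreBeginIterate(), TidStoreIterateNext(), TidStoreGetBlockOffsets(), TidStoreEndIterate()). A backend-context sketch of that iteration, with the real second-pass work replaced by a hypothetical process_block() hook:

#include "postgres.h"

#include "access/tidstore.h"

/* hypothetical per-page hook, standing in for the real second-pass work */
static void
process_block(BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
{
    /* e.g. lock the heap page and set the listed items LP_UNUSED */
}

/* Sketch of walking a TidStore block by block, as lazy_vacuum_heap_rel() does. */
static void
walk_dead_items(TidStore *dead_items)
{
    TidStoreIter *iter;
    TidStoreIterResult *iter_result;

    iter = TidStoreBeginIterate(dead_items);
    while ((iter_result = TidStoreIterateNext(iter)) != NULL)
    {
        OffsetNumber offsets[MaxOffsetNumber];
        int         num_offsets;

        /* collect every dead offset stored for this heap block */
        num_offsets = TidStoreGetBlockOffsets(iter_result, offsets,
                                              lengthof(offsets));
        process_block(iter_result->blkno, offsets, num_offsets);
    }
    TidStoreEndIterate(iter);
}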

◆ lazy_vacuum_one_index()

static IndexBulkDeleteResult * lazy_vacuum_one_index ( Relation  indrel,
IndexBulkDeleteResult * istat,
double  reltuples,
LVRelState * vacrel 
)
static

Definition at line 2581 of file vacuumlazy.c.

2583{
2584 IndexVacuumInfo ivinfo;
2585 LVSavedErrInfo saved_err_info;
2586
2587 ivinfo.index = indrel;
2588 ivinfo.heaprel = vacrel->rel;
2589 ivinfo.analyze_only = false;
2590 ivinfo.report_progress = false;
2591 ivinfo.estimated_count = true;
2592 ivinfo.message_level = DEBUG2;
2593 ivinfo.num_heap_tuples = reltuples;
2594 ivinfo.strategy = vacrel->bstrategy;
2595
2596 /*
2597 * Update error traceback information.
2598 *
2599 * The index name is saved during this phase and restored immediately
2600 * after this phase. See vacuum_error_callback.
2601 */
2602 Assert(vacrel->indname == NULL);
2603 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
2604 update_vacuum_error_info(vacrel, &saved_err_info,
2605 VACUUM_ERRCB_PHASE_VACUUM_INDEX,
2606 InvalidBlockNumber, InvalidOffsetNumber);
2607
2608 /* Do bulk deletion */
2609 istat = vac_bulkdel_one_index(&ivinfo, istat, vacrel->dead_items,
2610 vacrel->dead_items_info);
2611
2612 /* Revert to the previous phase information for error traceback */
2613 restore_vacuum_error_info(vacrel, &saved_err_info);
2614 pfree(vacrel->indname);
2615 vacrel->indname = NULL;
2616
2617 return istat;
2618}
IndexBulkDeleteResult * vac_bulkdel_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat, TidStore *dead_items, VacDeadItemsInfo *dead_items_info)
Definition: vacuum.c:2515

References IndexVacuumInfo::analyze_only, Assert, LVRelState::bstrategy, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_bulkdel_one_index(), and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by lazy_vacuum_all_indexes().

◆ restore_vacuum_error_info()

static void restore_vacuum_error_info ( LVRelState * vacrel,
const LVSavedErrInfo * saved_vacrel 
)
static

Definition at line 3349 of file vacuumlazy.c.

3351{
3352 vacrel->blkno = saved_vacrel->blkno;
3353 vacrel->offnum = saved_vacrel->offnum;
3354 vacrel->phase = saved_vacrel->phase;
3355}
BlockNumber blkno
Definition: vacuumlazy.c:291
VacErrPhase phase
Definition: vacuumlazy.c:293
OffsetNumber offnum
Definition: vacuumlazy.c:292

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().

◆ should_attempt_truncation()

static bool should_attempt_truncation ( LVRelState * vacrel)
static

Definition at line 2690 of file vacuumlazy.c.

2691{
2692 BlockNumber possibly_freeable;
2693
2694 if (!vacrel->do_rel_truncate || VacuumFailsafeActive)
2695 return false;
2696
2697 possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
2698 if (possibly_freeable > 0 &&
2699 (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
2700 possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
2701 return true;
2702
2703 return false;
2704}
#define REL_TRUNCATE_MINIMUM
Definition: vacuumlazy.c:123
#define REL_TRUNCATE_FRACTION
Definition: vacuumlazy.c:124

References LVRelState::do_rel_truncate, LVRelState::nonempty_pages, LVRelState::rel_pages, REL_TRUNCATE_FRACTION, REL_TRUNCATE_MINIMUM, and VacuumFailsafeActive.

Referenced by heap_vacuum_rel().
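
The heuristic above only compares page counts. A standalone sketch of the same test (plain C; the constants 1000 and 16 correspond to REL_TRUNCATE_MINIMUM and REL_TRUNCATE_FRACTION defined earlier in this file, and the failsafe/TRUNCATE-option checks from the listing are omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Truncation looks worthwhile when at least 1,000 trailing pages or at
 * least 1/16 of the table appear to be freeable.
 */
static bool
worth_truncating(uint32_t rel_pages, uint32_t nonempty_pages)
{
    uint32_t    possibly_freeable = rel_pages - nonempty_pages;

    return possibly_freeable > 0 &&
        (possibly_freeable >= 1000 ||
         possibly_freeable >= rel_pages / 16);
}

int
main(void)
{
    /* made-up figures: 20,000-page table with 2,000 empty trailing pages */
    printf("%d\n", worth_truncating(20000, 18000));
    return 0;
}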

◆ update_relstats_all_indexes()

static void update_relstats_all_indexes ( LVRelState * vacrel)
static

Definition at line 3231 of file vacuumlazy.c.

3232{
3233 Relation *indrels = vacrel->indrels;
3234 int nindexes = vacrel->nindexes;
3235 IndexBulkDeleteResult **indstats = vacrel->indstats;
3236
3237 Assert(vacrel->do_index_cleanup);
3238
3239 for (int idx = 0; idx < nindexes; idx++)
3240 {
3241 Relation indrel = indrels[idx];
3242 IndexBulkDeleteResult *istat = indstats[idx];
3243
3244 if (istat == NULL || istat->estimated_count)
3245 continue;
3246
3247 /* Update index statistics */
3248 vac_update_relstats(indrel,
3249 istat->num_pages,
3250 istat->num_index_tuples,
3251 0,
3252 false,
3253 InvalidTransactionId,
3254 InvalidMultiXactId,
3255 NULL, NULL, false);
3256 }
3257}
bool estimated_count
Definition: genam.h:80
double num_index_tuples
Definition: genam.h:81

References Assert, LVRelState::do_index_cleanup, IndexBulkDeleteResult::estimated_count, idx(), LVRelState::indrels, LVRelState::indstats, InvalidMultiXactId, InvalidTransactionId, LVRelState::nindexes, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, and vac_update_relstats().

Referenced by heap_vacuum_rel().

◆ update_vacuum_error_info()

static void update_vacuum_error_info ( LVRelState * vacrel,
LVSavedErrInfo * saved_vacrel,
int  phase,
BlockNumber  blkno,
OffsetNumber  offnum 
)
static

Definition at line 3330 of file vacuumlazy.c.

3332{
3333 if (saved_vacrel)
3334 {
3335 saved_vacrel->offnum = vacrel->offnum;
3336 saved_vacrel->blkno = vacrel->blkno;
3337 saved_vacrel->phase = vacrel->phase;
3338 }
3339
3340 vacrel->blkno = blkno;
3341 vacrel->offnum = offnum;
3342 vacrel->phase = phase;
3343}

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_scan_heap(), lazy_truncate_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().
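
Callers use update_vacuum_error_info() and restore_vacuum_error_info() as a matched pair around each phase, exactly as the listings above show. A sketch of that usage pattern (only meaningful inside vacuumlazy.c, since both helpers are static; the phase and block number here are illustrative):

/* Sketch of the save/update/restore pattern used by the callers above. */
static void
do_phase_with_error_context(LVRelState *vacrel, BlockNumber blkno)
{
    LVSavedErrInfo saved_err_info;

    /* Point the error callback at the new phase, remembering the old one */
    update_vacuum_error_info(vacrel, &saved_err_info,
                             VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
                             InvalidOffsetNumber);

    /* ... work that may elog()/ereport() with this context in effect ... */

    /* Put the previous phase information back for the caller */
    restore_vacuum_error_info(vacrel, &saved_err_info);
}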

◆ vacuum_error_callback()

static void vacuum_error_callback ( void *  arg)
static

Definition at line 3266 of file vacuumlazy.c.

3267{
3268 LVRelState *errinfo = arg;
3269
3270 switch (errinfo->phase)
3271 {
3272 case VACUUM_ERRCB_PHASE_SCAN_HEAP:
3273 if (BlockNumberIsValid(errinfo->blkno))
3274 {
3275 if (OffsetNumberIsValid(errinfo->offnum))
3276 errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
3277 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3278 else
3279 errcontext("while scanning block %u of relation \"%s.%s\"",
3280 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3281 }
3282 else
3283 errcontext("while scanning relation \"%s.%s\"",
3284 errinfo->relnamespace, errinfo->relname);
3285 break;
3286
3287 case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
3288 if (BlockNumberIsValid(errinfo->blkno))
3289 {
3290 if (OffsetNumberIsValid(errinfo->offnum))
3291 errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
3292 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3293 else
3294 errcontext("while vacuuming block %u of relation \"%s.%s\"",
3295 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3296 }
3297 else
3298 errcontext("while vacuuming relation \"%s.%s\"",
3299 errinfo->relnamespace, errinfo->relname);
3300 break;
3301
3302 case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
3303 errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
3304 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3305 break;
3306
3307 case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
3308 errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
3309 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3310 break;
3311
3312 case VACUUM_ERRCB_PHASE_TRUNCATE:
3313 if (BlockNumberIsValid(errinfo->blkno))
3314 errcontext("while truncating relation \"%s.%s\" to %u blocks",
3315 errinfo->relnamespace, errinfo->relname, errinfo->blkno);
3316 break;
3317
3318 case VACUUM_ERRCB_PHASE_UNKNOWN:
3319 default:
3320 return; /* do nothing; the errinfo may not be
3321 * initialized */
3322 }
3323}
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
#define errcontext
Definition: elog.h:196
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
void * arg

References arg, LVRelState::blkno, BlockNumberIsValid(), errcontext, LVRelState::indname, LVRelState::offnum, OffsetNumberIsValid, LVRelState::phase, LVRelState::relname, LVRelState::relnamespace, VACUUM_ERRCB_PHASE_INDEX_CLEANUP, VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_ERRCB_PHASE_UNKNOWN, VACUUM_ERRCB_PHASE_VACUUM_HEAP, and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by heap_vacuum_rel().
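
The callback itself only formats the context message; it takes effect once it is installed on the error context stack. A sketch of the usual PostgreSQL registration pattern, assuming a function that has an LVRelState *vacrel at hand (the exact code in heap_vacuum_rel() may differ; ErrorContextCallback and error_context_stack come from the elog facility included via postgres.h):

ErrorContextCallback errcallback;

/* push our callback onto the error context stack */
errcallback.callback = vacuum_error_callback;
errcallback.arg = vacrel;
errcallback.previous = error_context_stack;
error_context_stack = &errcallback;

/* ... scan, vacuum indexes and heap, clean up, truncate ... */

/* pop the callback again before returning */
error_context_stack = errcallback.previous;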