PostgreSQL Source Code git master
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/genam.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/tidstore.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "common/int.h"
#include "common/pg_prng.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/read_stream.h"
#include "utils/lsyscache.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"
Include dependency graph for vacuumlazy.c:

Go to the source code of this file.

Data Structures

struct  LVRelState
 
struct  LVSavedErrInfo
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */
 
#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define ParallelVacuumIsActive(vacrel)   ((vacrel)->pvs != NULL)
 
#define MAX_EAGER_FREEZE_SUCCESS_RATE   0.2
 
#define EAGER_SCAN_REGION_SIZE   4096
 
#define VAC_BLK_WAS_EAGER_SCANNED   (1 << 0)
 
#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM   (1 << 1)
 

Typedefs

typedef struct LVRelState LVRelState
 
typedef struct LVSavedErrInfo LVSavedErrInfo
 

Enumerations

enum  VacErrPhase {
  VACUUM_ERRCB_PHASE_UNKNOWN , VACUUM_ERRCB_PHASE_SCAN_HEAP , VACUUM_ERRCB_PHASE_VACUUM_INDEX , VACUUM_ERRCB_PHASE_VACUUM_HEAP ,
  VACUUM_ERRCB_PHASE_INDEX_CLEANUP , VACUUM_ERRCB_PHASE_TRUNCATE
}
 

Functions

static void lazy_scan_heap (LVRelState *vacrel)
 
static void heap_vacuum_eager_scan_setup (LVRelState *vacrel, VacuumParams *params)
 
static BlockNumber heap_vac_scan_next_block (ReadStream *stream, void *callback_private_data, void *per_buffer_data)
 
static void find_next_unskippable_block (LVRelState *vacrel, bool *skipsallvis)
 
static bool lazy_scan_new_or_empty (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
 
static void lazy_scan_prune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool all_visible_according_to_vm, bool *has_lpdead_items, bool *vm_page_frozen)
 
static bool lazy_scan_noprune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
 
static void lazy_vacuum (LVRelState *vacrel)
 
static bool lazy_vacuum_all_indexes (LVRelState *vacrel)
 
static void lazy_vacuum_heap_rel (LVRelState *vacrel)
 
static void lazy_vacuum_heap_page (LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
 
static bool lazy_check_wraparound_failsafe (LVRelState *vacrel)
 
static void lazy_cleanup_all_indexes (LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_vacuum_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_cleanup_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
 
static bool should_attempt_truncation (LVRelState *vacrel)
 
static void lazy_truncate_heap (LVRelState *vacrel)
 
static BlockNumber count_nondeletable_pages (LVRelState *vacrel, bool *lock_waiter_detected)
 
static void dead_items_alloc (LVRelState *vacrel, int nworkers)
 
static void dead_items_add (LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
 
static void dead_items_reset (LVRelState *vacrel)
 
static void dead_items_cleanup (LVRelState *vacrel)
 
static bool heap_page_is_all_visible (LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
 
static void update_relstats_all_indexes (LVRelState *vacrel)
 
static void vacuum_error_callback (void *arg)
 
static void update_vacuum_error_info (LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
 
static void restore_vacuum_error_info (LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
 
void heap_vacuum_rel (Relation rel, VacuumParams *params, BufferAccessStrategy bstrategy)
 
static int cmpOffsetNumbers (const void *a, const void *b)
 
static BlockNumber vacuum_reap_lp_read_stream_next (ReadStream *stream, void *callback_private_data, void *per_buffer_data)
 

Macro Definition Documentation

◆ BYPASS_THRESHOLD_PAGES

#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */

Definition at line 187 of file vacuumlazy.c.
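
As a rough illustration of how this 2% threshold feeds into the decision to skip index vacuuming, here is a minimal, self-contained sketch; the helper name and the page counts are hypothetical, and the real test in lazy_vacuum() also depends on other conditions such as consider_bypass_optimization:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int BlockNumber;

#define BYPASS_THRESHOLD_PAGES 0.02	/* i.e. 2% of rel_pages */

/* Hypothetical helper mirroring only the page-count part of the bypass test */
static bool
bypass_by_page_count(BlockNumber rel_pages, BlockNumber lpdead_item_pages)
{
	BlockNumber threshold = (BlockNumber) (BYPASS_THRESHOLD_PAGES * rel_pages);

	return lpdead_item_pages < threshold;
}

int
main(void)
{
	/* 1,500 of 100,000 pages (1.5%) have LP_DEAD items: under the threshold */
	printf("bypass: %d\n", bypass_by_page_count(100000, 1500));
	/* 2,500 of 100,000 pages (2.5%) have LP_DEAD items: indexes get vacuumed */
	printf("bypass: %d\n", bypass_by_page_count(100000, 2500));
	return 0;
}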

◆ EAGER_SCAN_REGION_SIZE

#define EAGER_SCAN_REGION_SIZE   4096

Definition at line 250 of file vacuumlazy.c.
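
With the default block size (BLCKSZ = 8192), one eager scan region of 4096 blocks covers 32 MB of the heap.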

◆ FAILSAFE_EVERY_PAGES

#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 193 of file vacuumlazy.c.
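
With the default BLCKSZ of 8192, this works out to (4 * 1024 * 1024 * 1024) / 8192 = 524,288 blocks, i.e. the wraparound failsafe is rechecked after roughly every 4GB of heap processed.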

◆ MAX_EAGER_FREEZE_SUCCESS_RATE

#define MAX_EAGER_FREEZE_SUCCESS_RATE   0.2

Definition at line 241 of file vacuumlazy.c.

◆ ParallelVacuumIsActive

#define ParallelVacuumIsActive (   vacrel)    ((vacrel)->pvs != NULL)

Definition at line 221 of file vacuumlazy.c.

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 215 of file vacuumlazy.c.

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 170 of file vacuumlazy.c.

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 169 of file vacuumlazy.c.

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 209 of file vacuumlazy.c.
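
With the default 8 kB block size, 32 consecutive blocks amount to 256 kB: all-visible runs shorter than this are read anyway, since skipping them saves little I/O while disrupting OS readahead and giving up a chance to advance relfrozenxid.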

◆ VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM

#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM   (1 << 1)

Definition at line 257 of file vacuumlazy.c.

◆ VAC_BLK_WAS_EAGER_SCANNED

#define VAC_BLK_WAS_EAGER_SCANNED   (1 << 0)

Definition at line 256 of file vacuumlazy.c.

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 202 of file vacuumlazy.c.
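
With the default BLCKSZ of 8192, this works out to (8 * 1024 * 1024 * 1024) / 8192 = 1,048,576 blocks, i.e. the free space map is vacuumed after roughly every 8GB of heap processed.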

◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 179 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 181 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 180 of file vacuumlazy.c.

Typedef Documentation

◆ LVRelState

typedef struct LVRelState LVRelState

◆ LVSavedErrInfo

typedef struct LVSavedErrInfo LVSavedErrInfo

Enumeration Type Documentation

◆ VacErrPhase

Enumerator
VACUUM_ERRCB_PHASE_UNKNOWN 
VACUUM_ERRCB_PHASE_SCAN_HEAP 
VACUUM_ERRCB_PHASE_VACUUM_INDEX 
VACUUM_ERRCB_PHASE_VACUUM_HEAP 
VACUUM_ERRCB_PHASE_INDEX_CLEANUP 
VACUUM_ERRCB_PHASE_TRUNCATE 

Definition at line 224 of file vacuumlazy.c.

225{
226 VACUUM_ERRCB_PHASE_UNKNOWN,
227 VACUUM_ERRCB_PHASE_SCAN_HEAP,
228 VACUUM_ERRCB_PHASE_VACUUM_INDEX,
229 VACUUM_ERRCB_PHASE_VACUUM_HEAP,
230 VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
231 VACUUM_ERRCB_PHASE_TRUNCATE,
232} VacErrPhase;

Function Documentation

◆ cmpOffsetNumbers()

static int cmpOffsetNumbers ( const void *  a,
const void *  b 
)
static

Definition at line 1922 of file vacuumlazy.c.

1923{
1924 return pg_cmp_u16(*(const OffsetNumber *) a, *(const OffsetNumber *) b);
1925}
static int pg_cmp_u16(uint16 a, uint16 b)
Definition: int.h:640
int b
Definition: isn.c:74
int a
Definition: isn.c:73
uint16 OffsetNumber
Definition: off.h:24

References a, b, and pg_cmp_u16().

Referenced by lazy_scan_prune().
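
The comparator follows the usual qsort() contract. A minimal, self-contained sketch of the sort it supports (the offsets below are made up, and pg_cmp_u16() is replaced by an equivalent inline comparison so that the example compiles on its own):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint16_t OffsetNumber;

/* Equivalent to cmpOffsetNumbers(): order two OffsetNumbers for qsort() */
static int
cmp_offset_numbers(const void *a, const void *b)
{
	OffsetNumber oa = *(const OffsetNumber *) a;
	OffsetNumber ob = *(const OffsetNumber *) b;

	return (oa > ob) - (oa < ob);
}

int
main(void)
{
	OffsetNumber deadoffsets[] = {17, 3, 42, 8};

	qsort(deadoffsets, 4, sizeof(OffsetNumber), cmp_offset_numbers);

	for (int i = 0; i < 4; i++)
		printf("%d ", deadoffsets[i]);	/* prints: 3 8 17 42 */
	printf("\n");
	return 0;
}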

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages ( LVRelState vacrel,
bool *  lock_waiter_detected 
)
static

Definition at line 3341 of file vacuumlazy.c.

3342{
3343 BlockNumber blkno;
3344 BlockNumber prefetchedUntil;
3345 instr_time starttime;
3346
3347 /* Initialize the starttime if we check for conflicting lock requests */
3348 INSTR_TIME_SET_CURRENT(starttime);
3349
3350 /*
3351 * Start checking blocks at what we believe relation end to be and move
3352 * backwards. (Strange coding of loop control is needed because blkno is
3353 * unsigned.) To make the scan faster, we prefetch a few blocks at a time
3354 * in forward direction, so that OS-level readahead can kick in.
3355 */
3356 blkno = vacrel->rel_pages;
3358 "prefetch size must be power of 2");
3359 prefetchedUntil = InvalidBlockNumber;
3360 while (blkno > vacrel->nonempty_pages)
3361 {
3362 Buffer buf;
3363 Page page;
3364 OffsetNumber offnum,
3365 maxoff;
3366 bool hastup;
3367
3368 /*
3369 * Check if another process requests a lock on our relation. We are
3370 * holding an AccessExclusiveLock here, so they will be waiting. We
3371 * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
3372 * only check if that interval has elapsed once every 32 blocks to
3373 * keep the number of system calls and actual shared lock table
3374 * lookups to a minimum.
3375 */
3376 if ((blkno % 32) == 0)
3377 {
3378 instr_time currenttime;
3379 instr_time elapsed;
3380
3381 INSTR_TIME_SET_CURRENT(currenttime);
3382 elapsed = currenttime;
3383 INSTR_TIME_SUBTRACT(elapsed, starttime);
3384 if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
3385 >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
3386 {
3387 if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
3388 {
3389 ereport(vacrel->verbose ? INFO : DEBUG2,
3390 (errmsg("table \"%s\": suspending truncate due to conflicting lock request",
3391 vacrel->relname)));
3392
3393 *lock_waiter_detected = true;
3394 return blkno;
3395 }
3396 starttime = currenttime;
3397 }
3398 }
3399
3400 /*
3401 * We don't insert a vacuum delay point here, because we have an
3402 * exclusive lock on the table which we want to hold for as short a
3403 * time as possible. We still need to check for interrupts however.
3404 */
3405 CHECK_FOR_INTERRUPTS();
3406
3407 blkno--;
3408
3409 /* If we haven't prefetched this lot yet, do so now. */
3410 if (prefetchedUntil > blkno)
3411 {
3412 BlockNumber prefetchStart;
3413 BlockNumber pblkno;
3414
3415 prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
3416 for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
3417 {
3418 PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
3419 CHECK_FOR_INTERRUPTS();
3420 }
3421 prefetchedUntil = prefetchStart;
3422 }
3423
3424 buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
3425 vacrel->bstrategy);
3426
3427 /* In this phase we only need shared access to the buffer */
3428 LockBuffer(buf, BUFFER_LOCK_SHARE);
3429
3430 page = BufferGetPage(buf);
3431
3432 if (PageIsNew(page) || PageIsEmpty(page))
3433 {
3434 UnlockReleaseBuffer(buf);
3435 continue;
3436 }
3437
3438 hastup = false;
3439 maxoff = PageGetMaxOffsetNumber(page);
3440 for (offnum = FirstOffsetNumber;
3441 offnum <= maxoff;
3442 offnum = OffsetNumberNext(offnum))
3443 {
3444 ItemId itemid;
3445
3446 itemid = PageGetItemId(page, offnum);
3447
3448 /*
3449 * Note: any non-unused item should be taken as a reason to keep
3450 * this page. Even an LP_DEAD item makes truncation unsafe, since
3451 * we must not have cleaned out its index entries.
3452 */
3453 if (ItemIdIsUsed(itemid))
3454 {
3455 hastup = true;
3456 break; /* can stop scanning */
3457 }
3458 } /* scan along page */
3459
3460 UnlockReleaseBuffer(buf);
3461
3462 /* Done scanning if we found a tuple here */
3463 if (hastup)
3464 return blkno + 1;
3465 }
3466
3467 /*
3468 * If we fall out of the loop, all the previously-thought-to-be-empty
3469 * pages still are; we need not bother to look at the last known-nonempty
3470 * page.
3471 */
3472 return vacrel->nonempty_pages;
3473}
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
int Buffer
Definition: buf.h:23
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:651
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:5390
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:5607
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:805
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:197
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:417
@ RBM_NORMAL
Definition: bufmgr.h:46
static bool PageIsEmpty(const PageData *page)
Definition: bufpage.h:224
static bool PageIsNew(const PageData *page)
Definition: bufpage.h:234
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:244
PageData * Page
Definition: bufpage.h:82
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition: bufpage.h:372
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:909
int errmsg(const char *fmt,...)
Definition: elog.c:1071
#define DEBUG2
Definition: elog.h:29
#define INFO
Definition: elog.h:34
#define ereport(elevel,...)
Definition: elog.h:149
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:122
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:181
#define INSTR_TIME_GET_MICROSEC(t)
Definition: instr_time.h:194
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:367
#define AccessExclusiveLock
Definition: lockdefs.h:43
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:123
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
#define FirstOffsetNumber
Definition: off.h:27
static char * buf
Definition: pg_test_fsync.c:72
@ MAIN_FORKNUM
Definition: relpath.h:58
bool verbose
Definition: vacuumlazy.c:298
BlockNumber nonempty_pages
Definition: vacuumlazy.c:341
Relation rel
Definition: vacuumlazy.c:262
BlockNumber rel_pages
Definition: vacuumlazy.c:313
BufferAccessStrategy bstrategy
Definition: vacuumlazy.c:267
char * relname
Definition: vacuumlazy.c:293
#define PREFETCH_SIZE
Definition: vacuumlazy.c:215
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL
Definition: vacuumlazy.c:179

References AccessExclusiveLock, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BufferGetPage(), CHECK_FOR_INTERRUPTS, DEBUG2, ereport, errmsg(), FirstOffsetNumber, INFO, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelState::nonempty_pages, OffsetNumberNext, PageGetItemId(), PageGetMaxOffsetNumber(), PageIsEmpty(), PageIsNew(), PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelState::rel, LVRelState::rel_pages, LVRelState::relname, StaticAssertStmt, UnlockReleaseBuffer(), VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and LVRelState::verbose.

Referenced by lazy_truncate_heap().
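
The prefetch window above is derived by masking off the low bits of the current block number, so prefetching always starts at a PREFETCH_SIZE-aligned boundary. A minimal, self-contained sketch of that arithmetic (the block number is an arbitrary example):

#include <stdio.h>

typedef unsigned int BlockNumber;

#define PREFETCH_SIZE ((BlockNumber) 32)

int
main(void)
{
	/* Round a block number down to the start of its 32-block prefetch window */
	BlockNumber blkno = 1000;
	BlockNumber prefetchStart = blkno & ~(PREFETCH_SIZE - 1);

	printf("%u -> %u\n", blkno, prefetchStart);	/* prints: 1000 -> 992 */
	return 0;
}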

◆ dead_items_add()

static void dead_items_add ( LVRelState vacrel,
BlockNumber  blkno,
OffsetNumber offsets,
int  num_offsets 
)
static

Definition at line 3548 of file vacuumlazy.c.

3550{
3551 const int prog_index[2] = {
3552 PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS,
3553 PROGRESS_VACUUM_DEAD_TUPLE_BYTES
3554 };
3555 int64 prog_val[2];
3556
3557 TidStoreSetBlockOffsets(vacrel->dead_items, blkno, offsets, num_offsets);
3558 vacrel->dead_items_info->num_items += num_offsets;
3559
3560 /* update the progress information */
3561 prog_val[0] = vacrel->dead_items_info->num_items;
3562 prog_val[1] = TidStoreMemoryUsage(vacrel->dead_items);
3563 pgstat_progress_update_multi_param(2, prog_index, prog_val);
3564}
void pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val)
int64_t int64
Definition: c.h:499
#define PROGRESS_VACUUM_DEAD_TUPLE_BYTES
Definition: progress.h:27
#define PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS
Definition: progress.h:28
VacDeadItemsInfo * dead_items_info
Definition: vacuumlazy.c:311
TidStore * dead_items
Definition: vacuumlazy.c:310
int64 num_items
Definition: vacuum.h:295
void TidStoreSetBlockOffsets(TidStore *ts, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
Definition: tidstore.c:345
size_t TidStoreMemoryUsage(TidStore *ts)
Definition: tidstore.c:532

References LVRelState::dead_items, LVRelState::dead_items_info, VacDeadItemsInfo::num_items, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS, TidStoreMemoryUsage(), and TidStoreSetBlockOffsets().

Referenced by lazy_scan_noprune(), and lazy_scan_prune().

◆ dead_items_alloc()

static void dead_items_alloc ( LVRelState vacrel,
int  nworkers 
)
static

Definition at line 3483 of file vacuumlazy.c.

3484{
3485 VacDeadItemsInfo *dead_items_info;
3486 int vac_work_mem = AmAutoVacuumWorkerProcess() &&
3487 autovacuum_work_mem != -1 ?
3488 autovacuum_work_mem : maintenance_work_mem;
3489
3490 /*
3491 * Initialize state for a parallel vacuum. As of now, only one worker can
3492 * be used for an index, so we invoke parallelism only if there are at
3493 * least two indexes on a table.
3494 */
3495 if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)
3496 {
3497 /*
3498 * Since parallel workers cannot access data in temporary tables, we
3499 * can't perform parallel vacuum on them.
3500 */
3501 if (RelationUsesLocalBuffers(vacrel->rel))
3502 {
3503 /*
3504 * Give warning only if the user explicitly tries to perform a
3505 * parallel vacuum on the temporary table.
3506 */
3507 if (nworkers > 0)
3509 (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
3510 vacrel->relname)));
3511 }
3512 else
3513 vacrel->pvs = parallel_vacuum_init(vacrel->rel, vacrel->indrels,
3514 vacrel->nindexes, nworkers,
3515 vac_work_mem,
3516 vacrel->verbose ? INFO : DEBUG2,
3517 vacrel->bstrategy);
3518
3519 /*
3520 * If parallel mode started, dead_items and dead_items_info spaces are
3521 * allocated in DSM.
3522 */
3523 if (ParallelVacuumIsActive(vacrel))
3524 {
3525 vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs,
3526 &vacrel->dead_items_info);
3527 return;
3528 }
3529 }
3530
3531 /*
3532 * Serial VACUUM case. Allocate both dead_items and dead_items_info
3533 * locally.
3534 */
3535
3536 dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));
3537 dead_items_info->max_bytes = vac_work_mem * (Size) 1024;
3538 dead_items_info->num_items = 0;
3539 vacrel->dead_items_info = dead_items_info;
3540
3541 vacrel->dead_items = TidStoreCreateLocal(dead_items_info->max_bytes, true);
3542}
int autovacuum_work_mem
Definition: autovacuum.c:121
size_t Size
Definition: c.h:576
#define WARNING
Definition: elog.h:36
int maintenance_work_mem
Definition: globals.c:134
void * palloc(Size size)
Definition: mcxt.c:1943
#define AmAutoVacuumWorkerProcess()
Definition: miscadmin.h:383
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:648
ParallelVacuumState * pvs
Definition: vacuumlazy.c:268
int nindexes
Definition: vacuumlazy.c:264
Relation * indrels
Definition: vacuumlazy.c:263
bool do_index_vacuuming
Definition: vacuumlazy.c:278
size_t max_bytes
Definition: vacuum.h:294
TidStore * TidStoreCreateLocal(size_t max_bytes, bool insert_only)
Definition: tidstore.c:162
#define ParallelVacuumIsActive(vacrel)
Definition: vacuumlazy.c:221
TidStore * parallel_vacuum_get_dead_items(ParallelVacuumState *pvs, VacDeadItemsInfo **dead_items_info_p)
ParallelVacuumState * parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, int nrequested_workers, int vac_work_mem, int elevel, BufferAccessStrategy bstrategy)

References AmAutoVacuumWorkerProcess, autovacuum_work_mem, LVRelState::bstrategy, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_vacuuming, ereport, errmsg(), LVRelState::indrels, INFO, maintenance_work_mem, VacDeadItemsInfo::max_bytes, LVRelState::nindexes, VacDeadItemsInfo::num_items, palloc(), parallel_vacuum_get_dead_items(), parallel_vacuum_init(), ParallelVacuumIsActive, LVRelState::pvs, LVRelState::rel, RelationUsesLocalBuffers, LVRelState::relname, TidStoreCreateLocal(), LVRelState::verbose, and WARNING.

Referenced by heap_vacuum_rel().
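
In the serial case, max_bytes is simply the working-memory limit expressed in bytes: for example, with maintenance_work_mem at its default of 64MB (65,536 kB) and no autovacuum_work_mem override, max_bytes = 65536 * 1024 bytes, i.e. roughly 64 MB of TID storage may accumulate before index vacuuming is triggered.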

◆ dead_items_cleanup()

static void dead_items_cleanup ( LVRelState vacrel)
static

Definition at line 3590 of file vacuumlazy.c.

3591{
3592 if (!ParallelVacuumIsActive(vacrel))
3593 {
3594 /* Don't bother with pfree here */
3595 return;
3596 }
3597
3598 /* End parallel mode */
3599 parallel_vacuum_end(vacrel->pvs, vacrel->indstats);
3600 vacrel->pvs = NULL;
3601}
IndexBulkDeleteResult ** indstats
Definition: vacuumlazy.c:347
void parallel_vacuum_end(ParallelVacuumState *pvs, IndexBulkDeleteResult **istats)

References LVRelState::indstats, parallel_vacuum_end(), ParallelVacuumIsActive, and LVRelState::pvs.

Referenced by heap_vacuum_rel().

◆ dead_items_reset()

static void dead_items_reset ( LVRelState vacrel)
static

Definition at line 3570 of file vacuumlazy.c.

3571{
3572 if (ParallelVacuumIsActive(vacrel))
3573 {
3574 parallel_vacuum_reset_dead_items(vacrel->pvs);
3575 return;
3576 }
3577
3578 /* Recreate the tidstore with the same max_bytes limitation */
3579 TidStoreDestroy(vacrel->dead_items);
3580 vacrel->dead_items = TidStoreCreateLocal(vacrel->dead_items_info->max_bytes, true);
3581
3582 /* Reset the counter */
3583 vacrel->dead_items_info->num_items = 0;
3584}
void TidStoreDestroy(TidStore *ts)
Definition: tidstore.c:317
void parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs)

References LVRelState::dead_items, LVRelState::dead_items_info, VacDeadItemsInfo::max_bytes, VacDeadItemsInfo::num_items, parallel_vacuum_reset_dead_items(), ParallelVacuumIsActive, LVRelState::pvs, TidStoreCreateLocal(), and TidStoreDestroy().

Referenced by lazy_vacuum().

◆ find_next_unskippable_block()

static void find_next_unskippable_block ( LVRelState vacrel,
bool *  skipsallvis 
)
static

Definition at line 1670 of file vacuumlazy.c.

1671{
1672 BlockNumber rel_pages = vacrel->rel_pages;
1673 BlockNumber next_unskippable_block = vacrel->next_unskippable_block + 1;
1674 Buffer next_unskippable_vmbuffer = vacrel->next_unskippable_vmbuffer;
1675 bool next_unskippable_eager_scanned = false;
1676 bool next_unskippable_allvis;
1677
1678 *skipsallvis = false;
1679
1680 for (;; next_unskippable_block++)
1681 {
1682 uint8 mapbits = visibilitymap_get_status(vacrel->rel,
1683 next_unskippable_block,
1684 &next_unskippable_vmbuffer);
1685
1686 next_unskippable_allvis = (mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0;
1687
1688 /*
1689 * At the start of each eager scan region, normal vacuums with eager
1690 * scanning enabled reset the failure counter, allowing vacuum to
1691 * resume eager scanning if it had been suspended in the previous
1692 * region.
1693 */
1694 if (next_unskippable_block >= vacrel->next_eager_scan_region_start)
1695 {
1696 vacrel->next_eager_scan_region_start += EAGER_SCAN_REGION_SIZE;
1697 vacrel->eager_scan_remaining_fails =
1698 vacrel->eager_scan_max_fails_per_region;
1699 }
1700
1701 /*
1702 * A block is unskippable if it is not all visible according to the
1703 * visibility map.
1704 */
1705 if (!next_unskippable_allvis)
1706 {
1707 Assert((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0);
1708 break;
1709 }
1710
1711 /*
1712 * Caller must scan the last page to determine whether it has tuples
1713 * (caller must have the opportunity to set vacrel->nonempty_pages).
1714 * This rule avoids having lazy_truncate_heap() take access-exclusive
1715 * lock on rel to attempt a truncation that fails anyway, just because
1716 * there are tuples on the last page (it is likely that there will be
1717 * tuples on other nearby pages as well, but those can be skipped).
1718 *
1719 * Implement this by always treating the last block as unsafe to skip.
1720 */
1721 if (next_unskippable_block == rel_pages - 1)
1722 break;
1723
1724 /* DISABLE_PAGE_SKIPPING makes all skipping unsafe */
1725 if (!vacrel->skipwithvm)
1726 break;
1727
1728 /*
1729 * All-frozen pages cannot contain XIDs < OldestXmin (XIDs that aren't
1730 * already frozen by now), so this page can be skipped.
1731 */
1732 if ((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0)
1733 continue;
1734
1735 /*
1736 * Aggressive vacuums cannot skip any all-visible pages that are not
1737 * also all-frozen.
1738 */
1739 if (vacrel->aggressive)
1740 break;
1741
1742 /*
1743 * Normal vacuums with eager scanning enabled only skip all-visible
1744 * but not all-frozen pages if they have hit the failure limit for the
1745 * current eager scan region.
1746 */
1747 if (vacrel->eager_scan_remaining_fails > 0)
1748 {
1749 next_unskippable_eager_scanned = true;
1750 break;
1751 }
1752
1753 /*
1754 * All-visible blocks are safe to skip in a normal vacuum. But
1755 * remember that the final range contains such a block for later.
1756 */
1757 *skipsallvis = true;
1758 }
1759
1760 /* write the local variables back to vacrel */
1761 vacrel->next_unskippable_block = next_unskippable_block;
1762 vacrel->next_unskippable_allvis = next_unskippable_allvis;
1763 vacrel->next_unskippable_eager_scanned = next_unskippable_eager_scanned;
1764 vacrel->next_unskippable_vmbuffer = next_unskippable_vmbuffer;
1765}
uint8_t uint8
Definition: c.h:500
Assert(PointerIsAligned(start, uint64))
BlockNumber next_eager_scan_region_start
Definition: vacuumlazy.c:378
bool next_unskippable_eager_scanned
Definition: vacuumlazy.c:363
Buffer next_unskippable_vmbuffer
Definition: vacuumlazy.c:364
BlockNumber eager_scan_remaining_fails
Definition: vacuumlazy.c:410
bool aggressive
Definition: vacuumlazy.c:271
BlockNumber next_unskippable_block
Definition: vacuumlazy.c:361
bool skipwithvm
Definition: vacuumlazy.c:273
bool next_unskippable_allvis
Definition: vacuumlazy.c:362
BlockNumber eager_scan_max_fails_per_region
Definition: vacuumlazy.c:400
#define EAGER_SCAN_REGION_SIZE
Definition: vacuumlazy.c:250
uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE

References LVRelState::aggressive, Assert(), LVRelState::eager_scan_max_fails_per_region, EAGER_SCAN_REGION_SIZE, LVRelState::eager_scan_remaining_fails, LVRelState::next_eager_scan_region_start, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::rel, LVRelState::rel_pages, LVRelState::skipwithvm, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_get_status().

Referenced by heap_vac_scan_next_block().
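
For reference, the visibility map keeps two bits per heap page, and the loop above classifies each block from them. A minimal, self-contained sketch of that classification (the bit values mirror VISIBILITYMAP_ALL_VISIBLE and VISIBILITYMAP_ALL_FROZEN from visibilitymap.h; the sample mapbits values are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VISIBILITYMAP_ALL_VISIBLE 0x01
#define VISIBILITYMAP_ALL_FROZEN  0x02

int
main(void)
{
	/* Made-up pages: neither bit set, all-visible only, all-visible and all-frozen */
	uint8_t samples[] = {0x00, 0x01, 0x03};

	for (int i = 0; i < 3; i++)
	{
		uint8_t mapbits = samples[i];
		bool    all_visible = (mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0;
		bool    all_frozen = (mapbits & VISIBILITYMAP_ALL_FROZEN) != 0;

		/* Not all-visible: unskippable.  All-frozen: safe to skip outright. */
		printf("mapbits=0x%02x all_visible=%d all_frozen=%d\n",
			   (unsigned) mapbits, all_visible, all_frozen);
	}
	return 0;
}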

◆ heap_page_is_all_visible()

static bool heap_page_is_all_visible ( LVRelState vacrel,
Buffer  buf,
TransactionId visibility_cutoff_xid,
bool *  all_frozen 
)
static

Definition at line 3615 of file vacuumlazy.c.

3618{
3619 Page page = BufferGetPage(buf);
3620 BlockNumber blockno = BufferGetBlockNumber(buf);
3621 OffsetNumber offnum,
3622 maxoff;
3623 bool all_visible = true;
3624
3625 *visibility_cutoff_xid = InvalidTransactionId;
3626 *all_frozen = true;
3627
3628 maxoff = PageGetMaxOffsetNumber(page);
3629 for (offnum = FirstOffsetNumber;
3630 offnum <= maxoff && all_visible;
3631 offnum = OffsetNumberNext(offnum))
3632 {
3633 ItemId itemid;
3634 HeapTupleData tuple;
3635
3636 /*
3637 * Set the offset number so that we can display it along with any
3638 * error that occurred while processing this tuple.
3639 */
3640 vacrel->offnum = offnum;
3641 itemid = PageGetItemId(page, offnum);
3642
3643 /* Unused or redirect line pointers are of no interest */
3644 if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
3645 continue;
3646
3647 ItemPointerSet(&(tuple.t_self), blockno, offnum);
3648
3649 /*
3650 * Dead line pointers can have index pointers pointing to them. So
3651 * they can't be treated as visible
3652 */
3653 if (ItemIdIsDead(itemid))
3654 {
3655 all_visible = false;
3656 *all_frozen = false;
3657 break;
3658 }
3659
3660 Assert(ItemIdIsNormal(itemid));
3661
3662 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
3663 tuple.t_len = ItemIdGetLength(itemid);
3664 tuple.t_tableOid = RelationGetRelid(vacrel->rel);
3665
3666 switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
3667 buf))
3668 {
3669 case HEAPTUPLE_LIVE:
3670 {
3671 TransactionId xmin;
3672
3673 /* Check comments in lazy_scan_prune. */
3674 if (!HeapTupleHeaderXminCommitted(tuple.t_data))
3675 {
3676 all_visible = false;
3677 *all_frozen = false;
3678 break;
3679 }
3680
3681 /*
3682 * The inserter definitely committed. But is it old enough
3683 * that everyone sees it as committed?
3684 */
3685 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
3686 if (!TransactionIdPrecedes(xmin,
3687 vacrel->cutoffs.OldestXmin))
3688 {
3689 all_visible = false;
3690 *all_frozen = false;
3691 break;
3692 }
3693
3694 /* Track newest xmin on page. */
3695 if (TransactionIdFollows(xmin, *visibility_cutoff_xid) &&
3696 TransactionIdIsNormal(xmin))
3697 *visibility_cutoff_xid = xmin;
3698
3699 /* Check whether this tuple is already frozen or not */
3700 if (all_visible && *all_frozen &&
3701 heap_tuple_needs_eventual_freeze(tuple.t_data))
3702 *all_frozen = false;
3703 }
3704 break;
3705
3706 case HEAPTUPLE_DEAD:
3707 case HEAPTUPLE_RECENTLY_DEAD:
3708 case HEAPTUPLE_INSERT_IN_PROGRESS:
3709 case HEAPTUPLE_DELETE_IN_PROGRESS:
3710 {
3711 all_visible = false;
3712 *all_frozen = false;
3713 break;
3714 }
3715 default:
3716 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
3717 break;
3718 }
3719 } /* scan along page */
3720
3721 /* Clear the offset information once we have processed the given page. */
3722 vacrel->offnum = InvalidOffsetNumber;
3723
3724 return all_visible;
3725}
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:4231
static Item PageGetItem(const PageData *page, const ItemIdData *itemId)
Definition: bufpage.h:354
uint32 TransactionId
Definition: c.h:623
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
Definition: heapam.c:7774
@ HEAPTUPLE_RECENTLY_DEAD
Definition: heapam.h:126
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition: heapam.h:127
@ HEAPTUPLE_LIVE
Definition: heapam.h:125
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition: heapam.h:128
@ HEAPTUPLE_DEAD
Definition: heapam.h:124
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static TransactionId HeapTupleHeaderGetXmin(const HeapTupleHeaderData *tup)
Definition: htup_details.h:324
static bool HeapTupleHeaderXminCommitted(const HeapTupleHeaderData *tup)
Definition: htup_details.h:337
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition: itemptr.h:135
#define InvalidOffsetNumber
Definition: off.h:26
#define RelationGetRelid(relation)
Definition: rel.h:516
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
OffsetNumber offnum
Definition: vacuumlazy.c:296
struct VacuumCutoffs cutoffs
Definition: vacuumlazy.c:283
TransactionId OldestXmin
Definition: vacuum.h:274
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:280
bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition: transam.c:314
#define InvalidTransactionId
Definition: transam.h:31
#define TransactionIdIsNormal(xid)
Definition: transam.h:42

References Assert(), buf, BufferGetBlockNumber(), BufferGetPage(), LVRelState::cutoffs, elog, ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin(), HeapTupleHeaderXminCommitted(), HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdIsNormal, and TransactionIdPrecedes().

Referenced by lazy_scan_prune(), and lazy_vacuum_heap_page().

◆ heap_vac_scan_next_block()

static BlockNumber heap_vac_scan_next_block ( ReadStream stream,
void *  callback_private_data,
void *  per_buffer_data 
)
static

Definition at line 1565 of file vacuumlazy.c.

1568{
1569 BlockNumber next_block;
1570 LVRelState *vacrel = callback_private_data;
1571 uint8 blk_info = 0;
1572
1573 /* relies on InvalidBlockNumber + 1 overflowing to 0 on first call */
1574 next_block = vacrel->current_block + 1;
1575
1576 /* Have we reached the end of the relation? */
1577 if (next_block >= vacrel->rel_pages)
1578 {
1579 if (BufferIsValid(vacrel->next_unskippable_vmbuffer))
1580 {
1581 ReleaseBuffer(vacrel->next_unskippable_vmbuffer);
1582 vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1583 }
1584 return InvalidBlockNumber;
1585 }
1586
1587 /*
1588 * We must be in one of the three following states:
1589 */
1590 if (next_block > vacrel->next_unskippable_block ||
1591 vacrel->next_unskippable_vmbuffer == InvalidBuffer)
1592 {
1593 /*
1594 * 1. We have just processed an unskippable block (or we're at the
1595 * beginning of the scan). Find the next unskippable block using the
1596 * visibility map.
1597 */
1598 bool skipsallvis;
1599
1600 find_next_unskippable_block(vacrel, &skipsallvis);
1601
1602 /*
1603 * We now know the next block that we must process. It can be the
1604 * next block after the one we just processed, or something further
1605 * ahead. If it's further ahead, we can jump to it, but we choose to
1606 * do so only if we can skip at least SKIP_PAGES_THRESHOLD consecutive
1607 * pages. Since we're reading sequentially, the OS should be doing
1608 * readahead for us, so there's no gain in skipping a page now and
1609 * then. Skipping such a range might even discourage sequential
1610 * detection.
1611 *
1612 * This test also enables more frequent relfrozenxid advancement
1613 * during non-aggressive VACUUMs. If the range has any all-visible
1614 * pages then skipping makes updating relfrozenxid unsafe, which is a
1615 * real downside.
1616 */
1617 if (vacrel->next_unskippable_block - next_block >= SKIP_PAGES_THRESHOLD)
1618 {
1619 next_block = vacrel->next_unskippable_block;
1620 if (skipsallvis)
1621 vacrel->skippedallvis = true;
1622 }
1623 }
1624
1625 /* Now we must be in one of the two remaining states: */
1626 if (next_block < vacrel->next_unskippable_block)
1627 {
1628 /*
1629 * 2. We are processing a range of blocks that we could have skipped
1630 * but chose not to. We know that they are all-visible in the VM,
1631 * otherwise they would've been unskippable.
1632 */
1633 vacrel->current_block = next_block;
1634 blk_info |= VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM;
1635 *((uint8 *) per_buffer_data) = blk_info;
1636 return vacrel->current_block;
1637 }
1638 else
1639 {
1640 /*
1641 * 3. We reached the next unskippable block. Process it. On next
1642 * iteration, we will be back in state 1.
1643 */
1644 Assert(next_block == vacrel->next_unskippable_block);
1645
1646 vacrel->current_block = next_block;
1647 if (vacrel->next_unskippable_allvis)
1648 blk_info |= VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM;
1649 if (vacrel->next_unskippable_eager_scanned)
1650 blk_info |= VAC_BLK_WAS_EAGER_SCANNED;
1651 *((uint8 *) per_buffer_data) = blk_info;
1652 return vacrel->current_block;
1653 }
1654}
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:5373
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:368
BlockNumber current_block
Definition: vacuumlazy.c:360
bool skippedallvis
Definition: vacuumlazy.c:288
#define VAC_BLK_WAS_EAGER_SCANNED
Definition: vacuumlazy.c:256
static void find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis)
Definition: vacuumlazy.c:1670
#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM
Definition: vacuumlazy.c:257
#define SKIP_PAGES_THRESHOLD
Definition: vacuumlazy.c:209

References Assert(), BufferIsValid(), LVRelState::current_block, find_next_unskippable_block(), InvalidBlockNumber, InvalidBuffer, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::rel_pages, ReleaseBuffer(), SKIP_PAGES_THRESHOLD, LVRelState::skippedallvis, VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM, and VAC_BLK_WAS_EAGER_SCANNED.

Referenced by lazy_scan_heap().

◆ heap_vacuum_eager_scan_setup()

static void heap_vacuum_eager_scan_setup ( LVRelState vacrel,
VacuumParams params 
)
static

Definition at line 488 of file vacuumlazy.c.

489{
490 uint32 randseed;
491 BlockNumber allvisible;
492 BlockNumber allfrozen;
493 float first_region_ratio;
494 bool oldest_unfrozen_before_cutoff = false;
495
496 /*
497 * Initialize eager scan management fields to their disabled values.
498 * Aggressive vacuums, normal vacuums of small tables, and normal vacuums
499 * of tables without sufficiently old tuples disable eager scanning.
500 */
501 vacrel->next_eager_scan_region_start = InvalidBlockNumber;
502 vacrel->eager_scan_max_fails_per_region = 0;
503 vacrel->eager_scan_remaining_fails = 0;
504 vacrel->eager_scan_remaining_successes = 0;
505
506 /* If eager scanning is explicitly disabled, just return. */
507 if (params->max_eager_freeze_failure_rate == 0)
508 return;
509
510 /*
511 * The caller will have determined whether or not an aggressive vacuum is
512 * required by either the vacuum parameters or the relative age of the
513 * oldest unfrozen transaction IDs. An aggressive vacuum must scan every
514 * all-visible page to safely advance the relfrozenxid and/or relminmxid,
515 * so scans of all-visible pages are not considered eager.
516 */
517 if (vacrel->aggressive)
518 return;
519
520 /*
521 * Aggressively vacuuming a small relation shouldn't take long, so it
522 * isn't worth amortizing. We use two times the region size as the size
523 * cutoff because the eager scan start block is a random spot somewhere in
524 * the first region, making the second region the first to be eager
525 * scanned normally.
526 */
527 if (vacrel->rel_pages < 2 * EAGER_SCAN_REGION_SIZE)
528 return;
529
530 /*
531 * We only want to enable eager scanning if we are likely to be able to
532 * freeze some of the pages in the relation.
533 *
534 * Tuples with XIDs older than OldestXmin or MXIDs older than OldestMxact
535 * are technically freezable, but we won't freeze them unless the criteria
536 * for opportunistic freezing is met. Only tuples with XIDs/MXIDs older
537 * than the FreezeLimit/MultiXactCutoff are frozen in the common case.
538 *
539 * So, as a heuristic, we wait until the FreezeLimit has advanced past the
540 * relfrozenxid or the MultiXactCutoff has advanced past the relminmxid to
541 * enable eager scanning.
542 */
543 if (TransactionIdIsNormal(vacrel->cutoffs.relfrozenxid) &&
544 TransactionIdPrecedes(vacrel->cutoffs.relfrozenxid,
545 vacrel->cutoffs.FreezeLimit))
546 oldest_unfrozen_before_cutoff = true;
547
548 if (!oldest_unfrozen_before_cutoff &&
549 MultiXactIdIsValid(vacrel->cutoffs.relminmxid) &&
550 MultiXactIdPrecedes(vacrel->cutoffs.relminmxid,
551 vacrel->cutoffs.MultiXactCutoff))
552 oldest_unfrozen_before_cutoff = true;
553
554 if (!oldest_unfrozen_before_cutoff)
555 return;
556
557 /* We have met the criteria to eagerly scan some pages. */
558
559 /*
560 * Our success cap is MAX_EAGER_FREEZE_SUCCESS_RATE of the number of
561 * all-visible but not all-frozen blocks in the relation.
562 */
563 visibilitymap_count(vacrel->rel, &allvisible, &allfrozen);
564
565 vacrel->eager_scan_remaining_successes =
566 (BlockNumber) (MAX_EAGER_FREEZE_SUCCESS_RATE *
567 (allvisible - allfrozen));
568
569 /* If every all-visible page is frozen, eager scanning is disabled. */
570 if (vacrel->eager_scan_remaining_successes == 0)
571 return;
572
573 /*
574 * Now calculate the bounds of the first eager scan region. Its end block
575 * will be a random spot somewhere in the first EAGER_SCAN_REGION_SIZE
576 * blocks. This affects the bounds of all subsequent regions and avoids
577 * eager scanning and failing to freeze the same blocks each vacuum of the
578 * relation.
579 */
580 randseed = pg_prng_uint32(&pg_global_prng_state);
581
582 vacrel->next_eager_scan_region_start = randseed % EAGER_SCAN_REGION_SIZE;
583
584 Assert(params->max_eager_freeze_failure_rate > 0 &&
585 params->max_eager_freeze_failure_rate <= 1);
586
587 vacrel->eager_scan_max_fails_per_region =
588 params->max_eager_freeze_failure_rate *
589 EAGER_SCAN_REGION_SIZE;
590
591 /*
592 * The first region will be smaller than subsequent regions. As such,
593 * adjust the eager freeze failures tolerated for this region.
594 */
595 first_region_ratio = 1 - (float) vacrel->next_eager_scan_region_start /
596 EAGER_SCAN_REGION_SIZE;
597
598 vacrel->eager_scan_max_fails_per_region =
599 vacrel->eager_scan_max_fails_per_region *
600 first_region_ratio;
601}
uint32_t uint32
Definition: c.h:502
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3317
#define MultiXactIdIsValid(multi)
Definition: multixact.h:28
uint32 pg_prng_uint32(pg_prng_state *state)
Definition: pg_prng.c:227
pg_prng_state pg_global_prng_state
Definition: pg_prng.c:34
BlockNumber eager_scan_remaining_successes
Definition: vacuumlazy.c:389
TransactionId FreezeLimit
Definition: vacuum.h:284
TransactionId relfrozenxid
Definition: vacuum.h:258
MultiXactId relminmxid
Definition: vacuum.h:259
MultiXactId MultiXactCutoff
Definition: vacuum.h:285
double max_eager_freeze_failure_rate
Definition: vacuum.h:239
#define MAX_EAGER_FREEZE_SUCCESS_RATE
Definition: vacuumlazy.c:241
void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)

References LVRelState::aggressive, Assert(), LVRelState::cutoffs, LVRelState::eager_scan_max_fails_per_region, EAGER_SCAN_REGION_SIZE, LVRelState::eager_scan_remaining_fails, LVRelState::eager_scan_remaining_successes, VacuumCutoffs::FreezeLimit, InvalidBlockNumber, VacuumParams::max_eager_freeze_failure_rate, MAX_EAGER_FREEZE_SUCCESS_RATE, VacuumCutoffs::MultiXactCutoff, MultiXactIdIsValid, MultiXactIdPrecedes(), LVRelState::next_eager_scan_region_start, pg_global_prng_state, pg_prng_uint32(), LVRelState::rel, LVRelState::rel_pages, VacuumCutoffs::relfrozenxid, VacuumCutoffs::relminmxid, TransactionIdIsNormal, TransactionIdPrecedes(), and visibilitymap_count().

Referenced by heap_vacuum_rel().
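
As a worked example with made-up numbers: if visibilitymap_count() reports 50,000 all-visible pages of which 20,000 are also all-frozen, the cap on eager freeze successes is MAX_EAGER_FREEZE_SUCCESS_RATE * (50,000 - 20,000) = 0.2 * 30,000 = 6,000 pages, and with a max_eager_freeze_failure_rate of, say, 0.03, each full 4,096-block region tolerates about 0.03 * 4096 ≈ 122 failed eager freeze attempts before eager scanning is suspended until the next region.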

◆ heap_vacuum_rel()

void heap_vacuum_rel ( Relation  rel,
VacuumParams params,
BufferAccessStrategy  bstrategy 
)

Definition at line 615 of file vacuumlazy.c.

617{
618 LVRelState *vacrel;
619 bool verbose,
620 instrument,
621 skipwithvm,
622 frozenxid_updated,
623 minmulti_updated;
624 BlockNumber orig_rel_pages,
625 new_rel_pages,
626 new_rel_allvisible,
627 new_rel_allfrozen;
628 PGRUsage ru0;
629 TimestampTz starttime = 0;
630 PgStat_Counter startreadtime = 0,
631 startwritetime = 0;
632 WalUsage startwalusage = pgWalUsage;
633 BufferUsage startbufferusage = pgBufferUsage;
634 ErrorContextCallback errcallback;
635 char **indnames = NULL;
636
637 verbose = (params->options & VACOPT_VERBOSE) != 0;
638 instrument = (verbose || (AmAutoVacuumWorkerProcess() &&
639 params->log_min_duration >= 0));
640 if (instrument)
641 {
642 pg_rusage_init(&ru0);
643 if (track_io_timing)
644 {
645 startreadtime = pgStatBlockReadTime;
646 startwritetime = pgStatBlockWriteTime;
647 }
648 }
649
650 /* Used for instrumentation and stats report */
651 starttime = GetCurrentTimestamp();
652
654 RelationGetRelid(rel));
655
656 /*
657 * Setup error traceback support for ereport() first. The idea is to set
658 * up an error context callback to display additional information on any
659 * error during a vacuum. During different phases of vacuum, we update
660 * the state so that the error context callback always display current
661 * information.
662 *
663 * Copy the names of heap rel into local memory for error reporting
664 * purposes, too. It isn't always safe to assume that we can get the name
665 * of each rel. It's convenient for code in lazy_scan_heap to always use
666 * these temp copies.
667 */
668 vacrel = (LVRelState *) palloc0(sizeof(LVRelState));
672 vacrel->indname = NULL;
674 vacrel->verbose = verbose;
675 errcallback.callback = vacuum_error_callback;
676 errcallback.arg = vacrel;
677 errcallback.previous = error_context_stack;
678 error_context_stack = &errcallback;
679
680 /* Set up high level stuff about rel and its indexes */
681 vacrel->rel = rel;
683 &vacrel->indrels);
684 vacrel->bstrategy = bstrategy;
685 if (instrument && vacrel->nindexes > 0)
686 {
687 /* Copy index names used by instrumentation (not error reporting) */
688 indnames = palloc(sizeof(char *) * vacrel->nindexes);
689 for (int i = 0; i < vacrel->nindexes; i++)
690 indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
691 }
692
693 /*
694 * The index_cleanup param either disables index vacuuming and cleanup or
695 * forces it to go ahead when we would otherwise apply the index bypass
696 * optimization. The default is 'auto', which leaves the final decision
697 * up to lazy_vacuum().
698 *
699 * The truncate param allows user to avoid attempting relation truncation,
700 * though it can't force truncation to happen.
701 */
704 params->truncate != VACOPTVALUE_AUTO);
705
706 /*
707 * While VacuumFailSafeActive is reset to false before calling this, we
708 * still need to reset it here due to recursive calls.
709 */
710 VacuumFailsafeActive = false;
711 vacrel->consider_bypass_optimization = true;
712 vacrel->do_index_vacuuming = true;
713 vacrel->do_index_cleanup = true;
714 vacrel->do_rel_truncate = (params->truncate != VACOPTVALUE_DISABLED);
715 if (params->index_cleanup == VACOPTVALUE_DISABLED)
716 {
717 /* Force disable index vacuuming up-front */
718 vacrel->do_index_vacuuming = false;
719 vacrel->do_index_cleanup = false;
720 }
721 else if (params->index_cleanup == VACOPTVALUE_ENABLED)
722 {
723 /* Force index vacuuming. Note that failsafe can still bypass. */
724 vacrel->consider_bypass_optimization = false;
725 }
726 else
727 {
728 /* Default/auto, make all decisions dynamically */
730 }
731
732 /* Initialize page counters explicitly (be tidy) */
733 vacrel->scanned_pages = 0;
734 vacrel->eager_scanned_pages = 0;
735 vacrel->removed_pages = 0;
736 vacrel->new_frozen_tuple_pages = 0;
737 vacrel->lpdead_item_pages = 0;
738 vacrel->missed_dead_pages = 0;
739 vacrel->nonempty_pages = 0;
740 /* dead_items_alloc allocates vacrel->dead_items later on */
741
742 /* Allocate/initialize output statistics state */
743 vacrel->new_rel_tuples = 0;
744 vacrel->new_live_tuples = 0;
745 vacrel->indstats = (IndexBulkDeleteResult **)
746 palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
747
748 /* Initialize remaining counters (be tidy) */
749 vacrel->num_index_scans = 0;
750 vacrel->tuples_deleted = 0;
751 vacrel->tuples_frozen = 0;
752 vacrel->lpdead_items = 0;
753 vacrel->live_tuples = 0;
754 vacrel->recently_dead_tuples = 0;
755 vacrel->missed_dead_tuples = 0;
756
757 vacrel->vm_new_visible_pages = 0;
758 vacrel->vm_new_visible_frozen_pages = 0;
759 vacrel->vm_new_frozen_pages = 0;
760 vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
761
762 /*
763 * Get cutoffs that determine which deleted tuples are considered DEAD,
764 * not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze. Then determine
765 * the extent of the blocks that we'll scan in lazy_scan_heap. It has to
766 * happen in this order to ensure that the OldestXmin cutoff field works
767 * as an upper bound on the XIDs stored in the pages we'll actually scan
768 * (NewRelfrozenXid tracking must never be allowed to miss unfrozen XIDs).
769 *
770 * Next acquire vistest, a related cutoff that's used in pruning. We use
771 * vistest in combination with OldestXmin to ensure that
772 * heap_page_prune_and_freeze() always removes any deleted tuple whose
773 * xmax is < OldestXmin. lazy_scan_prune must never become confused about
774 * whether a tuple should be frozen or removed. (In the future we might
775 * want to teach lazy_scan_prune to recompute vistest from time to time,
776 * to increase the number of dead tuples it can prune away.)
777 */
778 vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
779 vacrel->vistest = GlobalVisTestFor(rel);
780 /* Initialize state used to track oldest extant XID/MXID */
781 vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin;
782 vacrel->NewRelminMxid = vacrel->cutoffs.OldestMxact;
783
784 /*
785 * Initialize state related to tracking all-visible page skipping. This is
786 * very important to determine whether or not it is safe to advance the
787 * relfrozenxid/relminmxid.
788 */
789 vacrel->skippedallvis = false;
790 skipwithvm = true;
792 {
793 /*
794 * Force aggressive mode, and disable skipping blocks using the
795 * visibility map (even those set all-frozen)
796 */
797 vacrel->aggressive = true;
798 skipwithvm = false;
799 }
800
801 vacrel->skipwithvm = skipwithvm;
802
803 /*
804 * Set up eager scan tracking state. This must happen after determining
805 * whether or not the vacuum must be aggressive, because only normal
806 * vacuums use the eager scan algorithm.
807 */
808 heap_vacuum_eager_scan_setup(vacrel, params);
809
810 if (verbose)
811 {
812 if (vacrel->aggressive)
814 (errmsg("aggressively vacuuming \"%s.%s.%s\"",
815 vacrel->dbname, vacrel->relnamespace,
816 vacrel->relname)));
817 else
819 (errmsg("vacuuming \"%s.%s.%s\"",
820 vacrel->dbname, vacrel->relnamespace,
821 vacrel->relname)));
822 }
823
824 /*
825 * Allocate dead_items memory using dead_items_alloc. This handles
826 * parallel VACUUM initialization as part of allocating shared memory
827 * space used for dead_items. (But do a failsafe precheck first, to
828 * ensure that parallel VACUUM won't be attempted at all when relfrozenxid
829 * is already dangerously old.)
830 */
832 dead_items_alloc(vacrel, params->nworkers);
833
834 /*
835 * Call lazy_scan_heap to perform all required heap pruning, index
836 * vacuuming, and heap vacuuming (plus related processing)
837 */
838 lazy_scan_heap(vacrel);
839
840 /*
841 * Free resources managed by dead_items_alloc. This ends parallel mode in
842 * passing when necessary.
843 */
844 dead_items_cleanup(vacrel);
846
847 /*
848 * Update pg_class entries for each of rel's indexes where appropriate.
849 *
850 * Unlike the later update to rel's pg_class entry, this is not critical.
851 * Maintains relpages/reltuples statistics used by the planner only.
852 */
853 if (vacrel->do_index_cleanup)
855
856 /* Done with rel's indexes */
857 vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
858
859 /* Optionally truncate rel */
860 if (should_attempt_truncation(vacrel))
861 lazy_truncate_heap(vacrel);
862
863 /* Pop the error context stack */
864 error_context_stack = errcallback.previous;
865
866 /* Report that we are now doing final cleanup */
869
870 /*
871 * Prepare to update rel's pg_class entry.
872 *
873 * Aggressive VACUUMs must always be able to advance relfrozenxid to a
874 * value >= FreezeLimit, and relminmxid to a value >= MultiXactCutoff.
875 * Non-aggressive VACUUMs may advance them by any amount, or not at all.
876 */
877 Assert(vacrel->NewRelfrozenXid == vacrel->cutoffs.OldestXmin ||
879 vacrel->cutoffs.relfrozenxid,
880 vacrel->NewRelfrozenXid));
881 Assert(vacrel->NewRelminMxid == vacrel->cutoffs.OldestMxact ||
883 vacrel->cutoffs.relminmxid,
884 vacrel->NewRelminMxid));
885 if (vacrel->skippedallvis)
886 {
887 /*
888 * Must keep original relfrozenxid in a non-aggressive VACUUM that
889 * chose to skip an all-visible page range. The state that tracks new
890 * values will have missed unfrozen XIDs from the pages we skipped.
891 */
892 Assert(!vacrel->aggressive);
895 }
896
897 /*
898 * For safety, clamp relallvisible to be not more than what we're setting
899 * pg_class.relpages to
900 */
901 new_rel_pages = vacrel->rel_pages; /* After possible rel truncation */
902 visibilitymap_count(rel, &new_rel_allvisible, &new_rel_allfrozen);
903 if (new_rel_allvisible > new_rel_pages)
904 new_rel_allvisible = new_rel_pages;
905
906 /*
907 * An all-frozen block _must_ be all-visible. As such, clamp the count of
908 * all-frozen blocks to the count of all-visible blocks. This matches the
909 * clamping of relallvisible above.
910 */
911 if (new_rel_allfrozen > new_rel_allvisible)
912 new_rel_allfrozen = new_rel_allvisible;
913
914 /*
915 * Now actually update rel's pg_class entry.
916 *
917 * In principle new_live_tuples could be -1 indicating that we (still)
918 * don't know the tuple count. In practice that can't happen, since we
919 * scan every page that isn't skipped using the visibility map.
920 */
921 vac_update_relstats(rel, new_rel_pages, vacrel->new_live_tuples,
922 new_rel_allvisible, new_rel_allfrozen,
923 vacrel->nindexes > 0,
924 vacrel->NewRelfrozenXid, vacrel->NewRelminMxid,
925 &frozenxid_updated, &minmulti_updated, false);
926
927 /*
928 * Report results to the cumulative stats system, too.
929 *
930 * Deliberately avoid telling the stats system about LP_DEAD items that
931 * remain in the table due to VACUUM bypassing index and heap vacuuming.
932 * ANALYZE will consider the remaining LP_DEAD items to be dead "tuples".
933 * It seems like a good idea to err on the side of not vacuuming again too
934 * soon in cases where the failsafe prevented significant amounts of heap
935 * vacuuming.
936 */
938 rel->rd_rel->relisshared,
939 Max(vacrel->new_live_tuples, 0),
940 vacrel->recently_dead_tuples +
941 vacrel->missed_dead_tuples,
942 starttime);
944
945 if (instrument)
946 {
948
949 if (verbose || params->log_min_duration == 0 ||
950 TimestampDifferenceExceeds(starttime, endtime,
951 params->log_min_duration))
952 {
953 long secs_dur;
954 int usecs_dur;
955 WalUsage walusage;
956 BufferUsage bufferusage;
958 char *msgfmt;
959 int32 diff;
960 double read_rate = 0,
961 write_rate = 0;
962 int64 total_blks_hit;
963 int64 total_blks_read;
964 int64 total_blks_dirtied;
965
966 TimestampDifference(starttime, endtime, &secs_dur, &usecs_dur);
967 memset(&walusage, 0, sizeof(WalUsage));
968 WalUsageAccumDiff(&walusage, &pgWalUsage, &startwalusage);
969 memset(&bufferusage, 0, sizeof(BufferUsage));
970 BufferUsageAccumDiff(&bufferusage, &pgBufferUsage, &startbufferusage);
971
972 total_blks_hit = bufferusage.shared_blks_hit +
973 bufferusage.local_blks_hit;
974 total_blks_read = bufferusage.shared_blks_read +
975 bufferusage.local_blks_read;
976 total_blks_dirtied = bufferusage.shared_blks_dirtied +
977 bufferusage.local_blks_dirtied;
978
980 if (verbose)
981 {
982 /*
983 * Aggressiveness already reported earlier, in dedicated
984 * VACUUM VERBOSE ereport
985 */
986 Assert(!params->is_wraparound);
987 msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
988 }
989 else if (params->is_wraparound)
990 {
991 /*
992 * While it's possible for a VACUUM to be both is_wraparound
993 * and !aggressive, that's just a corner-case -- is_wraparound
994 * implies aggressive. Produce distinct output for the corner
995 * case all the same, just in case.
996 */
997 if (vacrel->aggressive)
998 msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
999 else
1000 msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
1001 }
1002 else
1003 {
1004 if (vacrel->aggressive)
1005 msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
1006 else
1007 msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
1008 }
1009 appendStringInfo(&buf, msgfmt,
1010 vacrel->dbname,
1011 vacrel->relnamespace,
1012 vacrel->relname,
1013 vacrel->num_index_scans);
1014 appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total), %u eagerly scanned\n"),
1015 vacrel->removed_pages,
1016 new_rel_pages,
1017 vacrel->scanned_pages,
1018 orig_rel_pages == 0 ? 100.0 :
1019 100.0 * vacrel->scanned_pages /
1020 orig_rel_pages,
1021 vacrel->eager_scanned_pages);
1023 _("tuples: %" PRId64 " removed, %" PRId64 " remain, %" PRId64 " are dead but not yet removable\n"),
1024 vacrel->tuples_deleted,
1025 (int64) vacrel->new_rel_tuples,
1026 vacrel->recently_dead_tuples);
1027 if (vacrel->missed_dead_tuples > 0)
1029 _("tuples missed: %" PRId64 " dead from %u pages not removed due to cleanup lock contention\n"),
1030 vacrel->missed_dead_tuples,
1031 vacrel->missed_dead_pages);
1032 diff = (int32) (ReadNextTransactionId() -
1033 vacrel->cutoffs.OldestXmin);
1035 _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
1036 vacrel->cutoffs.OldestXmin, diff);
1037 if (frozenxid_updated)
1038 {
1039 diff = (int32) (vacrel->NewRelfrozenXid -
1040 vacrel->cutoffs.relfrozenxid);
1042 _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
1043 vacrel->NewRelfrozenXid, diff);
1044 }
1045 if (minmulti_updated)
1046 {
1047 diff = (int32) (vacrel->NewRelminMxid -
1048 vacrel->cutoffs.relminmxid);
1050 _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
1051 vacrel->NewRelminMxid, diff);
1052 }
1053 appendStringInfo(&buf, _("frozen: %u pages from table (%.2f%% of total) had %" PRId64 " tuples frozen\n"),
1054 vacrel->new_frozen_tuple_pages,
1055 orig_rel_pages == 0 ? 100.0 :
1056 100.0 * vacrel->new_frozen_tuple_pages /
1057 orig_rel_pages,
1058 vacrel->tuples_frozen);
1059
1061 _("visibility map: %u pages set all-visible, %u pages set all-frozen (%u were all-visible)\n"),
1062 vacrel->vm_new_visible_pages,
1064 vacrel->vm_new_frozen_pages,
1065 vacrel->vm_new_frozen_pages);
1066 if (vacrel->do_index_vacuuming)
1067 {
1068 if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
1069 appendStringInfoString(&buf, _("index scan not needed: "));
1070 else
1071 appendStringInfoString(&buf, _("index scan needed: "));
1072
1073 msgfmt = _("%u pages from table (%.2f%% of total) had %" PRId64 " dead item identifiers removed\n");
1074 }
1075 else
1076 {
1077 if (!VacuumFailsafeActive)
1078 appendStringInfoString(&buf, _("index scan bypassed: "));
1079 else
1080 appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
1081
1082 msgfmt = _("%u pages from table (%.2f%% of total) have %" PRId64 " dead item identifiers\n");
1083 }
1084 appendStringInfo(&buf, msgfmt,
1085 vacrel->lpdead_item_pages,
1086 orig_rel_pages == 0 ? 100.0 :
1087 100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
1088 vacrel->lpdead_items);
1089 for (int i = 0; i < vacrel->nindexes; i++)
1090 {
1091 IndexBulkDeleteResult *istat = vacrel->indstats[i];
1092
1093 if (!istat)
1094 continue;
1095
1096 appendStringInfo(&buf,
1097 _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
1098 indnames[i],
1099 istat->num_pages,
1100 istat->pages_newly_deleted,
1101 istat->pages_deleted,
1102 istat->pages_free);
1103 }
1104 if (track_cost_delay_timing)
1105 {
1106 /*
1107 * We bypass the changecount mechanism because this value is
1108 * only updated by the calling process. We also rely on the
1109 * above call to pgstat_progress_end_command() to not clear
1110 * the st_progress_param array.
1111 */
1112 appendStringInfo(&buf, _("delay time: %.3f ms\n"),
1113 (double) MyBEEntry->st_progress_param[PROGRESS_VACUUM_DELAY_TIME] / 1000000.0);
1114 }
1115 if (track_io_timing)
1116 {
1117 double read_ms = (double) (pgStatBlockReadTime - startreadtime) / 1000;
1118 double write_ms = (double) (pgStatBlockWriteTime - startwritetime) / 1000;
1119
1120 appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
1121 read_ms, write_ms);
1122 }
1123 if (secs_dur > 0 || usecs_dur > 0)
1124 {
1125 read_rate = (double) BLCKSZ * total_blks_read /
1126 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
1127 write_rate = (double) BLCKSZ * total_blks_dirtied /
1128 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
1129 }
1130 appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
1131 read_rate, write_rate);
1132 appendStringInfo(&buf,
1133 _("buffer usage: %" PRId64 " hits, %" PRId64 " reads, %" PRId64 " dirtied\n"),
1134 total_blks_hit,
1135 total_blks_read,
1136 total_blks_dirtied);
1137 appendStringInfo(&buf,
1138 _("WAL usage: %" PRId64 " records, %" PRId64 " full page images, %" PRIu64 " bytes, %" PRId64 " buffers full\n"),
1139 walusage.wal_records,
1140 walusage.wal_fpi,
1141 walusage.wal_bytes,
1142 walusage.wal_buffers_full);
1143 appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
1144
1145 ereport(verbose ? INFO : LOG,
1146 (errmsg_internal("%s", buf.data)));
1147 pfree(buf.data);
1148 }
1149 }
1150
1151 /* Cleanup index statistics and index names */
1152 for (int i = 0; i < vacrel->nindexes; i++)
1153 {
1154 if (vacrel->indstats[i])
1155 pfree(vacrel->indstats[i]);
1156
1157 if (instrument)
1158 pfree(indnames[i]);
1159 }
1160}
void TimestampDifference(TimestampTz start_time, TimestampTz stop_time, long *secs, int *microsecs)
Definition: timestamp.c:1721
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1781
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1645
void pgstat_progress_start_command(ProgressCommandType cmdtype, Oid relid)
void pgstat_progress_update_param(int index, int64 val)
void pgstat_progress_end_command(void)
@ PROGRESS_COMMAND_VACUUM
PgBackendStatus * MyBEEntry
bool track_io_timing
Definition: bufmgr.c:147
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:283
#define Max(x, y)
Definition: c.h:969
int32_t int32
Definition: c.h:498
int64 TimestampTz
Definition: timestamp.h:39
char * get_database_name(Oid dbid)
Definition: dbcommands.c:3188
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1158
ErrorContextCallback * error_context_stack
Definition: elog.c:95
#define _(x)
Definition: elog.c:91
#define LOG
Definition: elog.h:31
Oid MyDatabaseId
Definition: globals.c:95
int verbose
WalUsage pgWalUsage
Definition: instrument.c:22
void WalUsageAccumDiff(WalUsage *dst, const WalUsage *add, const WalUsage *sub)
Definition: instrument.c:287
BufferUsage pgBufferUsage
Definition: instrument.c:20
void BufferUsageAccumDiff(BufferUsage *dst, const BufferUsage *add, const BufferUsage *sub)
Definition: instrument.c:248
int i
Definition: isn.c:77
#define NoLock
Definition: lockdefs.h:34
#define RowExclusiveLock
Definition: lockdefs.h:38
char * get_namespace_name(Oid nspid)
Definition: lsyscache.c:3506
char * pstrdup(const char *in)
Definition: mcxt.c:2325
void pfree(void *pointer)
Definition: mcxt.c:2150
void * palloc0(Size size)
Definition: mcxt.c:1973
bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3331
#define InvalidMultiXactId
Definition: multixact.h:24
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
int64 PgStat_Counter
Definition: pgstat.h:65
PgStat_Counter pgStatBlockReadTime
PgStat_Counter pgStatBlockWriteTime
void pgstat_report_vacuum(Oid tableoid, bool shared, PgStat_Counter livetuples, PgStat_Counter deadtuples, TimestampTz starttime)
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4107
#define PROGRESS_VACUUM_PHASE_FINAL_CLEANUP
Definition: progress.h:39
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
#define PROGRESS_VACUUM_DELAY_TIME
Definition: progress.h:31
#define RelationGetRelationName(relation)
Definition: rel.h:550
#define RelationGetNamespace(relation)
Definition: rel.h:557
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:145
void appendStringInfoString(StringInfo str, const char *s)
Definition: stringinfo.c:230
void initStringInfo(StringInfo str)
Definition: stringinfo.c:97
int64 shared_blks_dirtied
Definition: instrument.h:28
int64 local_blks_hit
Definition: instrument.h:30
int64 shared_blks_read
Definition: instrument.h:27
int64 local_blks_read
Definition: instrument.h:31
int64 local_blks_dirtied
Definition: instrument.h:32
int64 shared_blks_hit
Definition: instrument.h:26
struct ErrorContextCallback * previous
Definition: elog.h:296
void(* callback)(void *arg)
Definition: elog.h:297
BlockNumber pages_deleted
Definition: genam.h:105
BlockNumber pages_newly_deleted
Definition: genam.h:104
BlockNumber pages_free
Definition: genam.h:106
BlockNumber num_pages
Definition: genam.h:100
BlockNumber vm_new_frozen_pages
Definition: vacuumlazy.c:337
int64 tuples_deleted
Definition: vacuumlazy.c:352
bool do_rel_truncate
Definition: vacuumlazy.c:280
BlockNumber scanned_pages
Definition: vacuumlazy.c:314
BlockNumber new_frozen_tuple_pages
Definition: vacuumlazy.c:323
GlobalVisState * vistest
Definition: vacuumlazy.c:284
BlockNumber removed_pages
Definition: vacuumlazy.c:322
int num_index_scans
Definition: vacuumlazy.c:350
double new_live_tuples
Definition: vacuumlazy.c:345
double new_rel_tuples
Definition: vacuumlazy.c:344
TransactionId NewRelfrozenXid
Definition: vacuumlazy.c:286
bool consider_bypass_optimization
Definition: vacuumlazy.c:275
int64 recently_dead_tuples
Definition: vacuumlazy.c:356
int64 tuples_frozen
Definition: vacuumlazy.c:353
char * dbname
Definition: vacuumlazy.c:291
BlockNumber missed_dead_pages
Definition: vacuumlazy.c:340
char * relnamespace
Definition: vacuumlazy.c:292
int64 live_tuples
Definition: vacuumlazy.c:355
int64 lpdead_items
Definition: vacuumlazy.c:354
BlockNumber lpdead_item_pages
Definition: vacuumlazy.c:339
BlockNumber eager_scanned_pages
Definition: vacuumlazy.c:320
bool do_index_cleanup
Definition: vacuumlazy.c:279
MultiXactId NewRelminMxid
Definition: vacuumlazy.c:287
int64 missed_dead_tuples
Definition: vacuumlazy.c:357
BlockNumber vm_new_visible_pages
Definition: vacuumlazy.c:326
VacErrPhase phase
Definition: vacuumlazy.c:297
char * indname
Definition: vacuumlazy.c:294
BlockNumber vm_new_visible_frozen_pages
Definition: vacuumlazy.c:334
int64 st_progress_param[PGSTAT_NUM_PROGRESS_PARAM]
Form_pg_class rd_rel
Definition: rel.h:111
MultiXactId OldestMxact
Definition: vacuum.h:275
int nworkers
Definition: vacuum.h:246
VacOptValue truncate
Definition: vacuum.h:231
bits32 options
Definition: vacuum.h:219
bool is_wraparound
Definition: vacuum.h:226
int log_min_duration
Definition: vacuum.h:227
VacOptValue index_cleanup
Definition: vacuum.h:230
int64 wal_buffers_full
Definition: instrument.h:56
uint64 wal_bytes
Definition: instrument.h:55
int64 wal_fpi
Definition: instrument.h:54
int64 wal_records
Definition: instrument.h:53
bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
Definition: transam.c:299
static TransactionId ReadNextTransactionId(void)
Definition: transam.h:315
bool track_cost_delay_timing
Definition: vacuum.c:80
void vac_open_indexes(Relation relation, LOCKMODE lockmode, int *nindexes, Relation **Irel)
Definition: vacuum.c:2340
void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
Definition: vacuum.c:2383
bool vacuum_get_cutoffs(Relation rel, const VacuumParams *params, struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:1104
bool VacuumFailsafeActive
Definition: vacuum.c:108
void vac_update_relstats(Relation relation, BlockNumber num_pages, double num_tuples, BlockNumber num_all_visible_pages, BlockNumber num_all_frozen_pages, bool hasindex, TransactionId frozenxid, MultiXactId minmulti, bool *frozenxid_updated, bool *minmulti_updated, bool in_outer_xact)
Definition: vacuum.c:1430
#define VACOPT_VERBOSE
Definition: vacuum.h:182
@ VACOPTVALUE_AUTO
Definition: vacuum.h:203
@ VACOPTVALUE_ENABLED
Definition: vacuum.h:205
@ VACOPTVALUE_UNSPECIFIED
Definition: vacuum.h:202
@ VACOPTVALUE_DISABLED
Definition: vacuum.h:204
#define VACOPT_DISABLE_PAGE_SKIPPING
Definition: vacuum.h:188
static void dead_items_cleanup(LVRelState *vacrel)
Definition: vacuumlazy.c:3590
static void update_relstats_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:3731
static void vacuum_error_callback(void *arg)
Definition: vacuumlazy.c:3766
static void lazy_truncate_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:3210
static bool should_attempt_truncation(LVRelState *vacrel)
Definition: vacuumlazy.c:3190
static void lazy_scan_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:1199
static bool lazy_check_wraparound_failsafe(LVRelState *vacrel)
Definition: vacuumlazy.c:2960
static void heap_vacuum_eager_scan_setup(LVRelState *vacrel, VacuumParams *params)
Definition: vacuumlazy.c:488
static void dead_items_alloc(LVRelState *vacrel, int nworkers)
Definition: vacuumlazy.c:3483
bool IsInParallelMode(void)
Definition: xact.c:1089

References _, LVRelState::aggressive, AmAutoVacuumWorkerProcess, appendStringInfo(), appendStringInfoString(), ErrorContextCallback::arg, Assert(), LVRelState::bstrategy, buf, BufferUsageAccumDiff(), ErrorContextCallback::callback, LVRelState::consider_bypass_optimization, LVRelState::cutoffs, LVRelState::dbname, dead_items_alloc(), dead_items_cleanup(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, LVRelState::eager_scanned_pages, ereport, errmsg(), errmsg_internal(), error_context_stack, VacuumCutoffs::FreezeLimit, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), GlobalVisTestFor(), heap_vacuum_eager_scan_setup(), i, VacuumParams::index_cleanup, LVRelState::indname, LVRelState::indrels, LVRelState::indstats, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, VacuumParams::is_wraparound, IsInParallelMode(), lazy_check_wraparound_failsafe(), lazy_scan_heap(), lazy_truncate_heap(), LVRelState::live_tuples, BufferUsage::local_blks_dirtied, BufferUsage::local_blks_hit, BufferUsage::local_blks_read, LOG, VacuumParams::log_min_duration, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, Max, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, VacuumCutoffs::MultiXactCutoff, MultiXactIdPrecedesOrEquals(), MyBEEntry, MyDatabaseId, LVRelState::new_frozen_tuple_pages, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, NoLock, LVRelState::nonempty_pages, LVRelState::num_index_scans, IndexBulkDeleteResult::num_pages, VacuumParams::nworkers, VacuumCutoffs::OldestMxact, VacuumCutoffs::OldestXmin, VacuumParams::options, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, IndexBulkDeleteResult::pages_newly_deleted, palloc(), palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgBufferUsage, pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), pgStatBlockReadTime, pgStatBlockWriteTime, pgWalUsage, LVRelState::phase, ErrorContextCallback::previous, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_DELAY_TIME, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, pstrdup(), RelationData::rd_rel, ReadNextTransactionId(), LVRelState::recently_dead_tuples, LVRelState::rel, LVRelState::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, VacuumCutoffs::relfrozenxid, VacuumCutoffs::relminmxid, LVRelState::relname, LVRelState::relnamespace, LVRelState::removed_pages, RowExclusiveLock, LVRelState::scanned_pages, BufferUsage::shared_blks_dirtied, BufferUsage::shared_blks_hit, BufferUsage::shared_blks_read, should_attempt_truncation(), LVRelState::skippedallvis, LVRelState::skipwithvm, PgBackendStatus::st_progress_param, TimestampDifference(), TimestampDifferenceExceeds(), track_cost_delay_timing, track_io_timing, TransactionIdPrecedesOrEquals(), VacuumParams::truncate, LVRelState::tuples_deleted, LVRelState::tuples_frozen, update_relstats_all_indexes(), vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_VERBOSE, VACOPTVALUE_AUTO, VACOPTVALUE_DISABLED, VACOPTVALUE_ENABLED, VACOPTVALUE_UNSPECIFIED, VACUUM_ERRCB_PHASE_UNKNOWN, vacuum_error_callback(), vacuum_get_cutoffs(), VacuumFailsafeActive, LVRelState::verbose, verbose, visibilitymap_count(), LVRelState::vistest, LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, 
LVRelState::vm_new_visible_pages, WalUsage::wal_buffers_full, WalUsage::wal_bytes, WalUsage::wal_fpi, WalUsage::wal_records, and WalUsageAccumDiff().
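
The throughput figures in the log message assembled above are derived values: block counts are converted to bytes with BLCKSZ, scaled to megabytes, and divided by the elapsed time. A minimal standalone sketch of that arithmetic (hypothetical counter values; BLOCK_SIZE stands in for BLCKSZ):

/*
 * Stand-alone sketch (not part of vacuumlazy.c) of the "avg read rate /
 * avg write rate" arithmetic used in the VACUUM log output above.
 */
#include <stdio.h>
#include <stdint.h>

#define BLOCK_SIZE 8192         /* assumed block size standing in for BLCKSZ */

int
main(void)
{
    int64_t total_blks_read = 12000;      /* hypothetical counters */
    int64_t total_blks_dirtied = 3000;
    long    secs_dur = 2;
    int     usecs_dur = 500000;
    double  elapsed = secs_dur + usecs_dur / 1000000.0;
    double  read_rate = 0,
            write_rate = 0;

    if (elapsed > 0)
    {
        /* bytes transferred, scaled to MB, divided by elapsed seconds */
        read_rate = (double) BLOCK_SIZE * total_blks_read / (1024 * 1024) / elapsed;
        write_rate = (double) BLOCK_SIZE * total_blks_dirtied / (1024 * 1024) / elapsed;
    }

    printf("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n",
           read_rate, write_rate);
    return 0;
}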

◆ lazy_check_wraparound_failsafe()

static bool lazy_check_wraparound_failsafe ( LVRelState vacrel)
static

Definition at line 2960 of file vacuumlazy.c.

2961{
2962 /* Don't warn more than once per VACUUM */
2963 if (VacuumFailsafeActive)
2964 return true;
2965
2966 if (unlikely(vacuum_xid_failsafe_check(&vacrel->cutoffs)))
2967 {
2968 const int progress_index[] = {
2969 PROGRESS_VACUUM_INDEXES_TOTAL,
2970 PROGRESS_VACUUM_INDEXES_PROCESSED
2971 };
2972 int64 progress_val[2] = {0, 0};
2973
2974 VacuumFailsafeActive = true;
2975
2976 /*
2977 * Abandon use of a buffer access strategy to allow use of all of
2978 * shared buffers. We assume the caller who allocated the memory for
2979 * the BufferAccessStrategy will free it.
2980 */
2981 vacrel->bstrategy = NULL;
2982
2983 /* Disable index vacuuming, index cleanup, and heap rel truncation */
2984 vacrel->do_index_vacuuming = false;
2985 vacrel->do_index_cleanup = false;
2986 vacrel->do_rel_truncate = false;
2987
2988 /* Reset the progress counters */
2989 pgstat_progress_update_multi_param(2, progress_index, progress_val);
2990
2991 ereport(WARNING,
2992 (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
2993 vacrel->dbname, vacrel->relnamespace, vacrel->relname,
2994 vacrel->num_index_scans),
2995 errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
2996 errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
2997 "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
2998
2999 /* Stop applying cost limits from this point on */
3000 VacuumCostActive = false;
3001 VacuumCostBalance = 0;
3002
3003 return true;
3004 }
3005
3006 return false;
3007}
#define unlikely(x)
Definition: c.h:347
int errdetail(const char *fmt,...)
Definition: elog.c:1204
int errhint(const char *fmt,...)
Definition: elog.c:1318
bool VacuumCostActive
Definition: globals.c:159
int VacuumCostBalance
Definition: globals.c:158
#define PROGRESS_VACUUM_INDEXES_PROCESSED
Definition: progress.h:30
#define PROGRESS_VACUUM_INDEXES_TOTAL
Definition: progress.h:29
bool vacuum_xid_failsafe_check(const struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:1272

References LVRelState::bstrategy, LVRelState::cutoffs, LVRelState::dbname, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errdetail(), errhint(), errmsg(), LVRelState::num_index_scans, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, LVRelState::relname, LVRelState::relnamespace, unlikely, vacuum_xid_failsafe_check(), VacuumCostActive, VacuumCostBalance, VacuumFailsafeActive, and WARNING.

Referenced by heap_vacuum_rel(), lazy_scan_heap(), and lazy_vacuum_all_indexes().
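
The overall shape of the failsafe check can be summarized apart from the backend internals: once active it short-circuits, and when the age check first trips it permanently disables the nonessential work for the rest of the VACUUM. A simplified, self-contained sketch under those assumptions (the struct, the age check, and the threshold are stand-ins, not the real LVRelState or vacuum_xid_failsafe_check()):

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

typedef struct MockVacState
{
    bool     failsafe_active;
    bool     do_index_vacuuming;
    bool     do_index_cleanup;
    bool     do_rel_truncate;
    uint32_t relfrozenxid_age;   /* hypothetical precomputed age */
} MockVacState;

/* stand-in for the real age check: trip once past an assumed limit */
static bool
mock_xid_failsafe_check(const MockVacState *vac, uint32_t failsafe_age)
{
    return vac->relfrozenxid_age > failsafe_age;
}

static bool
mock_check_wraparound_failsafe(MockVacState *vac, uint32_t failsafe_age)
{
    /* Don't warn more than once per VACUUM */
    if (vac->failsafe_active)
        return true;

    if (mock_xid_failsafe_check(vac, failsafe_age))
    {
        vac->failsafe_active = true;

        /* Disable index vacuuming, index cleanup, and heap rel truncation */
        vac->do_index_vacuuming = false;
        vac->do_index_cleanup = false;
        vac->do_rel_truncate = false;

        printf("failsafe triggered: bypassing nonessential maintenance\n");
        return true;
    }
    return false;
}

int
main(void)
{
    MockVacState vac = {false, true, true, true, 1900000000};

    mock_check_wraparound_failsafe(&vac, 1600000000);
    return 0;
}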

◆ lazy_cleanup_all_indexes()

static void lazy_cleanup_all_indexes ( LVRelState vacrel)
static

Definition at line 3013 of file vacuumlazy.c.

3014{
3015 double reltuples = vacrel->new_rel_tuples;
3016 bool estimated_count = vacrel->scanned_pages < vacrel->rel_pages;
3017 const int progress_start_index[] = {
3018 PROGRESS_VACUUM_PHASE,
3019 PROGRESS_VACUUM_INDEXES_TOTAL
3020 };
3021 const int progress_end_index[] = {
3022 PROGRESS_VACUUM_INDEXES_TOTAL,
3023 PROGRESS_VACUUM_INDEXES_PROCESSED
3024 };
3025 int64 progress_start_val[2];
3026 int64 progress_end_val[2] = {0, 0};
3027
3028 Assert(vacrel->do_index_cleanup);
3029 Assert(vacrel->nindexes > 0);
3030
3031 /*
3032 * Report that we are now cleaning up indexes and the number of indexes to
3033 * cleanup.
3034 */
3035 progress_start_val[0] = PROGRESS_VACUUM_PHASE_INDEX_CLEANUP;
3036 progress_start_val[1] = vacrel->nindexes;
3037 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
3038
3039 if (!ParallelVacuumIsActive(vacrel))
3040 {
3041 for (int idx = 0; idx < vacrel->nindexes; idx++)
3042 {
3043 Relation indrel = vacrel->indrels[idx];
3044 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
3045
3046 vacrel->indstats[idx] =
3047 lazy_cleanup_one_index(indrel, istat, reltuples,
3048 estimated_count, vacrel);
3049
3050 /* Report the number of indexes cleaned up */
3051 pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
3052 idx + 1);
3053 }
3054 }
3055 else
3056 {
3057 /* Outsource everything to parallel variant */
3058 parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
3059 vacrel->num_index_scans,
3060 estimated_count);
3061 }
3062
3063 /* Reset the progress counters */
3064 pgstat_progress_update_multi_param(2, progress_end_index, progress_end_val);
3065}
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:262
#define PROGRESS_VACUUM_PHASE_INDEX_CLEANUP
Definition: progress.h:37
static IndexBulkDeleteResult * lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
Definition: vacuumlazy.c:3130
void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, bool estimated_count)

References Assert(), LVRelState::do_index_cleanup, idx(), LVRelState::indrels, LVRelState::indstats, lazy_cleanup_one_index(), LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::num_index_scans, parallel_vacuum_cleanup_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, LVRelState::pvs, LVRelState::rel_pages, and LVRelState::scanned_pages.

Referenced by lazy_scan_heap().
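
When no parallel workers are involved, cleanup is a plain loop over the indexes, with per-index progress reported as idx + 1 and the tuple count flagged as an estimate whenever fewer than rel_pages pages were scanned. A reduced sketch of that loop shape, using stand-in types rather than the real backend APIs:

#include <stdio.h>
#include <stdbool.h>

typedef struct MockIndex { const char *name; } MockIndex;

static void
mock_cleanup_one_index(const MockIndex *idx, double reltuples, bool estimated)
{
    printf("cleanup %s: reltuples=%.0f (%s)\n",
           idx->name, reltuples, estimated ? "estimated" : "exact");
}

int
main(void)
{
    MockIndex indexes[] = {{"idx_a"}, {"idx_b"}};
    unsigned  scanned_pages = 80, rel_pages = 100;
    double    new_rel_tuples = 12345;
    /* the count is only an estimate unless every page was scanned */
    bool      estimated_count = scanned_pages < rel_pages;

    for (int i = 0; i < 2; i++)
    {
        mock_cleanup_one_index(&indexes[i], new_rel_tuples, estimated_count);
        printf("indexes processed: %d\n", i + 1);   /* progress: idx + 1 */
    }
    return 0;
}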

◆ lazy_cleanup_one_index()

static IndexBulkDeleteResult * lazy_cleanup_one_index ( Relation  indrel,
IndexBulkDeleteResult istat,
double  reltuples,
bool  estimated_count,
LVRelState vacrel 
)
static

Definition at line 3130 of file vacuumlazy.c.

3133{
3134 IndexVacuumInfo ivinfo;
3135 LVSavedErrInfo saved_err_info;
3136
3137 ivinfo.index = indrel;
3138 ivinfo.heaprel = vacrel->rel;
3139 ivinfo.analyze_only = false;
3140 ivinfo.report_progress = false;
3141 ivinfo.estimated_count = estimated_count;
3142 ivinfo.message_level = DEBUG2;
3143
3144 ivinfo.num_heap_tuples = reltuples;
3145 ivinfo.strategy = vacrel->bstrategy;
3146
3147 /*
3148 * Update error traceback information.
3149 *
3150 * The index name is saved during this phase and restored immediately
3151 * after this phase. See vacuum_error_callback.
3152 */
3153 Assert(vacrel->indname == NULL);
3154 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
3155 update_vacuum_error_info(vacrel, &saved_err_info,
3156 VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
3157 InvalidBlockNumber, InvalidOffsetNumber);
3158
3159 istat = vac_cleanup_one_index(&ivinfo, istat);
3160
3161 /* Revert to the previous phase information for error traceback */
3162 restore_vacuum_error_info(vacrel, &saved_err_info);
3163 pfree(vacrel->indname);
3164 vacrel->indname = NULL;
3165
3166 return istat;
3167}
Relation index
Definition: genam.h:69
double num_heap_tuples
Definition: genam.h:75
bool analyze_only
Definition: genam.h:71
BufferAccessStrategy strategy
Definition: genam.h:76
Relation heaprel
Definition: genam.h:70
bool report_progress
Definition: genam.h:72
int message_level
Definition: genam.h:74
bool estimated_count
Definition: genam.h:73
IndexBulkDeleteResult * vac_cleanup_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat)
Definition: vacuum.c:2632
static void restore_vacuum_error_info(LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
Definition: vacuumlazy.c:3849
static void update_vacuum_error_info(LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
Definition: vacuumlazy.c:3830

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_cleanup_one_index(), and VACUUM_ERRCB_PHASE_INDEX_CLEANUP.

Referenced by lazy_cleanup_all_indexes().
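
The index name and phase are swapped into the error-context state before calling the index AM and restored immediately afterwards, so any error raised in between reports the right index. A stand-alone sketch of that save/update/restore pattern (the types and helpers are illustrative stand-ins, not the real update_vacuum_error_info/restore_vacuum_error_info):

#include <stdio.h>

typedef enum MockPhase { PHASE_UNKNOWN, PHASE_SCAN_HEAP, PHASE_INDEX_CLEANUP } MockPhase;

typedef struct MockErrInfo { MockPhase phase; const char *indname; } MockErrInfo;

static void
mock_update_err_info(MockErrInfo *cur, MockErrInfo *saved, MockPhase phase,
                     const char *indname)
{
    *saved = *cur;              /* remember previous phase for the callback */
    cur->phase = phase;
    cur->indname = indname;
}

static void
mock_restore_err_info(MockErrInfo *cur, const MockErrInfo *saved)
{
    *cur = *saved;              /* revert once this phase completes */
}

int
main(void)
{
    MockErrInfo cur = {PHASE_SCAN_HEAP, NULL};
    MockErrInfo saved;

    mock_update_err_info(&cur, &saved, PHASE_INDEX_CLEANUP, "idx_a");
    printf("phase=%d index=%s\n", cur.phase, cur.indname);

    mock_restore_err_info(&cur, &saved);
    printf("phase=%d restored\n", cur.phase);
    return 0;
}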

◆ lazy_scan_heap()

static void lazy_scan_heap ( LVRelState vacrel)
static

Definition at line 1199 of file vacuumlazy.c.

1200{
1201 ReadStream *stream;
1202 BlockNumber rel_pages = vacrel->rel_pages,
1203 blkno = 0,
1204 next_fsm_block_to_vacuum = 0;
1205 BlockNumber orig_eager_scan_success_limit =
1206 vacrel->eager_scan_remaining_successes; /* for logging */
1207 Buffer vmbuffer = InvalidBuffer;
1208 const int initprog_index[] = {
1209 PROGRESS_VACUUM_PHASE,
1210 PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
1211 PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES
1212 };
1213 int64 initprog_val[3];
1214
1215 /* Report that we're scanning the heap, advertising total # of blocks */
1216 initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
1217 initprog_val[1] = rel_pages;
1218 initprog_val[2] = vacrel->dead_items_info->max_bytes;
1219 pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
1220
1221 /* Initialize for the first heap_vac_scan_next_block() call */
1222 vacrel->current_block = InvalidBlockNumber;
1223 vacrel->next_unskippable_block = InvalidBlockNumber;
1224 vacrel->next_unskippable_allvis = false;
1225 vacrel->next_unskippable_eager_scanned = false;
1226 vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1227
1228 /*
1229 * Set up the read stream for vacuum's first pass through the heap.
1230 *
1231 * This could be made safe for READ_STREAM_USE_BATCHING, but only with
1232 * explicit work in heap_vac_scan_next_block.
1233 */
1234 stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE,
1235 vacrel->bstrategy,
1236 vacrel->rel,
1237 MAIN_FORKNUM,
1238 heap_vac_scan_next_block,
1239 vacrel,
1240 sizeof(uint8));
1241
1242 while (true)
1243 {
1244 Buffer buf;
1245 Page page;
1246 uint8 blk_info = 0;
1247 bool has_lpdead_items;
1248 void *per_buffer_data = NULL;
1249 bool vm_page_frozen = false;
1250 bool got_cleanup_lock = false;
1251
1252 vacuum_delay_point(false);
1253
1254 /*
1255 * Regularly check if wraparound failsafe should trigger.
1256 *
1257 * There is a similar check inside lazy_vacuum_all_indexes(), but
1258 * relfrozenxid might start to look dangerously old before we reach
1259 * that point. This check also provides failsafe coverage for the
1260 * one-pass strategy, and the two-pass strategy with the index_cleanup
1261 * param set to 'off'.
1262 */
1263 if (vacrel->scanned_pages > 0 &&
1264 vacrel->scanned_pages % FAILSAFE_EVERY_PAGES == 0)
1265 lazy_check_wraparound_failsafe(vacrel);
1266
1267 /*
1268 * Consider if we definitely have enough space to process TIDs on page
1269 * already. If we are close to overrunning the available space for
1270 * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
1271 * this page. However, let's force at least one page-worth of tuples
1272 * to be stored as to ensure we do at least some work when the memory
1273 * configured is so low that we run out before storing anything.
1274 */
1275 if (vacrel->dead_items_info->num_items > 0 &&
1276 TidStoreMemoryUsage(vacrel->dead_items) > vacrel->dead_items_info->max_bytes)
1277 {
1278 /*
1279 * Before beginning index vacuuming, we release any pin we may
1280 * hold on the visibility map page. This isn't necessary for
1281 * correctness, but we do it anyway to avoid holding the pin
1282 * across a lengthy, unrelated operation.
1283 */
1284 if (BufferIsValid(vmbuffer))
1285 {
1286 ReleaseBuffer(vmbuffer);
1287 vmbuffer = InvalidBuffer;
1288 }
1289
1290 /* Perform a round of index and heap vacuuming */
1291 vacrel->consider_bypass_optimization = false;
1292 lazy_vacuum(vacrel);
1293
1294 /*
1295 * Vacuum the Free Space Map to make newly-freed space visible on
1296 * upper-level FSM pages. Note that blkno is the previously
1297 * processed block.
1298 */
1299 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1300 blkno + 1);
1301 next_fsm_block_to_vacuum = blkno;
1302
1303 /* Report that we are once again scanning the heap */
1304 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1305 PROGRESS_VACUUM_PHASE_SCAN_HEAP);
1306 }
1307
1308 buf = read_stream_next_buffer(stream, &per_buffer_data);
1309
1310 /* The relation is exhausted. */
1311 if (!BufferIsValid(buf))
1312 break;
1313
1314 blk_info = *((uint8 *) per_buffer_data);
1315 CheckBufferIsPinnedOnce(buf);
1316 page = BufferGetPage(buf);
1317 blkno = BufferGetBlockNumber(buf);
1318
1319 vacrel->scanned_pages++;
1320 if (blk_info & VAC_BLK_WAS_EAGER_SCANNED)
1321 vacrel->eager_scanned_pages++;
1322
1323 /* Report as block scanned, update error traceback information */
1324 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1325 update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_SCAN_HEAP,
1326 blkno, InvalidOffsetNumber);
1327
1328 /*
1329 * Pin the visibility map page in case we need to mark the page
1330 * all-visible. In most cases this will be very cheap, because we'll
1331 * already have the correct page pinned anyway.
1332 */
1333 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
1334
1335 /*
1336 * We need a buffer cleanup lock to prune HOT chains and defragment
1337 * the page in lazy_scan_prune. But when it's not possible to acquire
1338 * a cleanup lock right away, we may be able to settle for reduced
1339 * processing using lazy_scan_noprune.
1340 */
1341 got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
1342
1343 if (!got_cleanup_lock)
1344 LockBuffer(buf, BUFFER_LOCK_SHARE);
1345
1346 /* Check for new or empty pages before lazy_scan_[no]prune call */
1347 if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock,
1348 vmbuffer))
1349 {
1350 /* Processed as new/empty page (lock and pin released) */
1351 continue;
1352 }
1353
1354 /*
1355 * If we didn't get the cleanup lock, we can still collect LP_DEAD
1356 * items in the dead_items area for later vacuuming, count live and
1357 * recently dead tuples for vacuum logging, and determine if this
1358 * block could later be truncated. If we encounter any xid/mxids that
1359 * require advancing the relfrozenxid/relminxid, we'll have to wait
1360 * for a cleanup lock and call lazy_scan_prune().
1361 */
1362 if (!got_cleanup_lock &&
1363 !lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
1364 {
1365 /*
1366 * lazy_scan_noprune could not do all required processing. Wait
1367 * for a cleanup lock, and call lazy_scan_prune in the usual way.
1368 */
1369 Assert(vacrel->aggressive);
1370 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1371 LockBufferForCleanup(buf);
1372 got_cleanup_lock = true;
1373 }
1374
1375 /*
1376 * If we have a cleanup lock, we must now prune, freeze, and count
1377 * tuples. We may have acquired the cleanup lock originally, or we may
1378 * have gone back and acquired it after lazy_scan_noprune() returned
1379 * false. Either way, the page hasn't been processed yet.
1380 *
1381 * Like lazy_scan_noprune(), lazy_scan_prune() will count
1382 * recently_dead_tuples and live tuples for vacuum logging, determine
1383 * if the block can later be truncated, and accumulate the details of
1384 * remaining LP_DEAD line pointers on the page into dead_items. These
1385 * dead items include those pruned by lazy_scan_prune() as well as
1386 * line pointers previously marked LP_DEAD.
1387 */
1388 if (got_cleanup_lock)
1389 lazy_scan_prune(vacrel, buf, blkno, page,
1390 vmbuffer,
1391 blk_info & VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM,
1392 &has_lpdead_items, &vm_page_frozen);
1393
1394 /*
1395 * Count an eagerly scanned page as a failure or a success.
1396 *
1397 * Only lazy_scan_prune() freezes pages, so if we didn't get the
1398 * cleanup lock, we won't have frozen the page. However, we only count
1399 * pages that were too new to require freezing as eager freeze
1400 * failures.
1401 *
1402 * We could gather more information from lazy_scan_noprune() about
1403 * whether or not there were tuples with XIDs or MXIDs older than the
1404 * FreezeLimit or MultiXactCutoff. However, for simplicity, we simply
1405 * exclude pages skipped due to cleanup lock contention from eager
1406 * freeze algorithm caps.
1407 */
1408 if (got_cleanup_lock &&
1409 (blk_info & VAC_BLK_WAS_EAGER_SCANNED))
1410 {
1411 /* Aggressive vacuums do not eager scan. */
1412 Assert(!vacrel->aggressive);
1413
1414 if (vm_page_frozen)
1415 {
1416 Assert(vacrel->eager_scan_remaining_successes > 0);
1417 vacrel->eager_scan_remaining_successes--;
1418
1419 if (vacrel->eager_scan_remaining_successes == 0)
1420 {
1421 /*
1422 * If we hit our success cap, permanently disable eager
1423 * scanning by setting the other eager scan management
1424 * fields to their disabled values.
1425 */
1426 vacrel->eager_scan_remaining_fails = 0;
1427 vacrel->next_eager_scan_region_start = InvalidBlockNumber;
1428 vacrel->eager_scan_max_fails_per_region = 0;
1429
1430 ereport(vacrel->verbose ? INFO : DEBUG2,
1431 (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of \"%s.%s.%s\"",
1432 orig_eager_scan_success_limit,
1433 vacrel->dbname, vacrel->relnamespace,
1434 vacrel->relname)));
1435 }
1436 }
1437 else
1438 {
1439 Assert(vacrel->eager_scan_remaining_fails > 0);
1440 vacrel->eager_scan_remaining_fails--;
1441 }
1442 }
1443
1444 /*
1445 * Now drop the buffer lock and, potentially, update the FSM.
1446 *
1447 * Our goal is to update the freespace map the last time we touch the
1448 * page. If we'll process a block in the second pass, we may free up
1449 * additional space on the page, so it is better to update the FSM
1450 * after the second pass. If the relation has no indexes, or if index
1451 * vacuuming is disabled, there will be no second heap pass; if this
1452 * particular page has no dead items, the second heap pass will not
1453 * touch this page. So, in those cases, update the FSM now.
1454 *
1455 * Note: In corner cases, it's possible to miss updating the FSM
1456 * entirely. If index vacuuming is currently enabled, we'll skip the
1457 * FSM update now. But if failsafe mode is later activated, or there
1458 * are so few dead tuples that index vacuuming is bypassed, there will
1459 * also be no opportunity to update the FSM later, because we'll never
1460 * revisit this page. Since updating the FSM is desirable but not
1461 * absolutely required, that's OK.
1462 */
1463 if (vacrel->nindexes == 0
1464 || !vacrel->do_index_vacuuming
1465 || !has_lpdead_items)
1466 {
1467 Size freespace = PageGetHeapFreeSpace(page);
1468
1469 UnlockReleaseBuffer(buf);
1470 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1471
1472 /*
1473 * Periodically perform FSM vacuuming to make newly-freed space
1474 * visible on upper FSM pages. This is done after vacuuming if the
1475 * table has indexes. There will only be newly-freed space if we
1476 * held the cleanup lock and lazy_scan_prune() was called.
1477 */
1478 if (got_cleanup_lock && vacrel->nindexes == 0 && has_lpdead_items &&
1479 blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1480 {
1481 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1482 blkno);
1483 next_fsm_block_to_vacuum = blkno;
1484 }
1485 }
1486 else
1487 UnlockReleaseBuffer(buf);
1488 }
1489
1490 vacrel->blkno = InvalidBlockNumber;
1491 if (BufferIsValid(vmbuffer))
1492 ReleaseBuffer(vmbuffer);
1493
1494 /*
1495 * Report that everything is now scanned. We never skip scanning the last
1496 * block in the relation, so we can pass rel_pages here.
1497 */
1498 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED,
1499 rel_pages);
1500
1501 /* now we can compute the new value for pg_class.reltuples */
1502 vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, rel_pages,
1503 vacrel->scanned_pages,
1504 vacrel->live_tuples);
1505
1506 /*
1507 * Also compute the total number of surviving heap entries. In the
1508 * (unlikely) scenario that new_live_tuples is -1, take it as zero.
1509 */
1510 vacrel->new_rel_tuples =
1511 Max(vacrel->new_live_tuples, 0) + vacrel->recently_dead_tuples +
1512 vacrel->missed_dead_tuples;
1513
1514 read_stream_end(stream);
1515
1516 /*
1517 * Do index vacuuming (call each index's ambulkdelete routine), then do
1518 * related heap vacuuming
1519 */
1520 if (vacrel->dead_items_info->num_items > 0)
1521 lazy_vacuum(vacrel);
1522
1523 /*
1524 * Vacuum the remainder of the Free Space Map. We must do this whether or
1525 * not there were indexes, and whether or not we bypassed index vacuuming.
1526 * We can pass rel_pages here because we never skip scanning the last
1527 * block of the relation.
1528 */
1529 if (rel_pages > next_fsm_block_to_vacuum)
1530 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, rel_pages);
1531
1532 /* report all blocks vacuumed */
1533 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1534
1535 /* Do final index cleanup (call each index's amvacuumcleanup routine) */
1536 if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
1537 lazy_cleanup_all_indexes(vacrel);
1538}
void CheckBufferIsPinnedOnce(Buffer buffer)
Definition: bufmgr.c:5654
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5687
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5855
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:196
Size PageGetHeapFreeSpace(const PageData *page)
Definition: bufpage.c:990
void FreeSpaceMapVacuumRange(Relation rel, BlockNumber start, BlockNumber end)
Definition: freespace.c:377
void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
Definition: freespace.c:194
#define PROGRESS_VACUUM_PHASE_SCAN_HEAP
Definition: progress.h:34
#define PROGRESS_VACUUM_TOTAL_HEAP_BLKS
Definition: progress.h:22
#define PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES
Definition: progress.h:26
#define PROGRESS_VACUUM_HEAP_BLKS_SCANNED
Definition: progress.h:23
#define PROGRESS_VACUUM_HEAP_BLKS_VACUUMED
Definition: progress.h:24
Buffer read_stream_next_buffer(ReadStream *stream, void **per_buffer_data)
Definition: read_stream.c:770
ReadStream * read_stream_begin_relation(int flags, BufferAccessStrategy strategy, Relation rel, ForkNumber forknum, ReadStreamBlockNumberCB callback, void *callback_private_data, size_t per_buffer_data_size)
Definition: read_stream.c:716
void read_stream_end(ReadStream *stream)
Definition: read_stream.c:1055
#define READ_STREAM_MAINTENANCE
Definition: read_stream.h:28
BlockNumber blkno
Definition: vacuumlazy.c:295
void vacuum_delay_point(bool is_analyze)
Definition: vacuum.c:2404
double vac_estimate_reltuples(Relation relation, BlockNumber total_pages, BlockNumber scanned_pages, double scanned_tuples)
Definition: vacuum.c:1334
static void lazy_scan_prune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool all_visible_according_to_vm, bool *has_lpdead_items, bool *vm_page_frozen)
Definition: vacuumlazy.c:1945
static BlockNumber heap_vac_scan_next_block(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition: vacuumlazy.c:1565
static void lazy_vacuum(LVRelState *vacrel)
Definition: vacuumlazy.c:2449
static void lazy_cleanup_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:3013
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
Definition: vacuumlazy.c:2238
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
Definition: vacuumlazy.c:1802
#define FAILSAFE_EVERY_PAGES
Definition: vacuumlazy.c:193
#define VACUUM_FSM_EVERY_PAGES
Definition: vacuumlazy.c:202
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)

References LVRelState::aggressive, Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), CheckBufferIsPinnedOnce(), ConditionalLockBufferForCleanup(), LVRelState::consider_bypass_optimization, LVRelState::current_block, LVRelState::dbname, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::eager_scan_max_fails_per_region, LVRelState::eager_scan_remaining_fails, LVRelState::eager_scan_remaining_successes, LVRelState::eager_scanned_pages, ereport, errmsg(), FAILSAFE_EVERY_PAGES, FreeSpaceMapVacuumRange(), heap_vac_scan_next_block(), INFO, InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_check_wraparound_failsafe(), lazy_cleanup_all_indexes(), lazy_scan_new_or_empty(), lazy_scan_noprune(), lazy_scan_prune(), lazy_vacuum(), LVRelState::live_tuples, LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, Max, VacDeadItemsInfo::max_bytes, LVRelState::missed_dead_tuples, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::next_eager_scan_region_start, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::nindexes, VacDeadItemsInfo::num_items, PageGetHeapFreeSpace(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, read_stream_begin_relation(), read_stream_end(), READ_STREAM_MAINTENANCE, read_stream_next_buffer(), LVRelState::recently_dead_tuples, RecordPageWithFreeSpace(), LVRelState::rel, LVRelState::rel_pages, ReleaseBuffer(), LVRelState::relname, LVRelState::relnamespace, LVRelState::scanned_pages, TidStoreMemoryUsage(), UnlockReleaseBuffer(), update_vacuum_error_info(), VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM, VAC_BLK_WAS_EAGER_SCANNED, vac_estimate_reltuples(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_FSM_EVERY_PAGES, LVRelState::verbose, and visibilitymap_pin().

Referenced by heap_vacuum_rel().
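
One detail worth isolating from the loop above is the eager-scan accounting: each eagerly scanned block is counted either against the remaining failure budget or against the remaining success cap, and hitting the success cap disables eager scanning for the rest of the VACUUM. A self-contained sketch of that bookkeeping (hypothetical counter values, not the real LVRelState):

#include <stdbool.h>
#include <stdio.h>

typedef struct MockEagerState
{
    unsigned remaining_successes;
    unsigned remaining_fails;
    bool     eager_scan_disabled;
} MockEagerState;

static void
mock_count_eager_block(MockEagerState *st, bool vm_page_frozen)
{
    if (st->eager_scan_disabled)
        return;

    if (vm_page_frozen)
    {
        if (--st->remaining_successes == 0)
        {
            /* success cap reached: permanently disable eager scanning */
            st->remaining_fails = 0;
            st->eager_scan_disabled = true;
            printf("disabling eager scanning after hitting the success cap\n");
        }
    }
    else
        --st->remaining_fails;
}

int
main(void)
{
    MockEagerState st = {2, 8, false};

    mock_count_eager_block(&st, true);
    mock_count_eager_block(&st, false);
    mock_count_eager_block(&st, true);   /* hits the cap, disables */
    return 0;
}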

◆ lazy_scan_new_or_empty()

static bool lazy_scan_new_or_empty ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool  sharelock,
Buffer  vmbuffer 
)
static

Definition at line 1802 of file vacuumlazy.c.

1804{
1805 Size freespace;
1806
1807 if (PageIsNew(page))
1808 {
1809 /*
1810 * All-zeroes pages can be left over if either a backend extends the
1811 * relation by a single page, but crashes before the newly initialized
1812 * page has been written out, or when bulk-extending the relation
1813 * (which creates a number of empty pages at the tail end of the
1814 * relation), and then enters them into the FSM.
1815 *
1816 * Note we do not enter the page into the visibilitymap. That has the
1817 * downside that we repeatedly visit this page in subsequent vacuums,
1818 * but otherwise we'll never discover the space on a promoted standby.
1819 * The harm of repeated checking ought to normally not be too bad. The
1820 * space usually should be used at some point, otherwise there
1821 * wouldn't be any regular vacuums.
1822 *
1823 * Make sure these pages are in the FSM, to ensure they can be reused.
1824 * Do that by testing if there's any space recorded for the page. If
1825 * not, enter it. We do so after releasing the lock on the heap page,
1826 * the FSM is approximate, after all.
1827 */
1828 UnlockReleaseBuffer(buf);
1829
1830 if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
1831 {
1832 freespace = BLCKSZ - SizeOfPageHeaderData;
1833
1834 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1835 }
1836
1837 return true;
1838 }
1839
1840 if (PageIsEmpty(page))
1841 {
1842 /*
1843 * It seems likely that caller will always be able to get a cleanup
1844 * lock on an empty page. But don't take any chances -- escalate to
1845 * an exclusive lock (still don't need a cleanup lock, though).
1846 */
1847 if (sharelock)
1848 {
1849 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1850 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
1851
1852 if (!PageIsEmpty(page))
1853 {
1854 /* page isn't new or empty -- keep lock and pin for now */
1855 return false;
1856 }
1857 }
1858 else
1859 {
1860 /* Already have a full cleanup lock (which is more than enough) */
1861 }
1862
1863 /*
1864 * Unlike new pages, empty pages are always set all-visible and
1865 * all-frozen.
1866 */
1867 if (!PageIsAllVisible(page))
1868 {
1869 uint8 old_vmbits;
1870
1871 START_CRIT_SECTION();
1872
1873 /* mark buffer dirty before writing a WAL record */
1874 MarkBufferDirty(buf);
1875
1876 /*
1877 * It's possible that another backend has extended the heap,
1878 * initialized the page, and then failed to WAL-log the page due
1879 * to an ERROR. Since heap extension is not WAL-logged, recovery
1880 * might try to replay our record setting the page all-visible and
1881 * find that the page isn't initialized, which will cause a PANIC.
1882 * To prevent that, check whether the page has been previously
1883 * WAL-logged, and if not, do that now.
1884 */
1885 if (RelationNeedsWAL(vacrel->rel) &&
1886 PageGetLSN(page) == InvalidXLogRecPtr)
1887 log_newpage_buffer(buf, true);
1888
1889 PageSetAllVisible(page);
1890 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
1891 InvalidXLogRecPtr,
1892 vmbuffer, InvalidTransactionId,
1893 VISIBILITYMAP_ALL_VISIBLE |
1894 VISIBILITYMAP_ALL_FROZEN);
1895 END_CRIT_SECTION();
1896
1897 /*
1898 * If the page wasn't already set all-visible and/or all-frozen in
1899 * the VM, count it as newly set for logging.
1900 */
1901 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
1902 {
1903 vacrel->vm_new_visible_pages++;
1904 vacrel->vm_new_visible_frozen_pages++;
1905 }
1906 else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0)
1907 vacrel->vm_new_frozen_pages++;
1908 }
1909
1910 freespace = PageGetHeapFreeSpace(page);
1911 UnlockReleaseBuffer(buf);
1912 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1913 return true;
1914 }
1915
1916 /* page isn't new or empty -- keep lock and pin */
1917 return false;
1918}
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2952
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:198
static bool PageIsAllVisible(const PageData *page)
Definition: bufpage.h:429
#define SizeOfPageHeaderData
Definition: bufpage.h:217
static void PageSetAllVisible(Page page)
Definition: bufpage.h:434
static XLogRecPtr PageGetLSN(const PageData *page)
Definition: bufpage.h:386
Size GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk)
Definition: freespace.c:244
#define START_CRIT_SECTION()
Definition: miscadmin.h:150
#define END_CRIT_SECTION()
Definition: miscadmin.h:152
#define RelationNeedsWAL(relation)
Definition: rel.h:639
uint8 visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
Definition: xloginsert.c:1237

References buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, END_CRIT_SECTION, GetRecordedFreeSpace(), InvalidTransactionId, InvalidXLogRecPtr, LockBuffer(), log_newpage_buffer(), MarkBufferDirty(), PageGetHeapFreeSpace(), PageGetLSN(), PageIsAllVisible(), PageIsEmpty(), PageIsNew(), PageSetAllVisible(), RecordPageWithFreeSpace(), LVRelState::rel, RelationNeedsWAL, SizeOfPageHeaderData, START_CRIT_SECTION, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, and LVRelState::vm_new_visible_pages.

Referenced by lazy_scan_heap().
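
The function distinguishes two cheap cases before normal pruning: all-zero (new) pages only need an FSM entry, while empty pages are additionally set all-visible and all-frozen. A reduced sketch of that decision, with stand-in page states and helpers:

#include <stdbool.h>
#include <stdio.h>

typedef enum MockPageState { PAGE_NEW, PAGE_EMPTY, PAGE_HAS_TUPLES } MockPageState;

/* returns true when the caller is done with the page */
static bool
mock_scan_new_or_empty(MockPageState state, bool *set_all_visible)
{
    *set_all_visible = false;

    if (state == PAGE_NEW)
    {
        printf("new page: record free space only\n");
        return true;
    }
    if (state == PAGE_EMPTY)
    {
        /* empty pages are always set all-visible and all-frozen */
        *set_all_visible = true;
        printf("empty page: set all-visible/all-frozen, record free space\n");
        return true;
    }
    return false;               /* keep lock and pin; prune as usual */
}

int
main(void)
{
    bool vmbit;

    mock_scan_new_or_empty(PAGE_NEW, &vmbit);
    mock_scan_new_or_empty(PAGE_EMPTY, &vmbit);
    mock_scan_new_or_empty(PAGE_HAS_TUPLES, &vmbit);
    return 0;
}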

◆ lazy_scan_noprune()

static bool lazy_scan_noprune ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool *  has_lpdead_items 
)
static

Definition at line 2238 of file vacuumlazy.c.

2243{
2244 OffsetNumber offnum,
2245 maxoff;
2246 int lpdead_items,
2247 live_tuples,
2248 recently_dead_tuples,
2249 missed_dead_tuples;
2250 bool hastup;
2251 HeapTupleHeader tupleheader;
2252 TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
2253 MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid;
2254 OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
2255
2256 Assert(BufferGetBlockNumber(buf) == blkno);
2257
2258 hastup = false; /* for now */
2259
2260 lpdead_items = 0;
2261 live_tuples = 0;
2262 recently_dead_tuples = 0;
2263 missed_dead_tuples = 0;
2264
2265 maxoff = PageGetMaxOffsetNumber(page);
2266 for (offnum = FirstOffsetNumber;
2267 offnum <= maxoff;
2268 offnum = OffsetNumberNext(offnum))
2269 {
2270 ItemId itemid;
2271 HeapTupleData tuple;
2272
2273 vacrel->offnum = offnum;
2274 itemid = PageGetItemId(page, offnum);
2275
2276 if (!ItemIdIsUsed(itemid))
2277 continue;
2278
2279 if (ItemIdIsRedirected(itemid))
2280 {
2281 hastup = true;
2282 continue;
2283 }
2284
2285 if (ItemIdIsDead(itemid))
2286 {
2287 /*
2288 * Deliberately don't set hastup=true here. See same point in
2289 * lazy_scan_prune for an explanation.
2290 */
2291 deadoffsets[lpdead_items++] = offnum;
2292 continue;
2293 }
2294
2295 hastup = true; /* page prevents rel truncation */
2296 tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
2297 if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
2298 &NoFreezePageRelfrozenXid,
2299 &NoFreezePageRelminMxid))
2300 {
2301 /* Tuple with XID < FreezeLimit (or MXID < MultiXactCutoff) */
2302 if (vacrel->aggressive)
2303 {
2304 /*
2305 * Aggressive VACUUMs must always be able to advance rel's
2306 * relfrozenxid to a value >= FreezeLimit (and be able to
2307 * advance rel's relminmxid to a value >= MultiXactCutoff).
2308 * The ongoing aggressive VACUUM won't be able to do that
2309 * unless it can freeze an XID (or MXID) from this tuple now.
2310 *
2311 * The only safe option is to have caller perform processing
2312 * of this page using lazy_scan_prune. Caller might have to
2313 * wait a while for a cleanup lock, but it can't be helped.
2314 */
2315 vacrel->offnum = InvalidOffsetNumber;
2316 return false;
2317 }
2318
2319 /*
2320 * Non-aggressive VACUUMs are under no obligation to advance
2321 * relfrozenxid (even by one XID). We can be much laxer here.
2322 *
2323 * Currently we always just accept an older final relfrozenxid
2324 * and/or relminmxid value. We never make caller wait or work a
2325 * little harder, even when it likely makes sense to do so.
2326 */
2327 }
2328
2329 ItemPointerSet(&(tuple.t_self), blkno, offnum);
2330 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2331 tuple.t_len = ItemIdGetLength(itemid);
2332 tuple.t_tableOid = RelationGetRelid(vacrel->rel);
2333
2334 switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
2335 buf))
2336 {
2337 case HEAPTUPLE_DELETE_IN_PROGRESS:
2338 case HEAPTUPLE_LIVE:
2339
2340 /*
2341 * Count both cases as live, just like lazy_scan_prune
2342 */
2343 live_tuples++;
2344
2345 break;
2346 case HEAPTUPLE_DEAD:
2347
2348 /*
2349 * There is some useful work for pruning to do, that won't be
2350 * done due to failure to get a cleanup lock.
2351 */
2352 missed_dead_tuples++;
2353 break;
2354 case HEAPTUPLE_RECENTLY_DEAD:
2355
2356 /*
2357 * Count in recently_dead_tuples, just like lazy_scan_prune
2358 */
2359 recently_dead_tuples++;
2360 break;
2361 case HEAPTUPLE_INSERT_IN_PROGRESS:
2362
2363 /*
2364 * Do not count these rows as live, just like lazy_scan_prune
2365 */
2366 break;
2367 default:
2368 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2369 break;
2370 }
2371 }
2372
2373 vacrel->offnum = InvalidOffsetNumber;
2374
2375 /*
2376 * By here we know for sure that caller can put off freezing and pruning
2377 * this particular page until the next VACUUM. Remember its details now.
2378 * (lazy_scan_prune expects a clean slate, so we have to do this last.)
2379 */
2380 vacrel->NewRelfrozenXid = NoFreezePageRelfrozenXid;
2381 vacrel->NewRelminMxid = NoFreezePageRelminMxid;
2382
2383 /* Save any LP_DEAD items found on the page in dead_items */
2384 if (vacrel->nindexes == 0)
2385 {
2386 /* Using one-pass strategy (since table has no indexes) */
2387 if (lpdead_items > 0)
2388 {
2389 /*
2390 * Perfunctory handling for the corner case where a single pass
2391 * strategy VACUUM cannot get a cleanup lock, and it turns out
2392 * that there is one or more LP_DEAD items: just count the LP_DEAD
2393 * items as missed_dead_tuples instead. (This is a bit dishonest,
2394 * but it beats having to maintain specialized heap vacuuming code
2395 * forever, for vanishingly little benefit.)
2396 */
2397 hastup = true;
2398 missed_dead_tuples += lpdead_items;
2399 }
2400 }
2401 else if (lpdead_items > 0)
2402 {
2403 /*
2404 * Page has LP_DEAD items, and so any references/TIDs that remain in
2405 * indexes will be deleted during index vacuuming (and then marked
2406 * LP_UNUSED in the heap)
2407 */
2408 vacrel->lpdead_item_pages++;
2409
2410 dead_items_add(vacrel, blkno, deadoffsets, lpdead_items);
2411
2412 vacrel->lpdead_items += lpdead_items;
2413 }
2414
2415 /*
2416 * Finally, add relevant page-local counts to whole-VACUUM counts
2417 */
2418 vacrel->live_tuples += live_tuples;
2419 vacrel->recently_dead_tuples += recently_dead_tuples;
2420 vacrel->missed_dead_tuples += missed_dead_tuples;
2421 if (missed_dead_tuples > 0)
2422 vacrel->missed_dead_pages++;
2423
2424 /* Can't truncate this page */
2425 if (hastup)
2426 vacrel->nonempty_pages = blkno + 1;
2427
2428 /* Did we find LP_DEAD items? */
2429 *has_lpdead_items = (lpdead_items > 0);
2430
2431 /* Caller won't need to call lazy_scan_prune with same page */
2432 return true;
2433}
TransactionId MultiXactId
Definition: c.h:633
bool heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid)
Definition: heapam.c:7829
#define MaxHeapTuplesPerPage
Definition: htup_details.h:624
static void dead_items_add(LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
Definition: vacuumlazy.c:3548

References LVRelState::aggressive, Assert(), buf, BufferGetBlockNumber(), LVRelState::cutoffs, dead_items_add(), elog, ERROR, FirstOffsetNumber, heap_tuple_should_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, ItemIdGetLength, ItemIdIsDead, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MaxHeapTuplesPerPage, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::recently_dead_tuples, LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by lazy_scan_heap().
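
Without a cleanup lock the page cannot be defragmented, so the function only classifies the existing line pointers: LP_DEAD offsets are remembered for index vacuuming while the rest are tallied for logging and truncation decisions. A reduced, stand-alone sketch of that classification (item states and inputs are mocked; the visibility checks are omitted):

#include <stdio.h>

typedef enum MockItemState { ITEM_UNUSED, ITEM_REDIRECT, ITEM_DEAD, ITEM_NORMAL } MockItemState;

int
main(void)
{
    MockItemState page[] = {ITEM_NORMAL, ITEM_DEAD, ITEM_UNUSED, ITEM_DEAD, ITEM_REDIRECT};
    unsigned      deadoffsets[8];
    int           lpdead_items = 0;
    int           live_tuples = 0;
    int           hastup = 0;

    for (unsigned off = 0; off < sizeof(page) / sizeof(page[0]); off++)
    {
        switch (page[off])
        {
            case ITEM_UNUSED:
                break;                               /* nothing to do */
            case ITEM_REDIRECT:
                hastup = 1;                          /* prevents rel truncation */
                break;
            case ITEM_DEAD:
                deadoffsets[lpdead_items++] = off;   /* defer to index vacuuming */
                break;
            case ITEM_NORMAL:
                hastup = 1;
                live_tuples++;                       /* visibility check omitted */
                break;
        }
    }

    printf("live=%d lpdead=%d hastup=%d first_dead_off=%u\n",
           live_tuples, lpdead_items, hastup, deadoffsets[0]);
    return 0;
}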

◆ lazy_scan_prune()

static void lazy_scan_prune ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
Buffer  vmbuffer,
bool  all_visible_according_to_vm,
bool *  has_lpdead_items,
bool *  vm_page_frozen 
)
static

Definition at line 1945 of file vacuumlazy.c.

1953{
1954 Relation rel = vacrel->rel;
1955 PruneFreezeResult presult;
1956 int prune_options = 0;
1957
1958 Assert(BufferGetBlockNumber(buf) == blkno);
1959
1960 /*
1961 * Prune all HOT-update chains and potentially freeze tuples on this page.
1962 *
1963 * If the relation has no indexes, we can immediately mark would-be dead
1964 * items LP_UNUSED.
1965 *
1966 * The number of tuples removed from the page is returned in
1967 * presult.ndeleted. It should not be confused with presult.lpdead_items;
1968 * presult.lpdead_items's final value can be thought of as the number of
1969 * tuples that were deleted from indexes.
1970 *
1971 * We will update the VM after collecting LP_DEAD items and freezing
1972 * tuples. Pruning will have determined whether or not the page is
1973 * all-visible.
1974 */
1975 prune_options = HEAP_PAGE_PRUNE_FREEZE;
1976 if (vacrel->nindexes == 0)
1977 prune_options |= HEAP_PAGE_PRUNE_MARK_UNUSED_NOW;
1978
1979 heap_page_prune_and_freeze(rel, buf, vacrel->vistest, prune_options,
1980 &vacrel->cutoffs, &presult, PRUNE_VACUUM_SCAN,
1981 &vacrel->offnum,
1982 &vacrel->NewRelfrozenXid, &vacrel->NewRelminMxid);
1983
1986
1987 if (presult.nfrozen > 0)
1988 {
1989 /*
1990 * We don't increment the new_frozen_tuple_pages instrumentation
1991 * counter when nfrozen == 0, since it only counts pages with newly
1992 * frozen tuples (don't confuse that with pages newly set all-frozen
1993 * in VM).
1994 */
1995 vacrel->new_frozen_tuple_pages++;
1996 }
1997
1998 /*
1999 * VACUUM will call heap_page_is_all_visible() during the second pass over
2000 * the heap to determine all_visible and all_frozen for the page -- this
2001 * is a specialized version of the logic from this function. Now that
2002 * we've finished pruning and freezing, make sure that we're in total
2003 * agreement with heap_page_is_all_visible() using an assertion.
2004 */
2005#ifdef USE_ASSERT_CHECKING
2006 /* Note that all_frozen value does not matter when !all_visible */
2007 if (presult.all_visible)
2008 {
2009 TransactionId debug_cutoff;
2010 bool debug_all_frozen;
2011
2012 Assert(presult.lpdead_items == 0);
2013
2014 if (!heap_page_is_all_visible(vacrel, buf,
2015 &debug_cutoff, &debug_all_frozen))
2016 Assert(false);
2017
2018 Assert(presult.all_frozen == debug_all_frozen);
2019
2020 Assert(!TransactionIdIsValid(debug_cutoff) ||
2021 debug_cutoff == presult.vm_conflict_horizon);
2022 }
2023#endif
2024
2025 /*
2026 * Now save details of the LP_DEAD items from the page in vacrel
2027 */
2028 if (presult.lpdead_items > 0)
2029 {
2030 vacrel->lpdead_item_pages++;
2031
2032 /*
2033 * deadoffsets are collected incrementally in
2034 * heap_page_prune_and_freeze() as each dead line pointer is recorded,
2035 * with an indeterminate order, but dead_items_add requires them to be
2036 * sorted.
2037 */
2038 qsort(presult.deadoffsets, presult.lpdead_items, sizeof(OffsetNumber),
2039 cmpOffsetNumbers);
2040
2041 dead_items_add(vacrel, blkno, presult.deadoffsets, presult.lpdead_items);
2042 }
2043
2044 /* Finally, add page-local counts to whole-VACUUM counts */
2045 vacrel->tuples_deleted += presult.ndeleted;
2046 vacrel->tuples_frozen += presult.nfrozen;
2047 vacrel->lpdead_items += presult.lpdead_items;
2048 vacrel->live_tuples += presult.live_tuples;
2049 vacrel->recently_dead_tuples += presult.recently_dead_tuples;
2050
2051 /* Can't truncate this page */
2052 if (presult.hastup)
2053 vacrel->nonempty_pages = blkno + 1;
2054
2055 /* Did we find LP_DEAD items? */
2056 *has_lpdead_items = (presult.lpdead_items > 0);
2057
2058 Assert(!presult.all_visible || !(*has_lpdead_items));
2059
2060 /*
2061 * Handle setting visibility map bit based on information from the VM (as
2062 * of last heap_vac_scan_next_block() call), and from all_visible and
2063 * all_frozen variables
2064 */
2065 if (!all_visible_according_to_vm && presult.all_visible)
2066 {
2067 uint8 old_vmbits;
2068 uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
2069
2070 if (presult.all_frozen)
2071 {
2072 Assert(!TransactionIdIsValid(presult.vm_conflict_horizon));
2073 flags |= VISIBILITYMAP_ALL_FROZEN;
2074 }
2075
2076 /*
2077 * It should never be the case that the visibility map page is set
2078 * while the page-level bit is clear, but the reverse is allowed (if
2079 * checksums are not enabled). Regardless, set both bits so that we
2080 * get back in sync.
2081 *
2082 * NB: If the heap page is all-visible but the VM bit is not set, we
2083 * don't need to dirty the heap page. However, if checksums are
2084 * enabled, we do need to make sure that the heap page is dirtied
2085 * before passing it to visibilitymap_set(), because it may be logged.
2086 * Given that this situation should only happen in rare cases after a
2087 * crash, it is not worth optimizing.
2088 */
2089 PageSetAllVisible(page);
2090 MarkBufferDirty(buf);
2091 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
2092 InvalidXLogRecPtr,
2093 vmbuffer, presult.vm_conflict_horizon,
2094 flags);
2095
2096 /*
2097 * If the page wasn't already set all-visible and/or all-frozen in the
2098 * VM, count it as newly set for logging.
2099 */
2100 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2101 {
2102 vacrel->vm_new_visible_pages++;
2103 if (presult.all_frozen)
2104 {
2105 vacrel->vm_new_visible_frozen_pages++;
2106 *vm_page_frozen = true;
2107 }
2108 }
2109 else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
2110 presult.all_frozen)
2111 {
2112 vacrel->vm_new_frozen_pages++;
2113 *vm_page_frozen = true;
2114 }
2115 }
2116
2117 /*
2118 * As of PostgreSQL 9.2, the visibility map bit should never be set if the
2119 * page-level bit is clear. However, it's possible that the bit got
2120 * cleared after heap_vac_scan_next_block() was called, so we must recheck
2121 * with buffer lock before concluding that the VM is corrupt.
2122 */
2123 else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
2124 visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
2125 {
2126 elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
2127 vacrel->relname, blkno);
2128 visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
2129 VISIBILITYMAP_VALID_BITS);
2130 }
2131
2132 /*
2133 * It's possible for the value returned by
2134 * GetOldestNonRemovableTransactionId() to move backwards, so it's not
2135 * wrong for us to see tuples that appear to not be visible to everyone
2136 * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value
2137 * never moves backwards, but GetOldestNonRemovableTransactionId() is
2138 * conservative and sometimes returns a value that's unnecessarily small,
2139 * so if we see that contradiction it just means that the tuples that we
2140 * think are not visible to everyone yet actually are, and the
2141 * PD_ALL_VISIBLE flag is correct.
2142 *
2143 * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
2144 * however.
2145 */
2146 else if (presult.lpdead_items > 0 && PageIsAllVisible(page))
2147 {
2148 elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
2149 vacrel->relname, blkno);
2150 PageClearAllVisible(page);
2151 MarkBufferDirty(buf);
2152 visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
2153 VISIBILITYMAP_VALID_BITS);
2154 }
2155
2156 /*
2157 * If the all-visible page is all-frozen but not marked as such yet, mark
2158 * it as all-frozen. Note that all_frozen is only valid if all_visible is
2159 * true, so we must check both all_visible and all_frozen.
2160 */
2161 else if (all_visible_according_to_vm && presult.all_visible &&
2162 presult.all_frozen && !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
2163 {
2164 uint8 old_vmbits;
2165
2166 /*
2167 * Avoid relying on all_visible_according_to_vm as a proxy for the
2168 * page-level PD_ALL_VISIBLE bit being set, since it might have become
2169 * stale -- even when all_visible is set
2170 */
2171 if (!PageIsAllVisible(page))
2172 {
2173 PageSetAllVisible(page);
2174 MarkBufferDirty(buf);
2175 }
2176
2177 /*
2178 * Set the page all-frozen (and all-visible) in the VM.
2179 *
2180 * We can pass InvalidTransactionId as our cutoff_xid, since a
2181 * snapshotConflictHorizon sufficient to make everything safe for REDO
2182 * was logged when the page's tuples were frozen.
2183 */
2185 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
2186 InvalidXLogRecPtr,
2187 vmbuffer, InvalidTransactionId,
2188 VISIBILITYMAP_ALL_VISIBLE |
2189 VISIBILITYMAP_ALL_FROZEN);
2190
2191 /*
2192 * The page was likely already set all-visible in the VM. However,
2193 * there is a small chance that it was modified sometime between
2194 * setting all_visible_according_to_vm and checking the visibility
2195 * during pruning. Check the return value of old_vmbits anyway to
2196 * ensure the visibility map counters used for logging are accurate.
2197 */
2198 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2199 {
2200 vacrel->vm_new_visible_pages++;
2201 vacrel->vm_new_visible_frozen_pages++;
2202 *vm_page_frozen = true;
2203 }
2204
2205 /*
2206 * We already checked that the page was not set all-frozen in the VM
2207 * above, so we don't need to test the value of old_vmbits.
2208 */
2209 else
2210 {
2211 vacrel->vm_new_frozen_pages++;
2212 *vm_page_frozen = true;
2213 }
2214 }
2215}
static void PageClearAllVisible(Page page)
Definition: bufpage.h:439
#define HEAP_PAGE_PRUNE_FREEZE
Definition: heapam.h:43
@ PRUNE_VACUUM_SCAN
Definition: heapam.h:269
#define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW
Definition: heapam.h:42
#define qsort(a, b, c, d)
Definition: port.h:479
void heap_page_prune_and_freeze(Relation relation, Buffer buffer, GlobalVisState *vistest, int options, struct VacuumCutoffs *cutoffs, PruneFreezeResult *presult, PruneReason reason, OffsetNumber *off_loc, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
Definition: pruneheap.c:350
int recently_dead_tuples
Definition: heapam.h:233
TransactionId vm_conflict_horizon
Definition: heapam.h:248
OffsetNumber deadoffsets[MaxHeapTuplesPerPage]
Definition: heapam.h:262
bool all_visible
Definition: heapam.h:246
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
Definition: vacuumlazy.c:3615
static int cmpOffsetNumbers(const void *a, const void *b)
Definition: vacuumlazy.c:1922
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
#define VM_ALL_FROZEN(r, b, v)
Definition: visibilitymap.h:26
#define VISIBILITYMAP_VALID_BITS

References PruneFreezeResult::all_frozen, PruneFreezeResult::all_visible, Assert(), buf, BufferGetBlockNumber(), cmpOffsetNumbers(), LVRelState::cutoffs, dead_items_add(), PruneFreezeResult::deadoffsets, elog, PruneFreezeResult::hastup, heap_page_is_all_visible(), heap_page_prune_and_freeze(), HEAP_PAGE_PRUNE_FREEZE, HEAP_PAGE_PRUNE_MARK_UNUSED_NOW, InvalidTransactionId, InvalidXLogRecPtr, LVRelState::live_tuples, PruneFreezeResult::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, PruneFreezeResult::lpdead_items, MarkBufferDirty(), MultiXactIdIsValid, PruneFreezeResult::ndeleted, LVRelState::new_frozen_tuple_pages, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, PruneFreezeResult::nfrozen, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::offnum, PageClearAllVisible(), PageIsAllVisible(), PageSetAllVisible(), PRUNE_VACUUM_SCAN, qsort, LVRelState::recently_dead_tuples, PruneFreezeResult::recently_dead_tuples, LVRelState::rel, LVRelState::relname, TransactionIdIsValid, LVRelState::tuples_deleted, LVRelState::tuples_frozen, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, LVRelState::vistest, VM_ALL_FROZEN, PruneFreezeResult::vm_conflict_horizon, LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, LVRelState::vm_new_visible_pages, and WARNING.

Referenced by lazy_scan_heap().
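
The comment at lines 2032-2037 above explains why the collected dead offsets are qsort()'ed with cmpOffsetNumbers() before dead_items_add() is called. Below is a minimal standalone sketch (not PostgreSQL code) of that step: OffsetNumberSim and cmp_offsets are stand-ins for OffsetNumber and cmpOffsetNumbers(), and the final loop stands in for a consumer that requires sorted input.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef uint16_t OffsetNumberSim;

    /* comparator in the style of cmpOffsetNumbers(): ascending offsets */
    static int
    cmp_offsets(const void *a, const void *b)
    {
        OffsetNumberSim av = *(const OffsetNumberSim *) a;
        OffsetNumberSim bv = *(const OffsetNumberSim *) b;

        return (av > bv) - (av < bv);
    }

    int
    main(void)
    {
        /* offsets recorded in whatever order pruning happened to visit them */
        OffsetNumberSim deadoffsets[] = {12, 3, 47, 8, 21};
        int         ndead = sizeof(deadoffsets) / sizeof(deadoffsets[0]);

        qsort(deadoffsets, ndead, sizeof(OffsetNumberSim), cmp_offsets);

        /* a dead_items_add()-style consumer can now assume ascending order */
        for (int i = 0; i < ndead; i++)
            printf("dead offset %u\n", (unsigned) deadoffsets[i]);
        return 0;
    }

The subtraction-free comparator avoids overflow and works for any unsigned offset width, which is why this shape is common for sorting small integer keys.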

◆ lazy_truncate_heap()

static void lazy_truncate_heap ( LVRelState * vacrel)
static

Definition at line 3210 of file vacuumlazy.c.

3211{
3212 BlockNumber orig_rel_pages = vacrel->rel_pages;
3213 BlockNumber new_rel_pages;
3214 bool lock_waiter_detected;
3215 int lock_retry;
3216
3217 /* Report that we are now truncating */
3218 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
3219 PROGRESS_VACUUM_PHASE_TRUNCATE);
3220
3221 /* Update error traceback information one last time */
3222 update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
3223 vacrel->nonempty_pages, InvalidOffsetNumber);
3224
3225 /*
3226 * Loop until no more truncating can be done.
3227 */
3228 do
3229 {
3230 /*
3231 * We need full exclusive lock on the relation in order to do
3232 * truncation. If we can't get it, give up rather than waiting --- we
3233 * don't want to block other backends, and we don't want to deadlock
3234 * (which is quite possible considering we already hold a lower-grade
3235 * lock).
3236 */
3237 lock_waiter_detected = false;
3238 lock_retry = 0;
3239 while (true)
3240 {
3241 if (ConditionalLockRelation(vacrel->rel, AccessExclusiveLock))
3242 break;
3243
3244 /*
3245 * Check for interrupts while trying to (re-)acquire the exclusive
3246 * lock.
3247 */
3248 CHECK_FOR_INTERRUPTS();
3249
3250 if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
3251 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
3252 {
3253 /*
3254 * We failed to establish the lock in the specified number of
3255 * retries. This means we give up truncating.
3256 */
3257 ereport(vacrel->verbose ? INFO : DEBUG2,
3258 (errmsg("\"%s\": stopping truncate due to conflicting lock request",
3259 vacrel->relname)));
3260 return;
3261 }
3262
3263 (void) WaitLatch(MyLatch,
3264 WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
3265 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL,
3266 WAIT_EVENT_VACUUM_TRUNCATE);
3267 ResetLatch(MyLatch);
3268 }
3269
3270 /*
3271 * Now that we have exclusive lock, look to see if the rel has grown
3272 * whilst we were vacuuming with non-exclusive lock. If so, give up;
3273 * the newly added pages presumably contain non-deletable tuples.
3274 */
3275 new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
3276 if (new_rel_pages != orig_rel_pages)
3277 {
3278 /*
3279 * Note: we intentionally don't update vacrel->rel_pages with the
3280 * new rel size here. If we did, it would amount to assuming that
3281 * the new pages are empty, which is unlikely. Leaving the numbers
3282 * alone amounts to assuming that the new pages have the same
3283 * tuple density as existing ones, which is less unlikely.
3284 */
3285 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3286 return;
3287 }
3288
3289 /*
3290 * Scan backwards from the end to verify that the end pages actually
3291 * contain no tuples. This is *necessary*, not optional, because
3292 * other backends could have added tuples to these pages whilst we
3293 * were vacuuming.
3294 */
3295 new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
3296 vacrel->blkno = new_rel_pages;
3297
3298 if (new_rel_pages >= orig_rel_pages)
3299 {
3300 /* can't do anything after all */
3301 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3302 return;
3303 }
3304
3305 /*
3306 * Okay to truncate.
3307 */
3308 RelationTruncate(vacrel->rel, new_rel_pages);
3309
3310 /*
3311 * We can release the exclusive lock as soon as we have truncated.
3312 * Other backends can't safely access the relation until they have
3313 * processed the smgr invalidation that smgrtruncate sent out ... but
3314 * that should happen as part of standard invalidation processing once
3315 * they acquire lock on the relation.
3316 */
3317 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3318
3319 /*
3320 * Update statistics. Here, it *is* correct to adjust rel_pages
3321 * without also touching reltuples, since the tuple count wasn't
3322 * changed by the truncation.
3323 */
3324 vacrel->removed_pages += orig_rel_pages - new_rel_pages;
3325 vacrel->rel_pages = new_rel_pages;
3326
3327 ereport(vacrel->verbose ? INFO : DEBUG2,
3328 (errmsg("table \"%s\": truncated %u to %u pages",
3329 vacrel->relname,
3330 orig_rel_pages, new_rel_pages)));
3331 orig_rel_pages = new_rel_pages;
3332 } while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
3333}
struct Latch * MyLatch
Definition: globals.c:64
void ResetLatch(Latch *latch)
Definition: latch.c:372
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:172
void UnlockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:314
bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:278
#define PROGRESS_VACUUM_PHASE_TRUNCATE
Definition: progress.h:38
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:289
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL
Definition: vacuumlazy.c:180
#define VACUUM_TRUNCATE_LOCK_TIMEOUT
Definition: vacuumlazy.c:181
static BlockNumber count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
Definition: vacuumlazy.c:3341
#define WL_TIMEOUT
Definition: waiteventset.h:37
#define WL_EXIT_ON_PM_DEATH
Definition: waiteventset.h:39
#define WL_LATCH_SET
Definition: waiteventset.h:34

References AccessExclusiveLock, LVRelState::blkno, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), DEBUG2, ereport, errmsg(), INFO, InvalidOffsetNumber, MyLatch, LVRelState::nonempty_pages, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelState::rel, LVRelState::rel_pages, RelationGetNumberOfBlocks, RelationTruncate(), LVRelState::relname, LVRelState::removed_pages, ResetLatch(), UnlockRelation(), update_vacuum_error_info(), VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_TRUNCATE_LOCK_TIMEOUT, VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL, LVRelState::verbose, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

Referenced by heap_vacuum_rel().
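
lazy_truncate_heap() above refuses to block: it conditionally tries for the exclusive lock, sleeps one short interval between attempts, and gives up once the retry budget (timeout divided by wait interval) is exhausted. The standalone sketch below mirrors that structure; try_acquire_exclusive_lock() is a hypothetical stand-in for ConditionalLockRelation(), and nanosleep() stands in for WaitLatch().

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define TRUNCATE_LOCK_WAIT_INTERVAL_MS 50
    #define TRUNCATE_LOCK_TIMEOUT_MS       5000

    static bool
    try_acquire_exclusive_lock(void)
    {
        /* placeholder: always fail, to demonstrate the give-up path */
        return false;
    }

    int
    main(void)
    {
        int         lock_retry = 0;

        while (true)
        {
            if (try_acquire_exclusive_lock())
            {
                printf("lock acquired after %d retries\n", lock_retry);
                break;
            }

            if (++lock_retry >
                (TRUNCATE_LOCK_TIMEOUT_MS / TRUNCATE_LOCK_WAIT_INTERVAL_MS))
            {
                /* ~100 attempts x 50ms = 5s budget exhausted: give up */
                printf("stopping: conflicting lock requests for too long\n");
                break;
            }

            /* sleep one wait interval before retrying */
            struct timespec ts = {0, TRUNCATE_LOCK_WAIT_INTERVAL_MS * 1000000L};
            nanosleep(&ts, NULL);
        }
        return 0;
    }

Bounding the wait this way keeps truncation from starving other backends that already hold or are queued for locks on the relation.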

◆ lazy_vacuum()

static void lazy_vacuum ( LVRelState * vacrel)
static

Definition at line 2449 of file vacuumlazy.c.

2450{
2451 bool bypass;
2452
2453 /* Should not end up here with no indexes */
2454 Assert(vacrel->nindexes > 0);
2455 Assert(vacrel->lpdead_item_pages > 0);
2456
2457 if (!vacrel->do_index_vacuuming)
2458 {
2459 Assert(!vacrel->do_index_cleanup);
2460 dead_items_reset(vacrel);
2461 return;
2462 }
2463
2464 /*
2465 * Consider bypassing index vacuuming (and heap vacuuming) entirely.
2466 *
2467 * We currently only do this in cases where the number of LP_DEAD items
2468 * for the entire VACUUM operation is close to zero. This avoids sharp
2469 * discontinuities in the duration and overhead of successive VACUUM
2470 * operations that run against the same table with a fixed workload.
2471 * Ideally, successive VACUUM operations will behave as if there are
2472 * exactly zero LP_DEAD items in cases where there are close to zero.
2473 *
2474 * This is likely to be helpful with a table that is continually affected
2475 * by UPDATEs that can mostly apply the HOT optimization, but occasionally
2476 * have small aberrations that lead to just a few heap pages retaining
2477 * only one or two LP_DEAD items. This is pretty common; even when the
2478 * DBA goes out of their way to make UPDATEs use HOT, it is practically
2479 * impossible to predict whether HOT will be applied in 100% of cases.
2480 * It's far easier to ensure that 99%+ of all UPDATEs against a table use
2481 * HOT through careful tuning.
2482 */
2483 bypass = false;
2484 if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
2485 {
2486 BlockNumber threshold;
2487
2488 Assert(vacrel->num_index_scans == 0);
2489 Assert(vacrel->lpdead_items == vacrel->dead_items_info->num_items);
2490 Assert(vacrel->do_index_vacuuming);
2491 Assert(vacrel->do_index_cleanup);
2492
2493 /*
2494 * This crossover point at which we'll start to do index vacuuming is
2495 * expressed as a percentage of the total number of heap pages in the
2496 * table that are known to have at least one LP_DEAD item. This is
2497 * much more important than the total number of LP_DEAD items, since
2498 * it's a proxy for the number of heap pages whose visibility map bits
2499 * cannot be set on account of bypassing index and heap vacuuming.
2500 *
2501 * We apply one further precautionary test: the space currently used
2502 * to store the TIDs (TIDs that now all point to LP_DEAD items) must
2503 * not exceed 32MB. This limits the risk that we will bypass index
2504 * vacuuming again and again until eventually there is a VACUUM whose
2505 * dead_items space is not CPU cache resident.
2506 *
2507 * We don't take any special steps to remember the LP_DEAD items (such
2508 * as counting them in our final update to the stats system) when the
2509 * optimization is applied. Though the accounting used in analyze.c's
2510 * acquire_sample_rows() will recognize the same LP_DEAD items as dead
2511 * rows in its own stats report, that's okay. The discrepancy should
2512 * be negligible. If this optimization is ever expanded to cover more
2513 * cases then this may need to be reconsidered.
2514 */
2515 threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
2516 bypass = (vacrel->lpdead_item_pages < threshold &&
2517 TidStoreMemoryUsage(vacrel->dead_items) < 32 * 1024 * 1024);
2518 }
2519
2520 if (bypass)
2521 {
2522 /*
2523 * There are almost zero TIDs. Behave as if there were precisely
2524 * zero: bypass index vacuuming, but do index cleanup.
2525 *
2526 * We expect that the ongoing VACUUM operation will finish very
2527 * quickly, so there is no point in considering speeding up as a
2528 * failsafe against wraparound failure. (Index cleanup is expected to
2529 * finish very quickly in cases where there were no ambulkdelete()
2530 * calls.)
2531 */
2532 vacrel->do_index_vacuuming = false;
2533 }
2534 else if (lazy_vacuum_all_indexes(vacrel))
2535 {
2536 /*
2537 * We successfully completed a round of index vacuuming. Do related
2538 * heap vacuuming now.
2539 */
2540 lazy_vacuum_heap_rel(vacrel);
2541 }
2542 else
2543 {
2544 /*
2545 * Failsafe case.
2546 *
2547 * We attempted index vacuuming, but didn't finish a full round/full
2548 * index scan. This happens when relfrozenxid or relminmxid is too
2549 * far in the past.
2550 *
2551 * From this point on the VACUUM operation will do no further index
2552 * vacuuming or heap vacuuming. This VACUUM operation won't end up
2553 * back here again.
2554 */
2555 Assert(VacuumFailsafeActive);
2556 }
2557
2558 /*
2559 * Forget the LP_DEAD items that we just vacuumed (or just decided to not
2560 * vacuum)
2561 */
2562 dead_items_reset(vacrel);
2563}
static void dead_items_reset(LVRelState *vacrel)
Definition: vacuumlazy.c:3570
#define BYPASS_THRESHOLD_PAGES
Definition: vacuumlazy.c:187
static bool lazy_vacuum_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2574
static void lazy_vacuum_heap_rel(LVRelState *vacrel)
Definition: vacuumlazy.c:2719

References Assert(), BYPASS_THRESHOLD_PAGES, LVRelState::consider_bypass_optimization, LVRelState::dead_items, LVRelState::dead_items_info, dead_items_reset(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, lazy_vacuum_all_indexes(), lazy_vacuum_heap_rel(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, LVRelState::rel_pages, TidStoreMemoryUsage(), and VacuumFailsafeActive.

Referenced by lazy_scan_heap().
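
The bypass test in lazy_vacuum() above compares the number of pages with LP_DEAD items against a small fraction of the table and caps the memory held by the dead-items store. A minimal standalone sketch of that decision (not PostgreSQL code) follows; dead_items_mem is a stand-in for TidStoreMemoryUsage(), and the 2% / 32MB values mirror the thresholds described in the comments.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool
    should_bypass_index_vacuuming(uint32_t rel_pages,
                                  uint32_t lpdead_item_pages,
                                  uint64_t dead_items_mem)
    {
        double      threshold = (double) rel_pages * 0.02;  /* 2% of rel_pages */

        return lpdead_item_pages < threshold &&
            dead_items_mem < (uint64_t) 32 * 1024 * 1024;   /* 32MB cap */
    }

    int
    main(void)
    {
        /* e.g. a 100000-page table with 150 pages holding LP_DEAD items */
        printf("bypass: %s\n",
               should_bypass_index_vacuuming(100000, 150, 1024 * 1024)
               ? "yes" : "no");
        return 0;
    }

Keying the decision on pages rather than raw item counts matches the rationale above: each such page is one more page whose VM bits cannot be set if index and heap vacuuming are skipped.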

◆ lazy_vacuum_all_indexes()

static bool lazy_vacuum_all_indexes ( LVRelState * vacrel)
static

Definition at line 2574 of file vacuumlazy.c.

2575{
2576 bool allindexes = true;
2577 double old_live_tuples = vacrel->rel->rd_rel->reltuples;
2578 const int progress_start_index[] = {
2579 PROGRESS_VACUUM_PHASE,
2580 PROGRESS_VACUUM_INDEXES_TOTAL
2581 };
2582 const int progress_end_index[] = {
2583 PROGRESS_VACUUM_INDEXES_TOTAL,
2584 PROGRESS_VACUUM_INDEXES_PROCESSED,
2585 PROGRESS_VACUUM_NUM_INDEX_VACUUMS
2586 };
2587 int64 progress_start_val[2];
2588 int64 progress_end_val[3];
2589
2590 Assert(vacrel->nindexes > 0);
2591 Assert(vacrel->do_index_vacuuming);
2592 Assert(vacrel->do_index_cleanup);
2593
2594 /* Precheck for XID wraparound emergencies */
2595 if (lazy_check_wraparound_failsafe(vacrel))
2596 {
2597 /* Wraparound emergency -- don't even start an index scan */
2598 return false;
2599 }
2600
2601 /*
2602 * Report that we are now vacuuming indexes and the number of indexes to
2603 * vacuum.
2604 */
2605 progress_start_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_INDEX;
2606 progress_start_val[1] = vacrel->nindexes;
2607 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
2608
2609 if (!ParallelVacuumIsActive(vacrel))
2610 {
2611 for (int idx = 0; idx < vacrel->nindexes; idx++)
2612 {
2613 Relation indrel = vacrel->indrels[idx];
2614 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2615
2616 vacrel->indstats[idx] = lazy_vacuum_one_index(indrel, istat,
2617 old_live_tuples,
2618 vacrel);
2619
2620 /* Report the number of indexes vacuumed */
2621 pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
2622 idx + 1);
2623
2624 if (lazy_check_wraparound_failsafe(vacrel))
2625 {
2626 /* Wraparound emergency -- end current index scan */
2627 allindexes = false;
2628 break;
2629 }
2630 }
2631 }
2632 else
2633 {
2634 /* Outsource everything to parallel variant */
2635 parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples,
2636 vacrel->num_index_scans);
2637
2638 /*
2639 * Do a postcheck to consider applying wraparound failsafe now. Note
2640 * that parallel VACUUM only gets the precheck and this postcheck.
2641 */
2642 if (lazy_check_wraparound_failsafe(vacrel))
2643 allindexes = false;
2644 }
2645
2646 /*
2647 * We delete all LP_DEAD items from the first heap pass in all indexes on
2648 * each call here (except calls where we choose to do the failsafe). This
2649 * makes the next call to lazy_vacuum_heap_rel() safe (except in the event
2650 * of the failsafe triggering, which prevents the next call from taking
2651 * place).
2652 */
2653 Assert(vacrel->num_index_scans > 0 ||
2654 vacrel->dead_items_info->num_items == vacrel->lpdead_items);
2655 Assert(allindexes || VacuumFailsafeActive);
2656
2657 /*
2658 * Increase and report the number of index scans. Also, we reset
2659 * PROGRESS_VACUUM_INDEXES_TOTAL and PROGRESS_VACUUM_INDEXES_PROCESSED.
2660 *
2661 * We deliberately include the case where we started a round of bulk
2662 * deletes that we weren't able to finish due to the failsafe triggering.
2663 */
2664 vacrel->num_index_scans++;
2665 progress_end_val[0] = 0;
2666 progress_end_val[1] = 0;
2667 progress_end_val[2] = vacrel->num_index_scans;
2668 pgstat_progress_update_multi_param(3, progress_end_index, progress_end_val);
2669
2670 return allindexes;
2671}
#define PROGRESS_VACUUM_NUM_INDEX_VACUUMS
Definition: progress.h:25
#define PROGRESS_VACUUM_PHASE_VACUUM_INDEX
Definition: progress.h:35
static IndexBulkDeleteResult * lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
Definition: vacuumlazy.c:3081
void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans)

References Assert(), LVRelState::dead_items_info, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, idx(), LVRelState::indrels, LVRelState::indstats, lazy_check_wraparound_failsafe(), lazy_vacuum_one_index(), LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, parallel_vacuum_bulkdel_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, LVRelState::pvs, RelationData::rd_rel, LVRelState::rel, and VacuumFailsafeActive.

Referenced by lazy_vacuum().
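
lazy_vacuum_all_indexes() above applies the failsafe as a precheck before any index scan starts and as a postcheck after each index, returning whether the full round completed. The standalone sketch below (not PostgreSQL code) shows only that control-flow pattern; failsafe_triggered() and vacuum_one_index() are hypothetical stand-ins for lazy_check_wraparound_failsafe() and lazy_vacuum_one_index().

    #include <stdbool.h>
    #include <stdio.h>

    static bool
    failsafe_triggered(void)
    {
        return false;               /* placeholder: no emergency */
    }

    static void
    vacuum_one_index(int idx)
    {
        printf("bulk-deleting index %d\n", idx);
    }

    static bool
    vacuum_all_indexes(int nindexes)
    {
        bool        allindexes = true;

        if (failsafe_triggered())
            return false;           /* precheck: don't even start */

        for (int idx = 0; idx < nindexes; idx++)
        {
            vacuum_one_index(idx);
            if (failsafe_triggered())
            {
                allindexes = false; /* postcheck: end the current round */
                break;
            }
        }
        return allindexes;
    }

    int
    main(void)
    {
        printf("completed full round: %s\n",
               vacuum_all_indexes(3) ? "yes" : "no");
        return 0;
    }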

◆ lazy_vacuum_heap_page()

static void lazy_vacuum_heap_page ( LVRelState * vacrel,
BlockNumber  blkno,
Buffer  buffer,
OffsetNumber * deadoffsets,
int  num_offsets,
Buffer  vmbuffer 
)
static

Definition at line 2837 of file vacuumlazy.c.

2840{
2841 Page page = BufferGetPage(buffer);
2842 OffsetNumber unused[MaxHeapTuplesPerPage];
2843 int nunused = 0;
2844 TransactionId visibility_cutoff_xid;
2845 bool all_frozen;
2846 LVSavedErrInfo saved_err_info;
2847
2848 Assert(vacrel->do_index_vacuuming);
2849
2850 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
2851
2852 /* Update error traceback information */
2853 update_vacuum_error_info(vacrel, &saved_err_info,
2854 VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
2855 InvalidOffsetNumber);
2856
2857 START_CRIT_SECTION();
2858
2859 for (int i = 0; i < num_offsets; i++)
2860 {
2861 ItemId itemid;
2862 OffsetNumber toff = deadoffsets[i];
2863
2864 itemid = PageGetItemId(page, toff);
2865
2866 Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
2867 ItemIdSetUnused(itemid);
2868 unused[nunused++] = toff;
2869 }
2870
2871 Assert(nunused > 0);
2872
2873 /* Attempt to truncate line pointer array now */
2874 PageTruncateLinePointerArray(page);
2875
2876 /*
2877 * Mark buffer dirty before we write WAL.
2878 */
2879 MarkBufferDirty(buffer);
2880
2881 /* XLOG stuff */
2882 if (RelationNeedsWAL(vacrel->rel))
2883 {
2884 log_heap_prune_and_freeze(vacrel->rel, buffer,
2885 InvalidTransactionId,
2886 false, /* no cleanup lock required */
2887 PRUNE_VACUUM_CLEANUP,
2888 NULL, 0, /* frozen */
2889 NULL, 0, /* redirected */
2890 NULL, 0, /* dead */
2891 unused, nunused);
2892 }
2893
2894 /*
2895 * End critical section, so we safely can do visibility tests (which
2896 * possibly need to perform IO and allocate memory!). If we crash now the
2897 * page (including the corresponding vm bit) might not be marked all
2898 * visible, but that's fine. A later vacuum will fix that.
2899 */
2900 END_CRIT_SECTION();
2901
2902 /*
2903 * Now that we have removed the LP_DEAD items from the page, once again
2904 * check if the page has become all-visible. The page is already marked
2905 * dirty, exclusively locked, and, if needed, a full page image has been
2906 * emitted.
2907 */
2908 Assert(!PageIsAllVisible(page));
2909 if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
2910 &all_frozen))
2911 {
2912 uint8 old_vmbits;
2913 uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
2914
2915 if (all_frozen)
2916 {
2917 Assert(!TransactionIdIsValid(visibility_cutoff_xid));
2918 flags |= VISIBILITYMAP_ALL_FROZEN;
2919 }
2920
2921 PageSetAllVisible(page);
2922 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buffer,
2924 vmbuffer, visibility_cutoff_xid,
2925 flags);
2926
2927 /*
2928 * If the page wasn't already set all-visible and/or all-frozen in the
2929 * VM, count it as newly set for logging.
2930 */
2931 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2932 {
2933 vacrel->vm_new_visible_pages++;
2934 if (all_frozen)
2935 vacrel->vm_new_visible_frozen_pages++;
2936 }
2937
2938 else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
2939 all_frozen)
2940 vacrel->vm_new_frozen_pages++;
2941 }
2942
2943 /* Revert to the previous phase information for error traceback */
2944 restore_vacuum_error_info(vacrel, &saved_err_info);
2945}
void PageTruncateLinePointerArray(Page page)
Definition: bufpage.c:834
@ PRUNE_VACUUM_CLEANUP
Definition: heapam.h:270
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
void log_heap_prune_and_freeze(Relation relation, Buffer buffer, TransactionId conflict_xid, bool cleanup_lock, PruneReason reason, HeapTupleFreeze *frozen, int nfrozen, OffsetNumber *redirected, int nredirected, OffsetNumber *dead, int ndead, OffsetNumber *unused, int nunused)
Definition: pruneheap.c:2053

References Assert(), BufferGetPage(), LVRelState::do_index_vacuuming, END_CRIT_SECTION, heap_page_is_all_visible(), i, InvalidOffsetNumber, InvalidTransactionId, InvalidXLogRecPtr, ItemIdHasStorage, ItemIdIsDead, ItemIdSetUnused, log_heap_prune_and_freeze(), MarkBufferDirty(), MaxHeapTuplesPerPage, PageGetItemId(), PageIsAllVisible(), PageSetAllVisible(), PageTruncateLinePointerArray(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PRUNE_VACUUM_CLEANUP, LVRelState::rel, RelationNeedsWAL, restore_vacuum_error_info(), START_CRIT_SECTION, TransactionIdIsValid, update_vacuum_error_info(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, and LVRelState::vm_new_visible_pages.

Referenced by lazy_vacuum_heap_rel().
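
The loop at the start of lazy_vacuum_heap_page() walks the sorted dead offsets, flips each line pointer to LP_UNUSED, and collects the offsets so a single batched WAL record can cover all of them. The standalone sketch below (not PostgreSQL code) shows just that bookkeeping; ItemIdSim and the LP_* constants are simplified stand-ins for ItemIdData and its flag values.

    #include <stdint.h>
    #include <stdio.h>

    #define LP_DEAD    3
    #define LP_UNUSED  0

    typedef struct ItemIdSim
    {
        uint8_t     lp_flags;
    } ItemIdSim;

    int
    main(void)
    {
        ItemIdSim   items[10] = {0};
        uint16_t    deadoffsets[] = {2, 5, 7};      /* 1-based offsets */
        uint16_t    unused[10];
        int         nunused = 0;

        /* pretend the first heap pass left these line pointers LP_DEAD */
        items[1].lp_flags = items[4].lp_flags = items[6].lp_flags = LP_DEAD;

        for (int i = 0; i < 3; i++)
        {
            uint16_t    off = deadoffsets[i];

            items[off - 1].lp_flags = LP_UNUSED;    /* ItemIdSetUnused() analog */
            unused[nunused++] = off;
        }

        /* one batched "log record" would now cover every freed offset */
        printf("marked %d item pointers unused\n", nunused);
        return 0;
    }

Collecting the offsets first and logging them once is what keeps the WAL volume of the second heap pass proportional to the number of pages touched rather than the number of items.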

◆ lazy_vacuum_heap_rel()

static void lazy_vacuum_heap_rel ( LVRelState * vacrel)
static

Definition at line 2719 of file vacuumlazy.c.

2720{
2721 ReadStream *stream;
2722 BlockNumber vacuumed_pages = 0;
2723 Buffer vmbuffer = InvalidBuffer;
2724 LVSavedErrInfo saved_err_info;
2725 TidStoreIter *iter;
2726
2727 Assert(vacrel->do_index_vacuuming);
2728 Assert(vacrel->do_index_cleanup);
2729 Assert(vacrel->num_index_scans > 0);
2730
2731 /* Report that we are now vacuuming the heap */
2732 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2733 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
2734
2735 /* Update error traceback information */
2736 update_vacuum_error_info(vacrel, &saved_err_info,
2737 VACUUM_ERRCB_PHASE_VACUUM_HEAP,
2738 InvalidBlockNumber, InvalidOffsetNumber);
2739
2740 iter = TidStoreBeginIterate(vacrel->dead_items);
2741
2742 /*
2743 * Set up the read stream for vacuum's second pass through the heap.
2744 *
2745 * It is safe to use batchmode, as vacuum_reap_lp_read_stream_next() does
2746 * not need to wait for IO and does not perform locking. Once we support
2747 * parallelism it should still be fine, as presumably the holder of locks
2748 * would never be blocked by IO while holding the lock.
2749 */
2750 stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE |
2751 READ_STREAM_USE_BATCHING,
2752 vacrel->bstrategy,
2753 vacrel->rel,
2754 MAIN_FORKNUM,
2755 vacuum_reap_lp_read_stream_next,
2756 iter,
2757 sizeof(TidStoreIterResult));
2758
2759 while (true)
2760 {
2761 BlockNumber blkno;
2762 Buffer buf;
2763 Page page;
2764 TidStoreIterResult *iter_result;
2765 Size freespace;
2766 OffsetNumber offsets[MaxOffsetNumber];
2767 int num_offsets;
2768
2769 vacuum_delay_point(false);
2770
2771 buf = read_stream_next_buffer(stream, (void **) &iter_result);
2772
2773 /* The relation is exhausted */
2774 if (!BufferIsValid(buf))
2775 break;
2776
2777 vacrel->blkno = blkno = BufferGetBlockNumber(buf);
2778
2779 Assert(iter_result);
2780 num_offsets = TidStoreGetBlockOffsets(iter_result, offsets, lengthof(offsets));
2781 Assert(num_offsets <= lengthof(offsets));
2782
2783 /*
2784 * Pin the visibility map page in case we need to mark the page
2785 * all-visible. In most cases this will be very cheap, because we'll
2786 * already have the correct page pinned anyway.
2787 */
2788 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
2789
2790 /* We need a non-cleanup exclusive lock to mark dead_items unused */
2791 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2792 lazy_vacuum_heap_page(vacrel, blkno, buf, offsets,
2793 num_offsets, vmbuffer);
2794
2795 /* Now that we've vacuumed the page, record its available space */
2796 page = BufferGetPage(buf);
2797 freespace = PageGetHeapFreeSpace(page);
2798
2799 UnlockReleaseBuffer(buf);
2800 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
2801 vacuumed_pages++;
2802 }
2803
2804 read_stream_end(stream);
2805 TidStoreEndIterate(iter);
2806
2807 vacrel->blkno = InvalidBlockNumber;
2808 if (BufferIsValid(vmbuffer))
2809 ReleaseBuffer(vmbuffer);
2810
2811 /*
2812 * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
2813 * the second heap pass. No more, no less.
2814 */
2815 Assert(vacrel->num_index_scans > 1 ||
2816 (vacrel->dead_items_info->num_items == vacrel->lpdead_items &&
2817 vacuumed_pages == vacrel->lpdead_item_pages));
2818
2819 ereport(DEBUG2,
2820 (errmsg("table \"%s\": removed %" PRId64 " dead item identifiers in %u pages",
2821 vacrel->relname, vacrel->dead_items_info->num_items,
2822 vacuumed_pages)));
2823
2824 /* Revert to the previous phase information for error traceback */
2825 restore_vacuum_error_info(vacrel, &saved_err_info);
2826}
#define lengthof(array)
Definition: c.h:759
#define MaxOffsetNumber
Definition: off.h:28
#define PROGRESS_VACUUM_PHASE_VACUUM_HEAP
Definition: progress.h:36
#define READ_STREAM_USE_BATCHING
Definition: read_stream.h:64
TidStoreIter * TidStoreBeginIterate(TidStore *ts)
Definition: tidstore.c:471
void TidStoreEndIterate(TidStoreIter *iter)
Definition: tidstore.c:518
int TidStoreGetBlockOffsets(TidStoreIterResult *result, OffsetNumber *offsets, int max_offsets)
Definition: tidstore.c:566
static BlockNumber vacuum_reap_lp_read_stream_next(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition: vacuumlazy.c:2681
static void lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
Definition: vacuumlazy.c:2837

References Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_EXCLUSIVE, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, ereport, errmsg(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_vacuum_heap_page(), lengthof, LockBuffer(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAIN_FORKNUM, MaxOffsetNumber, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, PageGetHeapFreeSpace(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, read_stream_begin_relation(), read_stream_end(), READ_STREAM_MAINTENANCE, read_stream_next_buffer(), READ_STREAM_USE_BATCHING, RecordPageWithFreeSpace(), LVRelState::rel, ReleaseBuffer(), LVRelState::relname, restore_vacuum_error_info(), TidStoreBeginIterate(), TidStoreEndIterate(), TidStoreGetBlockOffsets(), UnlockReleaseBuffer(), update_vacuum_error_info(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, vacuum_reap_lp_read_stream_next(), and visibilitymap_pin().

Referenced by lazy_vacuum().
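
The main loop of lazy_vacuum_heap_rel() pulls the next block from the read stream, stops when an invalid buffer signals that the dead-items set is exhausted, and processes the per-buffer payload that travels with each block. The standalone sketch below (not PostgreSQL code) mirrors only that consumer shape; next_block(), BlockPayload, and INVALID_BLOCK are hypothetical stand-ins for the read stream, TidStoreIterResult, and InvalidBlockNumber.

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_BLOCK  UINT32_MAX

    typedef struct BlockPayload
    {
        uint32_t    blkno;
        int         noffsets;
    } BlockPayload;

    static const BlockPayload work[] = {{4, 2}, {9, 5}, {17, 1}};

    static uint32_t
    next_block(int *pos, BlockPayload *payload)
    {
        if (*pos >= (int) (sizeof(work) / sizeof(work[0])))
            return INVALID_BLOCK;   /* relation exhausted */
        *payload = work[(*pos)++];
        return payload->blkno;
    }

    int
    main(void)
    {
        int         pos = 0;
        BlockPayload payload;
        int         vacuumed_pages = 0;

        while (next_block(&pos, &payload) != INVALID_BLOCK)
        {
            /* a lazy_vacuum_heap_page()-style step would run here */
            printf("block %u: %d dead offsets\n", payload.blkno, payload.noffsets);
            vacuumed_pages++;
        }
        printf("vacuumed %d pages\n", vacuumed_pages);
        return 0;
    }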

◆ lazy_vacuum_one_index()

static IndexBulkDeleteResult * lazy_vacuum_one_index ( Relation  indrel,
IndexBulkDeleteResult * istat,
double  reltuples,
LVRelState * vacrel 
)
static

Definition at line 3081 of file vacuumlazy.c.

3083{
3084 IndexVacuumInfo ivinfo;
3085 LVSavedErrInfo saved_err_info;
3086
3087 ivinfo.index = indrel;
3088 ivinfo.heaprel = vacrel->rel;
3089 ivinfo.analyze_only = false;
3090 ivinfo.report_progress = false;
3091 ivinfo.estimated_count = true;
3092 ivinfo.message_level = DEBUG2;
3093 ivinfo.num_heap_tuples = reltuples;
3094 ivinfo.strategy = vacrel->bstrategy;
3095
3096 /*
3097 * Update error traceback information.
3098 *
3099 * The index name is saved during this phase and restored immediately
3100 * after this phase. See vacuum_error_callback.
3101 */
3102 Assert(vacrel->indname == NULL);
3103 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
3104 update_vacuum_error_info(vacrel, &saved_err_info,
3105 VACUUM_ERRCB_PHASE_VACUUM_INDEX,
3106 InvalidBlockNumber, InvalidOffsetNumber);
3107
3108 /* Do bulk deletion */
3109 istat = vac_bulkdel_one_index(&ivinfo, istat, vacrel->dead_items,
3110 vacrel->dead_items_info);
3111
3112 /* Revert to the previous phase information for error traceback */
3113 restore_vacuum_error_info(vacrel, &saved_err_info);
3114 pfree(vacrel->indname);
3115 vacrel->indname = NULL;
3116
3117 return istat;
3118}
IndexBulkDeleteResult * vac_bulkdel_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat, TidStore *dead_items, VacDeadItemsInfo *dead_items_info)
Definition: vacuum.c:2611

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_bulkdel_one_index(), and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by lazy_vacuum_all_indexes().

◆ restore_vacuum_error_info()

static void restore_vacuum_error_info ( LVRelState * vacrel,
const LVSavedErrInfo * saved_vacrel 
)
static

Definition at line 3849 of file vacuumlazy.c.

3851{
3852 vacrel->blkno = saved_vacrel->blkno;
3853 vacrel->offnum = saved_vacrel->offnum;
3854 vacrel->phase = saved_vacrel->phase;
3855}
BlockNumber blkno
Definition: vacuumlazy.c:417
VacErrPhase phase
Definition: vacuumlazy.c:419
OffsetNumber offnum
Definition: vacuumlazy.c:418

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().

◆ should_attempt_truncation()

static bool should_attempt_truncation ( LVRelState * vacrel)
static

Definition at line 3190 of file vacuumlazy.c.

3191{
3192 BlockNumber possibly_freeable;
3193
3194 if (!vacrel->do_rel_truncate || VacuumFailsafeActive)
3195 return false;
3196
3197 possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
3198 if (possibly_freeable > 0 &&
3199 (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
3200 possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
3201 return true;
3202
3203 return false;
3204}
#define REL_TRUNCATE_MINIMUM
Definition: vacuumlazy.c:169
#define REL_TRUNCATE_FRACTION
Definition: vacuumlazy.c:170

References LVRelState::do_rel_truncate, LVRelState::nonempty_pages, LVRelState::rel_pages, REL_TRUNCATE_FRACTION, REL_TRUNCATE_MINIMUM, and VacuumFailsafeActive.

Referenced by heap_vacuum_rel().
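
should_attempt_truncation() above only approves truncation when the freeable tail is at least a fixed number of pages or a fixed fraction of the table. A minimal standalone sketch of the same test (not PostgreSQL code) follows; the 1000-page and 1/16 values mirror REL_TRUNCATE_MINIMUM and REL_TRUNCATE_FRACTION as used in the listing above.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool
    worth_truncating(uint32_t rel_pages, uint32_t nonempty_pages)
    {
        uint32_t    possibly_freeable = rel_pages - nonempty_pages;

        return possibly_freeable > 0 &&
            (possibly_freeable >= 1000 ||            /* REL_TRUNCATE_MINIMUM */
             possibly_freeable >= rel_pages / 16);   /* REL_TRUNCATE_FRACTION */
    }

    int
    main(void)
    {
        /* 8000-page table whose last 600 pages are empty: 600 >= 8000/16 */
        printf("truncate: %s\n", worth_truncating(8000, 7400) ? "yes" : "no");
        return 0;
    }

The two-part test means small tables can be truncated by a modest absolute amount, while large tables only pay the exclusive-lock cost when a meaningful fraction of the file can be returned to the OS.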

◆ update_relstats_all_indexes()

static void update_relstats_all_indexes ( LVRelState * vacrel)
static

Definition at line 3731 of file vacuumlazy.c.

3732{
3733 Relation *indrels = vacrel->indrels;
3734 int nindexes = vacrel->nindexes;
3735 IndexBulkDeleteResult **indstats = vacrel->indstats;
3736
3737 Assert(vacrel->do_index_cleanup);
3738
3739 for (int idx = 0; idx < nindexes; idx++)
3740 {
3741 Relation indrel = indrels[idx];
3742 IndexBulkDeleteResult *istat = indstats[idx];
3743
3744 if (istat == NULL || istat->estimated_count)
3745 continue;
3746
3747 /* Update index statistics */
3748 vac_update_relstats(indrel,
3749 istat->num_pages,
3750 istat->num_index_tuples,
3751 0, 0,
3752 false,
3753 InvalidTransactionId,
3754 InvalidMultiXactId,
3755 NULL, NULL, false);
3756 }
3757}
double num_index_tuples
Definition: genam.h:102

References Assert(), LVRelState::do_index_cleanup, IndexBulkDeleteResult::estimated_count, idx(), LVRelState::indrels, LVRelState::indstats, InvalidMultiXactId, InvalidTransactionId, LVRelState::nindexes, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, and vac_update_relstats().

Referenced by heap_vacuum_rel().

◆ update_vacuum_error_info()

static void update_vacuum_error_info ( LVRelState * vacrel,
LVSavedErrInfo * saved_vacrel,
int  phase,
BlockNumber  blkno,
OffsetNumber  offnum 
)
static

Definition at line 3830 of file vacuumlazy.c.

3832{
3833 if (saved_vacrel)
3834 {
3835 saved_vacrel->offnum = vacrel->offnum;
3836 saved_vacrel->blkno = vacrel->blkno;
3837 saved_vacrel->phase = vacrel->phase;
3838 }
3839
3840 vacrel->blkno = blkno;
3841 vacrel->offnum = offnum;
3842 vacrel->phase = phase;
3843}

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_scan_heap(), lazy_truncate_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().
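
update_vacuum_error_info() and restore_vacuum_error_info() implement a simple save/update/restore protocol around sub-phases: stash the current error position, overwrite it for the duration of the nested work, then put the old values back. The standalone sketch below (not PostgreSQL code) shows that pattern with a reduced ErrInfo struct standing in for LVRelState/LVSavedErrInfo.

    #include <stdint.h>
    #include <stdio.h>

    typedef struct ErrInfo
    {
        uint32_t    blkno;
        int         phase;
    } ErrInfo;

    static void
    update_err_info(ErrInfo *cur, ErrInfo *saved, int phase, uint32_t blkno)
    {
        if (saved)
            *saved = *cur;          /* remember where we were */
        cur->phase = phase;
        cur->blkno = blkno;
    }

    static void
    restore_err_info(ErrInfo *cur, const ErrInfo *saved)
    {
        *cur = *saved;              /* back to the outer phase */
    }

    int
    main(void)
    {
        ErrInfo     cur = {.blkno = 42, .phase = 1};
        ErrInfo     saved;

        update_err_info(&cur, &saved, 3, 99);   /* enter sub-phase */
        printf("during: phase=%d blkno=%u\n", cur.phase, cur.blkno);
        restore_err_info(&cur, &saved);
        printf("after:  phase=%d blkno=%u\n", cur.phase, cur.blkno);
        return 0;
    }

Passing NULL for the save slot, as lazy_truncate_heap() does above, updates the position without keeping anything to restore.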

◆ vacuum_error_callback()

static void vacuum_error_callback ( void *  arg)
static

Definition at line 3766 of file vacuumlazy.c.

3767{
3768 LVRelState *errinfo = arg;
3769
3770 switch (errinfo->phase)
3771 {
3772 case VACUUM_ERRCB_PHASE_SCAN_HEAP:
3773 if (BlockNumberIsValid(errinfo->blkno))
3774 {
3775 if (OffsetNumberIsValid(errinfo->offnum))
3776 errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
3777 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3778 else
3779 errcontext("while scanning block %u of relation \"%s.%s\"",
3780 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3781 }
3782 else
3783 errcontext("while scanning relation \"%s.%s\"",
3784 errinfo->relnamespace, errinfo->relname);
3785 break;
3786
3787 case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
3788 if (BlockNumberIsValid(errinfo->blkno))
3789 {
3790 if (OffsetNumberIsValid(errinfo->offnum))
3791 errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
3792 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3793 else
3794 errcontext("while vacuuming block %u of relation \"%s.%s\"",
3795 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3796 }
3797 else
3798 errcontext("while vacuuming relation \"%s.%s\"",
3799 errinfo->relnamespace, errinfo->relname);
3800 break;
3801
3802 case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
3803 errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
3804 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3805 break;
3806
3807 case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
3808 errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
3809 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3810 break;
3811
3812 case VACUUM_ERRCB_PHASE_TRUNCATE:
3813 if (BlockNumberIsValid(errinfo->blkno))
3814 errcontext("while truncating relation \"%s.%s\" to %u blocks",
3815 errinfo->relnamespace, errinfo->relname, errinfo->blkno);
3816 break;
3817
3818 case VACUUM_ERRCB_PHASE_UNKNOWN:
3819 default:
3820 return; /* do nothing; the errinfo may not be
3821 * initialized */
3822 }
3823}
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
#define errcontext
Definition: elog.h:197
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
void * arg

References arg, LVRelState::blkno, BlockNumberIsValid(), errcontext, LVRelState::indname, LVRelState::offnum, OffsetNumberIsValid, LVRelState::phase, LVRelState::relname, LVRelState::relnamespace, VACUUM_ERRCB_PHASE_INDEX_CLEANUP, VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_ERRCB_PHASE_UNKNOWN, VACUUM_ERRCB_PHASE_VACUUM_HEAP, and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by heap_vacuum_rel().
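
vacuum_error_callback() varies the context wording by phase and by which position fields are currently valid. The standalone sketch below (not PostgreSQL code) shows that shape using snprintf() in place of errcontext(); the phase enum, INVALID_BLOCK, and format_context() are hypothetical stand-ins.

    #include <stdint.h>
    #include <stdio.h>

    enum phase {PHASE_UNKNOWN, PHASE_SCAN_HEAP, PHASE_TRUNCATE};
    #define INVALID_BLOCK  UINT32_MAX

    static void
    format_context(char *buf, size_t len, int phase, uint32_t blkno,
                   const char *relname)
    {
        switch (phase)
        {
            case PHASE_SCAN_HEAP:
                if (blkno != INVALID_BLOCK)
                    snprintf(buf, len, "while scanning block %u of \"%s\"",
                             blkno, relname);
                else
                    snprintf(buf, len, "while scanning \"%s\"", relname);
                break;
            case PHASE_TRUNCATE:
                snprintf(buf, len, "while truncating \"%s\" to %u blocks",
                         relname, blkno);
                break;
            default:
                buf[0] = '\0';      /* phase not initialized: say nothing */
                break;
        }
    }

    int
    main(void)
    {
        char        ctx[128];

        format_context(ctx, sizeof(ctx), PHASE_SCAN_HEAP, 123, "pgbench_accounts");
        puts(ctx);
        return 0;
    }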

◆ vacuum_reap_lp_read_stream_next()

static BlockNumber vacuum_reap_lp_read_stream_next ( ReadStream * stream,
void *  callback_private_data,
void *  per_buffer_data 
)
static

Definition at line 2681 of file vacuumlazy.c.

2684{
2685 TidStoreIter *iter = callback_private_data;
2686 TidStoreIterResult *iter_result;
2687
2688 iter_result = TidStoreIterateNext(iter);
2689 if (iter_result == NULL)
2690 return InvalidBlockNumber;
2691
2692 /*
2693 * Save the TidStoreIterResult for later, so we can extract the offsets.
2694 * It is safe to copy the result, according to TidStoreIterateNext().
2695 */
2696 memcpy(per_buffer_data, iter_result, sizeof(*iter_result));
2697
2698 return iter_result->blkno;
2699}
BlockNumber blkno
Definition: tidstore.h:29
TidStoreIterResult * TidStoreIterateNext(TidStoreIter *iter)
Definition: tidstore.c:493

References TidStoreIterResult::blkno, InvalidBlockNumber, and TidStoreIterateNext().

Referenced by lazy_vacuum_heap_rel().
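
The callback above advances the TidStore iterator, copies the result into the caller-provided per-buffer slot so it survives until the buffer is consumed, and returns InvalidBlockNumber when nothing is left. The standalone sketch below (not PostgreSQL code) shows that contract; Iter, IterResult, reap_next(), and INVALID_BLOCK are hypothetical stand-ins for the TidStore iterator, TidStoreIterResult, the callback, and InvalidBlockNumber.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define INVALID_BLOCK  UINT32_MAX

    typedef struct IterResult
    {
        uint32_t    blkno;
        int         noffsets;
    } IterResult;

    typedef struct Iter
    {
        int         pos;
        int         count;
        IterResult  items[3];
    } Iter;

    static uint32_t
    reap_next(Iter *iter, void *per_buffer_data)
    {
        if (iter->pos >= iter->count)
            return INVALID_BLOCK;   /* nothing left to reap */

        /* safe to copy: the caller owns per_buffer_data for this block */
        memcpy(per_buffer_data, &iter->items[iter->pos], sizeof(IterResult));
        return iter->items[iter->pos++].blkno;
    }

    int
    main(void)
    {
        Iter        iter = {0, 3, {{4, 2}, {9, 5}, {17, 1}}};
        IterResult  slot;

        while (reap_next(&iter, &slot) != INVALID_BLOCK)
            printf("block %u with %d offsets\n", slot.blkno, slot.noffsets);
        return 0;
    }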