PostgreSQL Source Code git master
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/genam.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/tidstore.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "catalog/storage.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "common/int.h"
#include "common/pg_prng.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/read_stream.h"
#include "utils/lsyscache.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"
Include dependency graph for vacuumlazy.c:


Data Structures

struct  LVRelState
 
struct  LVSavedErrInfo
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */
 
#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define ParallelVacuumIsActive(vacrel)   ((vacrel)->pvs != NULL)
 
#define MAX_EAGER_FREEZE_SUCCESS_RATE   0.2
 
#define EAGER_SCAN_REGION_SIZE   4096
 
#define VAC_BLK_WAS_EAGER_SCANNED   (1 << 0)
 
#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM   (1 << 1)
 

Typedefs

typedef struct LVRelState LVRelState
 
typedef struct LVSavedErrInfo LVSavedErrInfo
 

Enumerations

enum  VacErrPhase {
  VACUUM_ERRCB_PHASE_UNKNOWN , VACUUM_ERRCB_PHASE_SCAN_HEAP , VACUUM_ERRCB_PHASE_VACUUM_INDEX , VACUUM_ERRCB_PHASE_VACUUM_HEAP ,
  VACUUM_ERRCB_PHASE_INDEX_CLEANUP , VACUUM_ERRCB_PHASE_TRUNCATE
}
 

Functions

static void lazy_scan_heap (LVRelState *vacrel)
 
static void heap_vacuum_eager_scan_setup (LVRelState *vacrel, const VacuumParams params)
 
static BlockNumber heap_vac_scan_next_block (ReadStream *stream, void *callback_private_data, void *per_buffer_data)
 
static void find_next_unskippable_block (LVRelState *vacrel, bool *skipsallvis)
 
static bool lazy_scan_new_or_empty (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
 
static int lazy_scan_prune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool all_visible_according_to_vm, bool *has_lpdead_items, bool *vm_page_frozen)
 
static bool lazy_scan_noprune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
 
static void lazy_vacuum (LVRelState *vacrel)
 
static bool lazy_vacuum_all_indexes (LVRelState *vacrel)
 
static void lazy_vacuum_heap_rel (LVRelState *vacrel)
 
static void lazy_vacuum_heap_page (LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
 
static bool lazy_check_wraparound_failsafe (LVRelState *vacrel)
 
static void lazy_cleanup_all_indexes (LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_vacuum_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_cleanup_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
 
static bool should_attempt_truncation (LVRelState *vacrel)
 
static void lazy_truncate_heap (LVRelState *vacrel)
 
static BlockNumber count_nondeletable_pages (LVRelState *vacrel, bool *lock_waiter_detected)
 
static void dead_items_alloc (LVRelState *vacrel, int nworkers)
 
static void dead_items_add (LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
 
static void dead_items_reset (LVRelState *vacrel)
 
static void dead_items_cleanup (LVRelState *vacrel)
 
static bool heap_page_would_be_all_visible (Relation rel, Buffer buf, TransactionId OldestXmin, OffsetNumber *deadoffsets, int ndeadoffsets, bool *all_frozen, TransactionId *visibility_cutoff_xid, OffsetNumber *logging_offnum)
 
static void update_relstats_all_indexes (LVRelState *vacrel)
 
static void vacuum_error_callback (void *arg)
 
static void update_vacuum_error_info (LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
 
static void restore_vacuum_error_info (LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
 
void heap_vacuum_rel (Relation rel, const VacuumParams params, BufferAccessStrategy bstrategy)
 
static int cmpOffsetNumbers (const void *a, const void *b)
 
static BlockNumber vacuum_reap_lp_read_stream_next (ReadStream *stream, void *callback_private_data, void *per_buffer_data)
 

Macro Definition Documentation

◆ BYPASS_THRESHOLD_PAGES

#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */

Definition at line 186 of file vacuumlazy.c.
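
The threshold is applied against rel_pages, so for illustration a 1,000,000 page table would need fewer than 20,000 pages with LP_DEAD items for the bypass to even be considered. The standalone sketch below shows only that page-count arithmetic; would_consider_bypass() is a hypothetical helper, not a function in vacuumlazy.c, and the real decision in lazy_vacuum() applies further conditions such as dead-item memory usage.

/* Illustrative sketch only: mirrors the "fewer than 2% of rel_pages have
 * LP_DEAD items" arithmetic behind BYPASS_THRESHOLD_PAGES. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BYPASS_THRESHOLD_PAGES 0.02     /* i.e. 2% of rel_pages */

static bool
would_consider_bypass(uint32_t rel_pages, uint32_t lpdead_item_pages)
{
    uint32_t threshold = (uint32_t) (BYPASS_THRESHOLD_PAGES * rel_pages);

    return lpdead_item_pages < threshold;
}

int
main(void)
{
    printf("%d\n", would_consider_bypass(1000000, 15000));   /* 1: under 2% */
    printf("%d\n", would_consider_bypass(1000000, 25000));   /* 0: over 2% */
    return 0;
}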

◆ EAGER_SCAN_REGION_SIZE

#define EAGER_SCAN_REGION_SIZE   4096

Definition at line 249 of file vacuumlazy.c.

◆ FAILSAFE_EVERY_PAGES

#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 192 of file vacuumlazy.c.
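
With the standard 8 kB block size this works out to (4 * 1024 * 1024 * 1024) / 8192 = 524,288 blocks, i.e. the failsafe is rechecked after roughly every 4GB of heap scanned (assuming BLCKSZ is 8192; other block sizes scale the count accordingly).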

◆ MAX_EAGER_FREEZE_SUCCESS_RATE

#define MAX_EAGER_FREEZE_SUCCESS_RATE   0.2

Definition at line 240 of file vacuumlazy.c.

◆ ParallelVacuumIsActive

#define ParallelVacuumIsActive (   vacrel)    ((vacrel)->pvs != NULL)

Definition at line 220 of file vacuumlazy.c.

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 214 of file vacuumlazy.c.

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 169 of file vacuumlazy.c.

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 168 of file vacuumlazy.c.

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 208 of file vacuumlazy.c.

◆ VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM

#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM   (1 << 1)

Definition at line 256 of file vacuumlazy.c.

◆ VAC_BLK_WAS_EAGER_SCANNED

#define VAC_BLK_WAS_EAGER_SCANNED   (1 << 0)

Definition at line 255 of file vacuumlazy.c.

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 201 of file vacuumlazy.c.
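
With the standard 8 kB block size this works out to (8 * 1024 * 1024 * 1024) / 8192 = 1,048,576 blocks, i.e. in the one-pass (no-indexes) case the FSM is vacuumed after roughly every 8GB of heap processed (again assuming BLCKSZ is 8192).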

◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 178 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 180 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 179 of file vacuumlazy.c.

Typedef Documentation

◆ LVRelState

typedef struct LVRelState LVRelState

◆ LVSavedErrInfo

typedef struct LVSavedErrInfo LVSavedErrInfo

Enumeration Type Documentation

◆ VacErrPhase

Enumerator
VACUUM_ERRCB_PHASE_UNKNOWN 
VACUUM_ERRCB_PHASE_SCAN_HEAP 
VACUUM_ERRCB_PHASE_VACUUM_INDEX 
VACUUM_ERRCB_PHASE_VACUUM_HEAP 
VACUUM_ERRCB_PHASE_INDEX_CLEANUP 
VACUUM_ERRCB_PHASE_TRUNCATE 

Definition at line 223 of file vacuumlazy.c.

224{
225 VACUUM_ERRCB_PHASE_UNKNOWN,
226 VACUUM_ERRCB_PHASE_SCAN_HEAP,
227 VACUUM_ERRCB_PHASE_VACUUM_INDEX,
228 VACUUM_ERRCB_PHASE_VACUUM_HEAP,
229 VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
230 VACUUM_ERRCB_PHASE_TRUNCATE,
231} VacErrPhase;

Function Documentation

◆ cmpOffsetNumbers()

static int cmpOffsetNumbers ( const void *  a,
const void *  b 
)
static

Definition at line 1974 of file vacuumlazy.c.

1975{
1976 return pg_cmp_u16(*(const OffsetNumber *) a, *(const OffsetNumber *) b);
1977}
static int pg_cmp_u16(uint16 a, uint16 b)
Definition: int.h:707
int b
Definition: isn.c:74
int a
Definition: isn.c:73
uint16 OffsetNumber
Definition: off.h:24

References a, b, and pg_cmp_u16().

Referenced by lazy_scan_prune().
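
A minimal standalone sketch of the qsort() usage pattern this comparator supports; it substitutes plain uint16_t for the OffsetNumber typedef and re-implements the pg_cmp_u16-style comparison, so it is illustrative rather than PostgreSQL code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Same shape as cmpOffsetNumbers(): compare two uint16 keys for qsort. */
static int
cmp_u16(const void *a, const void *b)
{
    uint16_t va = *(const uint16_t *) a;
    uint16_t vb = *(const uint16_t *) b;

    /* (gt) - (lt) avoids the overflow pitfalls of plain subtraction */
    return (va > vb) - (va < vb);
}

int
main(void)
{
    uint16_t offsets[] = {7, 2, 42, 3};

    qsort(offsets, 4, sizeof(uint16_t), cmp_u16);

    for (int i = 0; i < 4; i++)
        printf("%d ", offsets[i]);      /* prints: 2 3 7 42 */
    printf("\n");
    return 0;
}

lazy_scan_prune() uses the real comparator in the same pattern, keeping a page's dead-item offset array in ascending order.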

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages ( LVRelState vacrel,
bool *  lock_waiter_detected 
)
static

Definition at line 3417 of file vacuumlazy.c.

3418{
3419 StaticAssertDecl((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
3420 "prefetch size must be power of 2");
3421
3422 BlockNumber blkno;
3423 BlockNumber prefetchedUntil;
3424 instr_time starttime;
3425
3426 /* Initialize the starttime if we check for conflicting lock requests */
3427 INSTR_TIME_SET_CURRENT(starttime);
3428
3429 /*
3430 * Start checking blocks at what we believe relation end to be and move
3431 * backwards. (Strange coding of loop control is needed because blkno is
3432 * unsigned.) To make the scan faster, we prefetch a few blocks at a time
3433 * in forward direction, so that OS-level readahead can kick in.
3434 */
3435 blkno = vacrel->rel_pages;
3436 prefetchedUntil = InvalidBlockNumber;
3437 while (blkno > vacrel->nonempty_pages)
3438 {
3439 Buffer buf;
3440 Page page;
3441 OffsetNumber offnum,
3442 maxoff;
3443 bool hastup;
3444
3445 /*
3446 * Check if another process requests a lock on our relation. We are
3447 * holding an AccessExclusiveLock here, so they will be waiting. We
3448 * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
3449 * only check if that interval has elapsed once every 32 blocks to
3450 * keep the number of system calls and actual shared lock table
3451 * lookups to a minimum.
3452 */
3453 if ((blkno % 32) == 0)
3454 {
3455 instr_time currenttime;
3456 instr_time elapsed;
3457
3458 INSTR_TIME_SET_CURRENT(currenttime);
3459 elapsed = currenttime;
3460 INSTR_TIME_SUBTRACT(elapsed, starttime);
3461 if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
3462 >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
3463 {
3464 if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
3465 {
3466 ereport(vacrel->verbose ? INFO : DEBUG2,
3467 (errmsg("table \"%s\": suspending truncate due to conflicting lock request",
3468 vacrel->relname)));
3469
3470 *lock_waiter_detected = true;
3471 return blkno;
3472 }
3473 starttime = currenttime;
3474 }
3475 }
3476
3477 /*
3478 * We don't insert a vacuum delay point here, because we have an
3479 * exclusive lock on the table which we want to hold for as short a
3480 * time as possible. We still need to check for interrupts however.
3481 */
3482 CHECK_FOR_INTERRUPTS();
3483
3484 blkno--;
3485
3486 /* If we haven't prefetched this lot yet, do so now. */
3487 if (prefetchedUntil > blkno)
3488 {
3489 BlockNumber prefetchStart;
3490 BlockNumber pblkno;
3491
3492 prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
3493 for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
3494 {
3495 PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
3496 CHECK_FOR_INTERRUPTS();
3497 }
3498 prefetchedUntil = prefetchStart;
3499 }
3500
3501 buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
3502 vacrel->bstrategy);
3503
3504 /* In this phase we only need shared access to the buffer */
3505 LockBuffer(buf, BUFFER_LOCK_SHARE);
3506
3507 page = BufferGetPage(buf);
3508
3509 if (PageIsNew(page) || PageIsEmpty(page))
3510 {
3511 UnlockReleaseBuffer(buf);
3512 continue;
3513 }
3514
3515 hastup = false;
3516 maxoff = PageGetMaxOffsetNumber(page);
3517 for (offnum = FirstOffsetNumber;
3518 offnum <= maxoff;
3519 offnum = OffsetNumberNext(offnum))
3520 {
3521 ItemId itemid;
3522
3523 itemid = PageGetItemId(page, offnum);
3524
3525 /*
3526 * Note: any non-unused item should be taken as a reason to keep
3527 * this page. Even an LP_DEAD item makes truncation unsafe, since
3528 * we must not have cleaned out its index entries.
3529 */
3530 if (ItemIdIsUsed(itemid))
3531 {
3532 hastup = true;
3533 break; /* can stop scanning */
3534 }
3535 } /* scan along page */
3536
3537 UnlockReleaseBuffer(buf);
3538
3539 /* Done scanning if we found a tuple here */
3540 if (hastup)
3541 return blkno + 1;
3542 }
3543
3544 /*
3545 * If we fall out of the loop, all the previously-thought-to-be-empty
3546 * pages still are; we need not bother to look at the last known-nonempty
3547 * page.
3548 */
3549 return vacrel->nonempty_pages;
3550}
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
int Buffer
Definition: buf.h:23
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:747
void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition: bufmgr.c:5699
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:5478
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:886
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:436
@ BUFFER_LOCK_SHARE
Definition: bufmgr.h:206
@ RBM_NORMAL
Definition: bufmgr.h:46
static bool PageIsEmpty(const PageData *page)
Definition: bufpage.h:223
static bool PageIsNew(const PageData *page)
Definition: bufpage.h:233
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:243
PageData * Page
Definition: bufpage.h:81
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition: bufpage.h:371
#define StaticAssertDecl(condition, errmessage)
Definition: c.h:940
int errmsg(const char *fmt,...)
Definition: elog.c:1080
#define DEBUG2
Definition: elog.h:29
#define INFO
Definition: elog.h:34
#define ereport(elevel,...)
Definition: elog.h:150
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:122
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:181
#define INSTR_TIME_GET_MICROSEC(t)
Definition: instr_time.h:194
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:367
#define AccessExclusiveLock
Definition: lockdefs.h:43
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:123
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
#define FirstOffsetNumber
Definition: off.h:27
static char buf[DEFAULT_XLOG_SEG_SIZE]
Definition: pg_test_fsync.c:71
@ MAIN_FORKNUM
Definition: relpath.h:58
bool verbose
Definition: vacuumlazy.c:297
BlockNumber nonempty_pages
Definition: vacuumlazy.c:340
Relation rel
Definition: vacuumlazy.c:261
BlockNumber rel_pages
Definition: vacuumlazy.c:312
BufferAccessStrategy bstrategy
Definition: vacuumlazy.c:266
char * relname
Definition: vacuumlazy.c:292
#define PREFETCH_SIZE
Definition: vacuumlazy.c:214
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL
Definition: vacuumlazy.c:178

References AccessExclusiveLock, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BufferGetPage(), CHECK_FOR_INTERRUPTS, DEBUG2, ereport, errmsg(), FirstOffsetNumber, INFO, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelState::nonempty_pages, OffsetNumberNext, PageGetItemId(), PageGetMaxOffsetNumber(), PageIsEmpty(), PageIsNew(), PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelState::rel, LVRelState::rel_pages, LVRelState::relname, StaticAssertDecl, UnlockReleaseBuffer(), VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and LVRelState::verbose.

Referenced by lazy_truncate_heap().
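
The prefetchStart computation above relies on PREFETCH_SIZE being a power of two (hence the static assertion): masking with ~(PREFETCH_SIZE - 1) rounds a block number down to the start of its 32-block window. A standalone illustration of just that arithmetic, not PostgreSQL code:

#include <stdint.h>
#include <stdio.h>

#define PREFETCH_SIZE ((uint32_t) 32)

int
main(void)
{
    /* Round blkno down to the previous multiple of PREFETCH_SIZE, the same
     * masking trick used for prefetchStart in count_nondeletable_pages(). */
    uint32_t blkno = 1000;
    uint32_t prefetchStart = blkno & ~(PREFETCH_SIZE - 1);

    printf("%u\n", (unsigned) prefetchStart);   /* 992, i.e. 31 * 32 */
    return 0;
}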

◆ dead_items_add()

static void dead_items_add ( LVRelState vacrel,
BlockNumber  blkno,
OffsetNumber offsets,
int  num_offsets 
)
static

Definition at line 3625 of file vacuumlazy.c.

3627{
3628 const int prog_index[2] = {
3629 PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS,
3630 PROGRESS_VACUUM_DEAD_TUPLE_BYTES
3631 };
3632 int64 prog_val[2];
3633
3634 TidStoreSetBlockOffsets(vacrel->dead_items, blkno, offsets, num_offsets);
3635 vacrel->dead_items_info->num_items += num_offsets;
3636
3637 /* update the progress information */
3638 prog_val[0] = vacrel->dead_items_info->num_items;
3639 prog_val[1] = TidStoreMemoryUsage(vacrel->dead_items);
3640 pgstat_progress_update_multi_param(2, prog_index, prog_val);
3641}
void pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val)
int64_t int64
Definition: c.h:549
#define PROGRESS_VACUUM_DEAD_TUPLE_BYTES
Definition: progress.h:27
#define PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS
Definition: progress.h:28
VacDeadItemsInfo * dead_items_info
Definition: vacuumlazy.c:310
TidStore * dead_items
Definition: vacuumlazy.c:309
int64 num_items
Definition: vacuum.h:300
void TidStoreSetBlockOffsets(TidStore *ts, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
Definition: tidstore.c:345
size_t TidStoreMemoryUsage(TidStore *ts)
Definition: tidstore.c:532

References LVRelState::dead_items, LVRelState::dead_items_info, VacDeadItemsInfo::num_items, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS, TidStoreMemoryUsage(), and TidStoreSetBlockOffsets().

Referenced by lazy_scan_noprune(), and lazy_scan_prune().

◆ dead_items_alloc()

static void dead_items_alloc ( LVRelState vacrel,
int  nworkers 
)
static

Definition at line 3560 of file vacuumlazy.c.

3561{
3562 VacDeadItemsInfo *dead_items_info;
3563 int vac_work_mem = AmAutoVacuumWorkerProcess() &&
3564 autovacuum_work_mem != -1 ?
3565 autovacuum_work_mem : maintenance_work_mem;
3566
3567 /*
3568 * Initialize state for a parallel vacuum. As of now, only one worker can
3569 * be used for an index, so we invoke parallelism only if there are at
3570 * least two indexes on a table.
3571 */
3572 if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)
3573 {
3574 /*
3575 * Since parallel workers cannot access data in temporary tables, we
3576 * can't perform parallel vacuum on them.
3577 */
3578 if (RelationUsesLocalBuffers(vacrel->rel))
3579 {
3580 /*
3581 * Give warning only if the user explicitly tries to perform a
3582 * parallel vacuum on the temporary table.
3583 */
3584 if (nworkers > 0)
3585 ereport(WARNING,
3586 (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
3587 vacrel->relname)));
3588 }
3589 else
3590 vacrel->pvs = parallel_vacuum_init(vacrel->rel, vacrel->indrels,
3591 vacrel->nindexes, nworkers,
3592 vac_work_mem,
3593 vacrel->verbose ? INFO : DEBUG2,
3594 vacrel->bstrategy);
3595
3596 /*
3597 * If parallel mode started, dead_items and dead_items_info spaces are
3598 * allocated in DSM.
3599 */
3600 if (ParallelVacuumIsActive(vacrel))
3601 {
3602 vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs,
3603 &vacrel->dead_items_info);
3604 return;
3605 }
3606 }
3607
3608 /*
3609 * Serial VACUUM case. Allocate both dead_items and dead_items_info
3610 * locally.
3611 */
3612
3613 dead_items_info = palloc_object(VacDeadItemsInfo);
3614 dead_items_info->max_bytes = vac_work_mem * (Size) 1024;
3615 dead_items_info->num_items = 0;
3616 vacrel->dead_items_info = dead_items_info;
3617
3618 vacrel->dead_items = TidStoreCreateLocal(dead_items_info->max_bytes, true);
3619}
int autovacuum_work_mem
Definition: autovacuum.c:120
size_t Size
Definition: c.h:624
#define WARNING
Definition: elog.h:36
#define palloc_object(type)
Definition: fe_memutils.h:74
int maintenance_work_mem
Definition: globals.c:133
#define AmAutoVacuumWorkerProcess()
Definition: miscadmin.h:383
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:647
ParallelVacuumState * pvs
Definition: vacuumlazy.c:267
int nindexes
Definition: vacuumlazy.c:263
Relation * indrels
Definition: vacuumlazy.c:262
bool do_index_vacuuming
Definition: vacuumlazy.c:277
size_t max_bytes
Definition: vacuum.h:299
TidStore * TidStoreCreateLocal(size_t max_bytes, bool insert_only)
Definition: tidstore.c:162
#define ParallelVacuumIsActive(vacrel)
Definition: vacuumlazy.c:220
TidStore * parallel_vacuum_get_dead_items(ParallelVacuumState *pvs, VacDeadItemsInfo **dead_items_info_p)
ParallelVacuumState * parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, int nrequested_workers, int vac_work_mem, int elevel, BufferAccessStrategy bstrategy)

References AmAutoVacuumWorkerProcess, autovacuum_work_mem, LVRelState::bstrategy, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_vacuuming, ereport, errmsg(), LVRelState::indrels, INFO, maintenance_work_mem, VacDeadItemsInfo::max_bytes, LVRelState::nindexes, VacDeadItemsInfo::num_items, palloc_object, parallel_vacuum_get_dead_items(), parallel_vacuum_init(), ParallelVacuumIsActive, LVRelState::pvs, LVRelState::rel, RelationUsesLocalBuffers, LVRelState::relname, TidStoreCreateLocal(), LVRelState::verbose, and WARNING.

Referenced by heap_vacuum_rel().
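
The vac_work_mem initializer at the top boils down to "use autovacuum_work_mem when it is set and this is an autovacuum worker, otherwise fall back to maintenance_work_mem". A hedged standalone sketch of that selection, with local stand-ins for the GUCs and the process check (values are made up, not PostgreSQL defaults):

#include <stdbool.h>
#include <stdio.h>

static int autovacuum_work_mem = -1;        /* -1 means "not set" */
static int maintenance_work_mem = 65536;    /* kB */

static int
choose_vac_work_mem(bool am_autovacuum_worker)
{
    return (am_autovacuum_worker && autovacuum_work_mem != -1) ?
        autovacuum_work_mem : maintenance_work_mem;
}

int
main(void)
{
    printf("%d\n", choose_vac_work_mem(true));      /* 65536: GUC unset, fall back */
    autovacuum_work_mem = 131072;
    printf("%d\n", choose_vac_work_mem(true));      /* 131072: autovacuum override */
    printf("%d\n", choose_vac_work_mem(false));     /* 65536: manual VACUUM */
    return 0;
}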

◆ dead_items_cleanup()

static void dead_items_cleanup ( LVRelState vacrel)
static

Definition at line 3673 of file vacuumlazy.c.

3674{
3675 if (!ParallelVacuumIsActive(vacrel))
3676 {
3677 /* Don't bother with pfree here */
3678 return;
3679 }
3680
3681 /* End parallel mode */
3682 parallel_vacuum_end(vacrel->pvs, vacrel->indstats);
3683 vacrel->pvs = NULL;
3684}
IndexBulkDeleteResult ** indstats
Definition: vacuumlazy.c:346
void parallel_vacuum_end(ParallelVacuumState *pvs, IndexBulkDeleteResult **istats)

References LVRelState::indstats, parallel_vacuum_end(), ParallelVacuumIsActive, and LVRelState::pvs.

Referenced by heap_vacuum_rel().

◆ dead_items_reset()

static void dead_items_reset ( LVRelState vacrel)
static

Definition at line 3647 of file vacuumlazy.c.

3648{
3649 /* Update statistics for dead items */
3650 vacrel->num_dead_items_resets++;
3651 vacrel->total_dead_items_bytes += TidStoreMemoryUsage(vacrel->dead_items);
3652
3653 if (ParallelVacuumIsActive(vacrel))
3654 {
3655 parallel_vacuum_reset_dead_items(vacrel->pvs);
3656 vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs,
3657 &vacrel->dead_items_info);
3658 return;
3659 }
3660
3661 /* Recreate the tidstore with the same max_bytes limitation */
3662 TidStoreDestroy(vacrel->dead_items);
3663 vacrel->dead_items = TidStoreCreateLocal(vacrel->dead_items_info->max_bytes, true);
3664
3665 /* Reset the counter */
3666 vacrel->dead_items_info->num_items = 0;
3667}
int num_dead_items_resets
Definition: vacuumlazy.c:350
Size total_dead_items_bytes
Definition: vacuumlazy.c:351
void TidStoreDestroy(TidStore *ts)
Definition: tidstore.c:317
void parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs)

References LVRelState::dead_items, LVRelState::dead_items_info, VacDeadItemsInfo::max_bytes, LVRelState::num_dead_items_resets, VacDeadItemsInfo::num_items, parallel_vacuum_get_dead_items(), parallel_vacuum_reset_dead_items(), ParallelVacuumIsActive, LVRelState::pvs, TidStoreCreateLocal(), TidStoreDestroy(), TidStoreMemoryUsage(), and LVRelState::total_dead_items_bytes.

Referenced by lazy_vacuum().

◆ find_next_unskippable_block()

static void find_next_unskippable_block ( LVRelState vacrel,
bool *  skipsallvis 
)
static

Definition at line 1732 of file vacuumlazy.c.

1733{
1734 BlockNumber rel_pages = vacrel->rel_pages;
1735 BlockNumber next_unskippable_block = vacrel->next_unskippable_block + 1;
1736 Buffer next_unskippable_vmbuffer = vacrel->next_unskippable_vmbuffer;
1737 bool next_unskippable_eager_scanned = false;
1738 bool next_unskippable_allvis;
1739
1740 *skipsallvis = false;
1741
1742 for (;; next_unskippable_block++)
1743 {
1744 uint8 mapbits = visibilitymap_get_status(vacrel->rel,
1745 next_unskippable_block,
1746 &next_unskippable_vmbuffer);
1747
1748 next_unskippable_allvis = (mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0;
1749
1750 /*
1751 * At the start of each eager scan region, normal vacuums with eager
1752 * scanning enabled reset the failure counter, allowing vacuum to
1753 * resume eager scanning if it had been suspended in the previous
1754 * region.
1755 */
1756 if (next_unskippable_block >= vacrel->next_eager_scan_region_start)
1757 {
1758 vacrel->eager_scan_remaining_fails =
1759 vacrel->eager_scan_max_fails_per_region;
1760 vacrel->next_eager_scan_region_start += EAGER_SCAN_REGION_SIZE;
1761 }
1762
1763 /*
1764 * A block is unskippable if it is not all visible according to the
1765 * visibility map.
1766 */
1767 if (!next_unskippable_allvis)
1768 {
1769 Assert((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0);
1770 break;
1771 }
1772
1773 /*
1774 * Caller must scan the last page to determine whether it has tuples
1775 * (caller must have the opportunity to set vacrel->nonempty_pages).
1776 * This rule avoids having lazy_truncate_heap() take access-exclusive
1777 * lock on rel to attempt a truncation that fails anyway, just because
1778 * there are tuples on the last page (it is likely that there will be
1779 * tuples on other nearby pages as well, but those can be skipped).
1780 *
1781 * Implement this by always treating the last block as unsafe to skip.
1782 */
1783 if (next_unskippable_block == rel_pages - 1)
1784 break;
1785
1786 /* DISABLE_PAGE_SKIPPING makes all skipping unsafe */
1787 if (!vacrel->skipwithvm)
1788 break;
1789
1790 /*
1791 * All-frozen pages cannot contain XIDs < OldestXmin (XIDs that aren't
1792 * already frozen by now), so this page can be skipped.
1793 */
1794 if ((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0)
1795 continue;
1796
1797 /*
1798 * Aggressive vacuums cannot skip any all-visible pages that are not
1799 * also all-frozen.
1800 */
1801 if (vacrel->aggressive)
1802 break;
1803
1804 /*
1805 * Normal vacuums with eager scanning enabled only skip all-visible
1806 * but not all-frozen pages if they have hit the failure limit for the
1807 * current eager scan region.
1808 */
1809 if (vacrel->eager_scan_remaining_fails > 0)
1810 {
1811 next_unskippable_eager_scanned = true;
1812 break;
1813 }
1814
1815 /*
1816 * All-visible blocks are safe to skip in a normal vacuum. But
1817 * remember that the final range contains such a block for later.
1818 */
1819 *skipsallvis = true;
1820 }
1821
1822 /* write the local variables back to vacrel */
1823 vacrel->next_unskippable_block = next_unskippable_block;
1824 vacrel->next_unskippable_allvis = next_unskippable_allvis;
1825 vacrel->next_unskippable_eager_scanned = next_unskippable_eager_scanned;
1826 vacrel->next_unskippable_vmbuffer = next_unskippable_vmbuffer;
1827}
uint8_t uint8
Definition: c.h:550
Assert(PointerIsAligned(start, uint64))
BlockNumber next_eager_scan_region_start
Definition: vacuumlazy.c:379
bool next_unskippable_eager_scanned
Definition: vacuumlazy.c:364
Buffer next_unskippable_vmbuffer
Definition: vacuumlazy.c:365
BlockNumber eager_scan_remaining_fails
Definition: vacuumlazy.c:411
bool aggressive
Definition: vacuumlazy.c:270
BlockNumber next_unskippable_block
Definition: vacuumlazy.c:362
bool skipwithvm
Definition: vacuumlazy.c:272
bool next_unskippable_allvis
Definition: vacuumlazy.c:363
BlockNumber eager_scan_max_fails_per_region
Definition: vacuumlazy.c:401
#define EAGER_SCAN_REGION_SIZE
Definition: vacuumlazy.c:249
uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE

References LVRelState::aggressive, Assert(), LVRelState::eager_scan_max_fails_per_region, EAGER_SCAN_REGION_SIZE, LVRelState::eager_scan_remaining_fails, LVRelState::next_eager_scan_region_start, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::rel, LVRelState::rel_pages, LVRelState::skipwithvm, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_get_status().

Referenced by heap_vac_scan_next_block().

◆ heap_page_would_be_all_visible()

static bool heap_page_would_be_all_visible ( Relation  rel,
Buffer  buf,
TransactionId  OldestXmin,
OffsetNumber deadoffsets,
int  ndeadoffsets,
bool *  all_frozen,
TransactionId visibility_cutoff_xid,
OffsetNumber logging_offnum 
)
static

Definition at line 3741 of file vacuumlazy.c.

3748{
3749 Page page = BufferGetPage(buf);
3750 BlockNumber blockno = BufferGetBlockNumber(buf);
3751 OffsetNumber offnum,
3752 maxoff;
3753 bool all_visible = true;
3754 int matched_dead_count = 0;
3755
3756 *visibility_cutoff_xid = InvalidTransactionId;
3757 *all_frozen = true;
3758
3759 Assert(ndeadoffsets == 0 || deadoffsets);
3760
3761#ifdef USE_ASSERT_CHECKING
3762 /* Confirm input deadoffsets[] is strictly sorted */
3763 if (ndeadoffsets > 1)
3764 {
3765 for (int i = 1; i < ndeadoffsets; i++)
3766 Assert(deadoffsets[i - 1] < deadoffsets[i]);
3767 }
3768#endif
3769
3770 maxoff = PageGetMaxOffsetNumber(page);
3771 for (offnum = FirstOffsetNumber;
3772 offnum <= maxoff && all_visible;
3773 offnum = OffsetNumberNext(offnum))
3774 {
3775 ItemId itemid;
3776 HeapTupleData tuple;
3777
3778 /*
3779 * Set the offset number so that we can display it along with any
3780 * error that occurred while processing this tuple.
3781 */
3782 *logging_offnum = offnum;
3783 itemid = PageGetItemId(page, offnum);
3784
3785 /* Unused or redirect line pointers are of no interest */
3786 if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
3787 continue;
3788
3789 ItemPointerSet(&(tuple.t_self), blockno, offnum);
3790
3791 /*
3792 * Dead line pointers can have index pointers pointing to them. So
3793 * they can't be treated as visible
3794 */
3795 if (ItemIdIsDead(itemid))
3796 {
3797 if (!deadoffsets ||
3798 matched_dead_count >= ndeadoffsets ||
3799 deadoffsets[matched_dead_count] != offnum)
3800 {
3801 *all_frozen = all_visible = false;
3802 break;
3803 }
3804 matched_dead_count++;
3805 continue;
3806 }
3807
3808 Assert(ItemIdIsNormal(itemid));
3809
3810 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
3811 tuple.t_len = ItemIdGetLength(itemid);
3812 tuple.t_tableOid = RelationGetRelid(rel);
3813
3814 /* Visibility checks may do IO or allocate memory */
3815 Assert(CritSectionCount == 0);
3816 switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
3817 {
3818 case HEAPTUPLE_LIVE:
3819 {
3820 TransactionId xmin;
3821
3822 /* Check comments in lazy_scan_prune. */
3823 if (!HeapTupleHeaderXminCommitted(tuple.t_data))
3824 {
3825 all_visible = false;
3826 *all_frozen = false;
3827 break;
3828 }
3829
3830 /*
3831 * The inserter definitely committed. But is it old enough
3832 * that everyone sees it as committed?
3833 */
3834 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
3835 if (!TransactionIdPrecedes(xmin, OldestXmin))
3836 {
3837 all_visible = false;
3838 *all_frozen = false;
3839 break;
3840 }
3841
3842 /* Track newest xmin on page. */
3843 if (TransactionIdFollows(xmin, *visibility_cutoff_xid) &&
3844 TransactionIdIsNormal(xmin))
3845 *visibility_cutoff_xid = xmin;
3846
3847 /* Check whether this tuple is already frozen or not */
3848 if (all_visible && *all_frozen &&
3849 heap_tuple_needs_eventual_freeze(tuple.t_data))
3850 *all_frozen = false;
3851 }
3852 break;
3853
3854 case HEAPTUPLE_DEAD:
3855 case HEAPTUPLE_RECENTLY_DEAD:
3856 case HEAPTUPLE_INSERT_IN_PROGRESS:
3857 case HEAPTUPLE_DELETE_IN_PROGRESS:
3858 {
3859 all_visible = false;
3860 *all_frozen = false;
3861 break;
3862 }
3863 default:
3864 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
3865 break;
3866 }
3867 } /* scan along page */
3868
3869 /* Clear the offset information once we have processed the given page. */
3870 *logging_offnum = InvalidOffsetNumber;
3871
3872 return all_visible;
3873}
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:4318
static void * PageGetItem(PageData *page, const ItemIdData *itemId)
Definition: bufpage.h:353
uint32 TransactionId
Definition: c.h:671
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
volatile uint32 CritSectionCount
Definition: globals.c:45
bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
Definition: heapam.c:7855
@ HEAPTUPLE_RECENTLY_DEAD
Definition: heapam.h:128
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition: heapam.h:129
@ HEAPTUPLE_LIVE
Definition: heapam.h:127
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition: heapam.h:130
@ HEAPTUPLE_DEAD
Definition: heapam.h:126
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static TransactionId HeapTupleHeaderGetXmin(const HeapTupleHeaderData *tup)
Definition: htup_details.h:324
static bool HeapTupleHeaderXminCommitted(const HeapTupleHeaderData *tup)
Definition: htup_details.h:337
int i
Definition: isn.c:77
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition: itemptr.h:135
#define InvalidOffsetNumber
Definition: off.h:26
#define RelationGetRelid(relation)
Definition: rel.h:515
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
static bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition: transam.h:297
#define InvalidTransactionId
Definition: transam.h:31
#define TransactionIdIsNormal(xid)
Definition: transam.h:42
static bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.h:263

References Assert(), buf, BufferGetBlockNumber(), BufferGetPage(), CritSectionCount, elog, ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin(), HeapTupleHeaderXminCommitted(), HeapTupleSatisfiesVacuum(), i, InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), OffsetNumberNext, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdIsNormal, and TransactionIdPrecedes().

Referenced by lazy_vacuum_heap_page().
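
The deadoffsets handling is a single forward merge: because both the page walk and the caller-supplied deadoffsets[] are in ascending offset order, one cursor (matched_dead_count) suffices to check that every LP_DEAD item encountered was already reported by the caller. An illustrative standalone sketch of that matching, with plain integers standing in for offset numbers (not PostgreSQL code):

#include <stdbool.h>
#include <stdio.h>

/* Return true only if each "dead" offset seen while walking the page
 * (ascending) matches the next entry of the caller's sorted array. */
static bool
dead_items_accounted_for(const int *page_dead, int npage_dead,
                         const int *deadoffsets, int ndeadoffsets)
{
    int matched = 0;

    for (int i = 0; i < npage_dead; i++)
    {
        if (matched >= ndeadoffsets || deadoffsets[matched] != page_dead[i])
            return false;   /* unexpected LP_DEAD item: page not all-visible */
        matched++;
    }
    return true;
}

int
main(void)
{
    int on_page[] = {4, 9};
    int reported[] = {4, 9};
    int reported_short[] = {4};

    printf("%d\n", dead_items_accounted_for(on_page, 2, reported, 2));        /* 1 */
    printf("%d\n", dead_items_accounted_for(on_page, 2, reported_short, 1));  /* 0 */
    return 0;
}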

◆ heap_vac_scan_next_block()

static BlockNumber heap_vac_scan_next_block ( ReadStream stream,
void *  callback_private_data,
void *  per_buffer_data 
)
static

Definition at line 1627 of file vacuumlazy.c.

1630{
1631 BlockNumber next_block;
1632 LVRelState *vacrel = callback_private_data;
1633 uint8 blk_info = 0;
1634
1635 /* relies on InvalidBlockNumber + 1 overflowing to 0 on first call */
1636 next_block = vacrel->current_block + 1;
1637
1638 /* Have we reached the end of the relation? */
1639 if (next_block >= vacrel->rel_pages)
1640 {
1641 if (BufferIsValid(vacrel->next_unskippable_vmbuffer))
1642 {
1643 ReleaseBuffer(vacrel->next_unskippable_vmbuffer);
1644 vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1645 }
1646 return InvalidBlockNumber;
1647 }
1648
1649 /*
1650 * We must be in one of the three following states:
1651 */
1652 if (next_block > vacrel->next_unskippable_block ||
1653 vacrel->next_unskippable_block == InvalidBlockNumber)
1654 {
1655 /*
1656 * 1. We have just processed an unskippable block (or we're at the
1657 * beginning of the scan). Find the next unskippable block using the
1658 * visibility map.
1659 */
1660 bool skipsallvis;
1661
1662 find_next_unskippable_block(vacrel, &skipsallvis);
1663
1664 /*
1665 * We now know the next block that we must process. It can be the
1666 * next block after the one we just processed, or something further
1667 * ahead. If it's further ahead, we can jump to it, but we choose to
1668 * do so only if we can skip at least SKIP_PAGES_THRESHOLD consecutive
1669 * pages. Since we're reading sequentially, the OS should be doing
1670 * readahead for us, so there's no gain in skipping a page now and
1671 * then. Skipping such a range might even discourage sequential
1672 * detection.
1673 *
1674 * This test also enables more frequent relfrozenxid advancement
1675 * during non-aggressive VACUUMs. If the range has any all-visible
1676 * pages then skipping makes updating relfrozenxid unsafe, which is a
1677 * real downside.
1678 */
1679 if (vacrel->next_unskippable_block - next_block >= SKIP_PAGES_THRESHOLD)
1680 {
1681 next_block = vacrel->next_unskippable_block;
1682 if (skipsallvis)
1683 vacrel->skippedallvis = true;
1684 }
1685 }
1686
1687 /* Now we must be in one of the two remaining states: */
1688 if (next_block < vacrel->next_unskippable_block)
1689 {
1690 /*
1691 * 2. We are processing a range of blocks that we could have skipped
1692 * but chose not to. We know that they are all-visible in the VM,
1693 * otherwise they would've been unskippable.
1694 */
1695 vacrel->current_block = next_block;
1696 blk_info |= VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM;
1697 *((uint8 *) per_buffer_data) = blk_info;
1698 return vacrel->current_block;
1699 }
1700 else
1701 {
1702 /*
1703 * 3. We reached the next unskippable block. Process it. On next
1704 * iteration, we will be back in state 1.
1705 */
1706 Assert(next_block == vacrel->next_unskippable_block);
1707
1708 vacrel->current_block = next_block;
1709 if (vacrel->next_unskippable_allvis)
1710 blk_info |= VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM;
1711 if (vacrel->next_unskippable_eager_scanned)
1712 blk_info |= VAC_BLK_WAS_EAGER_SCANNED;
1713 *((uint8 *) per_buffer_data) = blk_info;
1714 return vacrel->current_block;
1715 }
1716}
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:5461
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:387
BlockNumber current_block
Definition: vacuumlazy.c:361
bool skippedallvis
Definition: vacuumlazy.c:287
#define VAC_BLK_WAS_EAGER_SCANNED
Definition: vacuumlazy.c:255
static void find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis)
Definition: vacuumlazy.c:1732
#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM
Definition: vacuumlazy.c:256
#define SKIP_PAGES_THRESHOLD
Definition: vacuumlazy.c:208

References Assert(), BufferIsValid(), LVRelState::current_block, find_next_unskippable_block(), InvalidBlockNumber, InvalidBuffer, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::rel_pages, ReleaseBuffer(), SKIP_PAGES_THRESHOLD, LVRelState::skippedallvis, VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM, and VAC_BLK_WAS_EAGER_SCANNED.

Referenced by lazy_scan_heap().
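
The SKIP_PAGES_THRESHOLD decision in state 1 reduces to simple arithmetic: jump ahead only when the run of skippable all-visible blocks is at least 32 pages; shorter runs are read anyway so the I/O stays sequential. A hedged sketch of just that test (should_jump() is a made-up name, not a function in vacuumlazy.c):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKIP_PAGES_THRESHOLD ((uint32_t) 32)

static bool
should_jump(uint32_t next_block, uint32_t next_unskippable_block)
{
    /* Same comparison as in heap_vac_scan_next_block(): only skip ranges of
     * at least SKIP_PAGES_THRESHOLD consecutive pages. */
    return (next_unskippable_block - next_block) >= SKIP_PAGES_THRESHOLD;
}

int
main(void)
{
    printf("%d\n", should_jump(100, 120));  /* 0: only 20 skippable pages, read them */
    printf("%d\n", should_jump(100, 200));  /* 1: 100 skippable pages, jump ahead */
    return 0;
}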

◆ heap_vacuum_eager_scan_setup()

static void heap_vacuum_eager_scan_setup ( LVRelState vacrel,
const VacuumParams  params 
)
static

Definition at line 502 of file vacuumlazy.c.

503{
504 uint32 randseed;
505 BlockNumber allvisible;
506 BlockNumber allfrozen;
507 float first_region_ratio;
508 bool oldest_unfrozen_before_cutoff = false;
509
510 /*
511 * Initialize eager scan management fields to their disabled values.
512 * Aggressive vacuums, normal vacuums of small tables, and normal vacuums
513 * of tables without sufficiently old tuples disable eager scanning.
514 */
517 vacrel->eager_scan_remaining_fails = 0;
519
520 /* If eager scanning is explicitly disabled, just return. */
521 if (params.max_eager_freeze_failure_rate == 0)
522 return;
523
524 /*
525 * The caller will have determined whether or not an aggressive vacuum is
526 * required by either the vacuum parameters or the relative age of the
527 * oldest unfrozen transaction IDs. An aggressive vacuum must scan every
528 * all-visible page to safely advance the relfrozenxid and/or relminmxid,
529 * so scans of all-visible pages are not considered eager.
530 */
531 if (vacrel->aggressive)
532 return;
533
534 /*
535 * Aggressively vacuuming a small relation shouldn't take long, so it
536 * isn't worth amortizing. We use two times the region size as the size
537 * cutoff because the eager scan start block is a random spot somewhere in
538 * the first region, making the second region the first to be eager
539 * scanned normally.
540 */
541 if (vacrel->rel_pages < 2 * EAGER_SCAN_REGION_SIZE)
542 return;
543
544 /*
545 * We only want to enable eager scanning if we are likely to be able to
546 * freeze some of the pages in the relation.
547 *
548 * Tuples with XIDs older than OldestXmin or MXIDs older than OldestMxact
549 * are technically freezable, but we won't freeze them unless the criteria
550 * for opportunistic freezing is met. Only tuples with XIDs/MXIDs older
551 * than the FreezeLimit/MultiXactCutoff are frozen in the common case.
552 *
553 * So, as a heuristic, we wait until the FreezeLimit has advanced past the
554 * relfrozenxid or the MultiXactCutoff has advanced past the relminmxid to
555 * enable eager scanning.
556 */
559 vacrel->cutoffs.FreezeLimit))
560 oldest_unfrozen_before_cutoff = true;
561
562 if (!oldest_unfrozen_before_cutoff &&
565 vacrel->cutoffs.MultiXactCutoff))
566 oldest_unfrozen_before_cutoff = true;
567
568 if (!oldest_unfrozen_before_cutoff)
569 return;
570
571 /* We have met the criteria to eagerly scan some pages. */
572
573 /*
574 * Our success cap is MAX_EAGER_FREEZE_SUCCESS_RATE of the number of
575 * all-visible but not all-frozen blocks in the relation.
576 */
577 visibilitymap_count(vacrel->rel, &allvisible, &allfrozen);
578
581 (allvisible - allfrozen));
582
583 /* If every all-visible page is frozen, eager scanning is disabled. */
584 if (vacrel->eager_scan_remaining_successes == 0)
585 return;
586
587 /*
588 * Now calculate the bounds of the first eager scan region. Its end block
589 * will be a random spot somewhere in the first EAGER_SCAN_REGION_SIZE
590 * blocks. This affects the bounds of all subsequent regions and avoids
591 * eager scanning and failing to freeze the same blocks each vacuum of the
592 * relation.
593 */
595
597
600
604
605 /*
606 * The first region will be smaller than subsequent regions. As such,
607 * adjust the eager freeze failures tolerated for this region.
608 */
609 first_region_ratio = 1 - (float) vacrel->next_eager_scan_region_start /
611
614 first_region_ratio;
615}
uint32_t uint32
Definition: c.h:552
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:2833
#define MultiXactIdIsValid(multi)
Definition: multixact.h:29
uint32 pg_prng_uint32(pg_prng_state *state)
Definition: pg_prng.c:227
pg_prng_state pg_global_prng_state
Definition: pg_prng.c:34
BlockNumber eager_scan_remaining_successes
Definition: vacuumlazy.c:390
struct VacuumCutoffs cutoffs
Definition: vacuumlazy.c:282
TransactionId FreezeLimit
Definition: vacuum.h:289
TransactionId relfrozenxid
Definition: vacuum.h:263
MultiXactId relminmxid
Definition: vacuum.h:264
MultiXactId MultiXactCutoff
Definition: vacuum.h:290
double max_eager_freeze_failure_rate
Definition: vacuum.h:244
#define MAX_EAGER_FREEZE_SUCCESS_RATE
Definition: vacuumlazy.c:240
void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)

References LVRelState::aggressive, Assert(), LVRelState::cutoffs, LVRelState::eager_scan_max_fails_per_region, EAGER_SCAN_REGION_SIZE, LVRelState::eager_scan_remaining_fails, LVRelState::eager_scan_remaining_successes, VacuumCutoffs::FreezeLimit, InvalidBlockNumber, VacuumParams::max_eager_freeze_failure_rate, MAX_EAGER_FREEZE_SUCCESS_RATE, VacuumCutoffs::MultiXactCutoff, MultiXactIdIsValid, MultiXactIdPrecedes(), LVRelState::next_eager_scan_region_start, pg_global_prng_state, pg_prng_uint32(), LVRelState::rel, LVRelState::rel_pages, VacuumCutoffs::relfrozenxid, VacuumCutoffs::relminmxid, TransactionIdIsNormal, TransactionIdPrecedes(), and visibilitymap_count().

Referenced by heap_vacuum_rel().
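
As a worked example with made-up numbers: in a relation with allvisible = 10,000 and allfrozen = 2,000 pages, the success cap is MAX_EAGER_FREEZE_SUCCESS_RATE * (10,000 - 2,000) = 0.2 * 8,000 = 1,600 eagerly frozen pages per vacuum. If the randomly derived next_eager_scan_region_start comes out as 1,024, then first_region_ratio = 1 - 1024/4096 = 0.75, so the first (shorter) region gets 0.75 * eager_scan_max_fails_per_region as its failure budget; with a max_eager_freeze_failure_rate of, say, 0.03, eager_scan_max_fails_per_region = 0.03 * 4096 ≈ 122, giving roughly 92 tolerated eager-freeze failures in the first region.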

◆ heap_vacuum_rel()

void heap_vacuum_rel ( Relation  rel,
const VacuumParams  params,
BufferAccessStrategy  bstrategy 
)

Definition at line 629 of file vacuumlazy.c.

631{
632 LVRelState *vacrel;
633 bool verbose,
634 instrument,
635 skipwithvm,
636 frozenxid_updated,
637 minmulti_updated;
638 BlockNumber orig_rel_pages,
639 new_rel_pages,
640 new_rel_allvisible,
641 new_rel_allfrozen;
642 PGRUsage ru0;
643 TimestampTz starttime = 0;
644 PgStat_Counter startreadtime = 0,
645 startwritetime = 0;
646 WalUsage startwalusage = pgWalUsage;
647 BufferUsage startbufferusage = pgBufferUsage;
648 ErrorContextCallback errcallback;
649 char **indnames = NULL;
650 Size dead_items_max_bytes = 0;
651
652 verbose = (params.options & VACOPT_VERBOSE) != 0;
653 instrument = (verbose || (AmAutoVacuumWorkerProcess() &&
654 params.log_vacuum_min_duration >= 0));
655 if (instrument)
656 {
657 pg_rusage_init(&ru0);
658 if (track_io_timing)
659 {
660 startreadtime = pgStatBlockReadTime;
661 startwritetime = pgStatBlockWriteTime;
662 }
663 }
664
665 /* Used for instrumentation and stats report */
666 starttime = GetCurrentTimestamp();
667
669 RelationGetRelid(rel));
672 params.is_wraparound
675 else
678
679 /*
680 * Setup error traceback support for ereport() first. The idea is to set
681 * up an error context callback to display additional information on any
682 * error during a vacuum. During different phases of vacuum, we update
683 * the state so that the error context callback always display current
684 * information.
685 *
686 * Copy the names of heap rel into local memory for error reporting
687 * purposes, too. It isn't always safe to assume that we can get the name
688 * of each rel. It's convenient for code in lazy_scan_heap to always use
689 * these temp copies.
690 */
691 vacrel = palloc0_object(LVRelState);
695 vacrel->indname = NULL;
697 vacrel->verbose = verbose;
698 errcallback.callback = vacuum_error_callback;
699 errcallback.arg = vacrel;
700 errcallback.previous = error_context_stack;
701 error_context_stack = &errcallback;
702
703 /* Set up high level stuff about rel and its indexes */
704 vacrel->rel = rel;
706 &vacrel->indrels);
707 vacrel->bstrategy = bstrategy;
708 if (instrument && vacrel->nindexes > 0)
709 {
710 /* Copy index names used by instrumentation (not error reporting) */
711 indnames = palloc_array(char *, vacrel->nindexes);
712 for (int i = 0; i < vacrel->nindexes; i++)
713 indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
714 }
715
716 /*
717 * The index_cleanup param either disables index vacuuming and cleanup or
718 * forces it to go ahead when we would otherwise apply the index bypass
719 * optimization. The default is 'auto', which leaves the final decision
720 * up to lazy_vacuum().
721 *
722 * The truncate param allows user to avoid attempting relation truncation,
723 * though it can't force truncation to happen.
724 */
727 params.truncate != VACOPTVALUE_AUTO);
728
729 /*
730 * While VacuumFailSafeActive is reset to false before calling this, we
731 * still need to reset it here due to recursive calls.
732 */
733 VacuumFailsafeActive = false;
734 vacrel->consider_bypass_optimization = true;
735 vacrel->do_index_vacuuming = true;
736 vacrel->do_index_cleanup = true;
737 vacrel->do_rel_truncate = (params.truncate != VACOPTVALUE_DISABLED);
738 if (params.index_cleanup == VACOPTVALUE_DISABLED)
739 {
740 /* Force disable index vacuuming up-front */
741 vacrel->do_index_vacuuming = false;
742 vacrel->do_index_cleanup = false;
743 }
744 else if (params.index_cleanup == VACOPTVALUE_ENABLED)
745 {
746 /* Force index vacuuming. Note that failsafe can still bypass. */
747 vacrel->consider_bypass_optimization = false;
748 }
749 else
750 {
751 /* Default/auto, make all decisions dynamically */
752 Assert(params.index_cleanup == VACOPTVALUE_AUTO);
753 }
754
755 /* Initialize page counters explicitly (be tidy) */
756 vacrel->scanned_pages = 0;
757 vacrel->eager_scanned_pages = 0;
758 vacrel->removed_pages = 0;
759 vacrel->new_frozen_tuple_pages = 0;
760 vacrel->lpdead_item_pages = 0;
761 vacrel->missed_dead_pages = 0;
762 vacrel->nonempty_pages = 0;
763 /* dead_items_alloc allocates vacrel->dead_items later on */
764
765 /* Allocate/initialize output statistics state */
766 vacrel->new_rel_tuples = 0;
767 vacrel->new_live_tuples = 0;
768 vacrel->indstats = (IndexBulkDeleteResult **)
769 palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
770
771 /* Initialize remaining counters (be tidy) */
772 vacrel->num_index_scans = 0;
773 vacrel->num_dead_items_resets = 0;
774 vacrel->total_dead_items_bytes = 0;
775 vacrel->tuples_deleted = 0;
776 vacrel->tuples_frozen = 0;
777 vacrel->lpdead_items = 0;
778 vacrel->live_tuples = 0;
779 vacrel->recently_dead_tuples = 0;
780 vacrel->missed_dead_tuples = 0;
781
782 vacrel->vm_new_visible_pages = 0;
783 vacrel->vm_new_visible_frozen_pages = 0;
784 vacrel->vm_new_frozen_pages = 0;
785
786 /*
787 * Get cutoffs that determine which deleted tuples are considered DEAD,
788 * not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze. Then determine
789 * the extent of the blocks that we'll scan in lazy_scan_heap. It has to
790 * happen in this order to ensure that the OldestXmin cutoff field works
791 * as an upper bound on the XIDs stored in the pages we'll actually scan
792 * (NewRelfrozenXid tracking must never be allowed to miss unfrozen XIDs).
793 *
794 * Next acquire vistest, a related cutoff that's used in pruning. We use
795 * vistest in combination with OldestXmin to ensure that
796 * heap_page_prune_and_freeze() always removes any deleted tuple whose
797 * xmax is < OldestXmin. lazy_scan_prune must never become confused about
798 * whether a tuple should be frozen or removed. (In the future we might
799 * want to teach lazy_scan_prune to recompute vistest from time to time,
800 * to increase the number of dead tuples it can prune away.)
801 */
802 vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
803 vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
804 vacrel->vistest = GlobalVisTestFor(rel);
805
806 /* Initialize state used to track oldest extant XID/MXID */
807 vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin;
808 vacrel->NewRelminMxid = vacrel->cutoffs.OldestMxact;
809
810 /*
811 * Initialize state related to tracking all-visible page skipping. This is
812 * very important to determine whether or not it is safe to advance the
813 * relfrozenxid/relminmxid.
814 */
815 vacrel->skippedallvis = false;
816 skipwithvm = true;
817 if (params.options & VACOPT_DISABLE_PAGE_SKIPPING)
818 {
819 /*
820 * Force aggressive mode, and disable skipping blocks using the
821 * visibility map (even those set all-frozen)
822 */
823 vacrel->aggressive = true;
824 skipwithvm = false;
825 }
826
827 vacrel->skipwithvm = skipwithvm;
828
829 /*
830 * Set up eager scan tracking state. This must happen after determining
831 * whether or not the vacuum must be aggressive, because only normal
832 * vacuums use the eager scan algorithm.
833 */
834 heap_vacuum_eager_scan_setup(vacrel, params);
835
836 /* Report the vacuum mode: 'normal' or 'aggressive' */
838 vacrel->aggressive
841
842 if (verbose)
843 {
844 if (vacrel->aggressive)
846 (errmsg("aggressively vacuuming \"%s.%s.%s\"",
847 vacrel->dbname, vacrel->relnamespace,
848 vacrel->relname)));
849 else
851 (errmsg("vacuuming \"%s.%s.%s\"",
852 vacrel->dbname, vacrel->relnamespace,
853 vacrel->relname)));
854 }
855
856 /*
857 * Allocate dead_items memory using dead_items_alloc. This handles
858 * parallel VACUUM initialization as part of allocating shared memory
859 * space used for dead_items. (But do a failsafe precheck first, to
860 * ensure that parallel VACUUM won't be attempted at all when relfrozenxid
861 * is already dangerously old.)
862 */
863 lazy_check_wraparound_failsafe(vacrel);
864 dead_items_alloc(vacrel, params.nworkers);
865
866 /*
867 * Call lazy_scan_heap to perform all required heap pruning, index
868 * vacuuming, and heap vacuuming (plus related processing)
869 */
870 lazy_scan_heap(vacrel);
871
872 /*
873 * Save dead items max_bytes and update the memory usage statistics before
874 * cleanup, they are freed in parallel vacuum cases during
875 * dead_items_cleanup().
876 */
877 dead_items_max_bytes = vacrel->dead_items_info->max_bytes;
879
880 /*
881 * Free resources managed by dead_items_alloc. This ends parallel mode in
882 * passing when necessary.
883 */
884 dead_items_cleanup(vacrel);
886
887 /*
888 * Update pg_class entries for each of rel's indexes where appropriate.
889 *
890 * Unlike the later update to rel's pg_class entry, this is not critical.
891 * Maintains relpages/reltuples statistics used by the planner only.
892 */
893 if (vacrel->do_index_cleanup)
894 update_relstats_all_indexes(vacrel);
895
896 /* Done with rel's indexes */
897 vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
898
899 /* Optionally truncate rel */
900 if (should_attempt_truncation(vacrel))
901 lazy_truncate_heap(vacrel);
902
903 /* Pop the error context stack */
904 error_context_stack = errcallback.previous;
905
906 /* Report that we are now doing final cleanup */
909
910 /*
911 * Prepare to update rel's pg_class entry.
912 *
913 * Aggressive VACUUMs must always be able to advance relfrozenxid to a
914 * value >= FreezeLimit, and relminmxid to a value >= MultiXactCutoff.
915 * Non-aggressive VACUUMs may advance them by any amount, or not at all.
916 */
917 Assert(vacrel->NewRelfrozenXid == vacrel->cutoffs.OldestXmin ||
919 vacrel->cutoffs.relfrozenxid,
920 vacrel->NewRelfrozenXid));
921 Assert(vacrel->NewRelminMxid == vacrel->cutoffs.OldestMxact ||
923 vacrel->cutoffs.relminmxid,
924 vacrel->NewRelminMxid));
925 if (vacrel->skippedallvis)
926 {
927 /*
928 * Must keep original relfrozenxid in a non-aggressive VACUUM that
929 * chose to skip an all-visible page range. The state that tracks new
930 * values will have missed unfrozen XIDs from the pages we skipped.
931 */
932 Assert(!vacrel->aggressive);
933 vacrel->NewRelfrozenXid = InvalidTransactionId;
934 vacrel->NewRelminMxid = InvalidMultiXactId;
935 }
936
937 /*
938 * For safety, clamp relallvisible to be not more than what we're setting
939 * pg_class.relpages to
940 */
941 new_rel_pages = vacrel->rel_pages; /* After possible rel truncation */
942 visibilitymap_count(rel, &new_rel_allvisible, &new_rel_allfrozen);
943 if (new_rel_allvisible > new_rel_pages)
944 new_rel_allvisible = new_rel_pages;
945
946 /*
947 * An all-frozen block _must_ be all-visible. As such, clamp the count of
948 * all-frozen blocks to the count of all-visible blocks. This matches the
949 * clamping of relallvisible above.
950 */
951 if (new_rel_allfrozen > new_rel_allvisible)
952 new_rel_allfrozen = new_rel_allvisible;
953
954 /*
955 * Now actually update rel's pg_class entry.
956 *
957 * In principle new_live_tuples could be -1 indicating that we (still)
958 * don't know the tuple count. In practice that can't happen, since we
959 * scan every page that isn't skipped using the visibility map.
960 */
961 vac_update_relstats(rel, new_rel_pages, vacrel->new_live_tuples,
962 new_rel_allvisible, new_rel_allfrozen,
963 vacrel->nindexes > 0,
964 vacrel->NewRelfrozenXid, vacrel->NewRelminMxid,
965 &frozenxid_updated, &minmulti_updated, false);
966
967 /*
968 * Report results to the cumulative stats system, too.
969 *
970 * Deliberately avoid telling the stats system about LP_DEAD items that
971 * remain in the table due to VACUUM bypassing index and heap vacuuming.
972 * ANALYZE will consider the remaining LP_DEAD items to be dead "tuples".
973 * It seems like a good idea to err on the side of not vacuuming again too
974 * soon in cases where the failsafe prevented significant amounts of heap
975 * vacuuming.
976 */
978 Max(vacrel->new_live_tuples, 0),
979 vacrel->recently_dead_tuples +
980 vacrel->missed_dead_tuples,
981 starttime);
982 pgstat_progress_end_command();
983
984 if (instrument)
985 {
986 TimestampTz endtime = GetCurrentTimestamp();
987
988 if (verbose || params.log_vacuum_min_duration == 0 ||
989 TimestampDifferenceExceeds(starttime, endtime,
991 {
992 long secs_dur;
993 int usecs_dur;
994 WalUsage walusage;
995 BufferUsage bufferusage;
996 StringInfoData buf;
997 char *msgfmt;
998 int32 diff;
999 double read_rate = 0,
1000 write_rate = 0;
1001 int64 total_blks_hit;
1002 int64 total_blks_read;
1003 int64 total_blks_dirtied;
1004
1005 TimestampDifference(starttime, endtime, &secs_dur, &usecs_dur);
1006 memset(&walusage, 0, sizeof(WalUsage));
1007 WalUsageAccumDiff(&walusage, &pgWalUsage, &startwalusage);
1008 memset(&bufferusage, 0, sizeof(BufferUsage));
1009 BufferUsageAccumDiff(&bufferusage, &pgBufferUsage, &startbufferusage);
1010
1011 total_blks_hit = bufferusage.shared_blks_hit +
1012 bufferusage.local_blks_hit;
1013 total_blks_read = bufferusage.shared_blks_read +
1014 bufferusage.local_blks_read;
1015 total_blks_dirtied = bufferusage.shared_blks_dirtied +
1016 bufferusage.local_blks_dirtied;
1017
1018 initStringInfo(&buf);
1019 if (verbose)
1020 {
1021 /*
1022 * Aggressiveness already reported earlier, in dedicated
1023 * VACUUM VERBOSE ereport
1024 */
1025 Assert(!params.is_wraparound);
1026 msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
1027 }
1028 else if (params.is_wraparound)
1029 {
1030 /*
1031 * While it's possible for a VACUUM to be both is_wraparound
1032 * and !aggressive, that's just a corner-case -- is_wraparound
1033 * implies aggressive. Produce distinct output for the corner
1034 * case all the same, just in case.
1035 */
1036 if (vacrel->aggressive)
1037 msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
1038 else
1039 msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
1040 }
1041 else
1042 {
1043 if (vacrel->aggressive)
1044 msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
1045 else
1046 msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
1047 }
1048 appendStringInfo(&buf, msgfmt,
1049 vacrel->dbname,
1050 vacrel->relnamespace,
1051 vacrel->relname,
1052 vacrel->num_index_scans);
1053 appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total), %u eagerly scanned\n"),
1054 vacrel->removed_pages,
1055 new_rel_pages,
1056 vacrel->scanned_pages,
1057 orig_rel_pages == 0 ? 100.0 :
1058 100.0 * vacrel->scanned_pages /
1059 orig_rel_pages,
1060 vacrel->eager_scanned_pages);
1062 _("tuples: %" PRId64 " removed, %" PRId64 " remain, %" PRId64 " are dead but not yet removable\n"),
1063 vacrel->tuples_deleted,
1064 (int64) vacrel->new_rel_tuples,
1065 vacrel->recently_dead_tuples);
1066 if (vacrel->missed_dead_tuples > 0)
1068 _("tuples missed: %" PRId64 " dead from %u pages not removed due to cleanup lock contention\n"),
1069 vacrel->missed_dead_tuples,
1070 vacrel->missed_dead_pages);
1071 diff = (int32) (ReadNextTransactionId() -
1072 vacrel->cutoffs.OldestXmin);
1074 _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
1075 vacrel->cutoffs.OldestXmin, diff);
1076 if (frozenxid_updated)
1077 {
1078 diff = (int32) (vacrel->NewRelfrozenXid -
1079 vacrel->cutoffs.relfrozenxid);
1081 _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
1082 vacrel->NewRelfrozenXid, diff);
1083 }
1084 if (minmulti_updated)
1085 {
1086 diff = (int32) (vacrel->NewRelminMxid -
1087 vacrel->cutoffs.relminmxid);
1089 _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
1090 vacrel->NewRelminMxid, diff);
1091 }
1092 appendStringInfo(&buf, _("frozen: %u pages from table (%.2f%% of total) had %" PRId64 " tuples frozen\n"),
1093 vacrel->new_frozen_tuple_pages,
1094 orig_rel_pages == 0 ? 100.0 :
1095 100.0 * vacrel->new_frozen_tuple_pages /
1096 orig_rel_pages,
1097 vacrel->tuples_frozen);
1098
1100 _("visibility map: %u pages set all-visible, %u pages set all-frozen (%u were all-visible)\n"),
1101 vacrel->vm_new_visible_pages,
1102 vacrel->vm_new_visible_frozen_pages +
1103 vacrel->vm_new_frozen_pages,
1104 vacrel->vm_new_frozen_pages);
1105 if (vacrel->do_index_vacuuming)
1106 {
1107 if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
1108 appendStringInfoString(&buf, _("index scan not needed: "));
1109 else
1110 appendStringInfoString(&buf, _("index scan needed: "));
1111
1112 msgfmt = _("%u pages from table (%.2f%% of total) had %" PRId64 " dead item identifiers removed\n");
1113 }
1114 else
1115 {
1116 if (!VacuumFailsafeActive)
1117 appendStringInfoString(&buf, _("index scan bypassed: "));
1118 else
1119 appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
1120
1121 msgfmt = _("%u pages from table (%.2f%% of total) have %" PRId64 " dead item identifiers\n");
1122 }
1123 appendStringInfo(&buf, msgfmt,
1124 vacrel->lpdead_item_pages,
1125 orig_rel_pages == 0 ? 100.0 :
1126 100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
1127 vacrel->lpdead_items);
1128 for (int i = 0; i < vacrel->nindexes; i++)
1129 {
1130 IndexBulkDeleteResult *istat = vacrel->indstats[i];
1131
1132 if (!istat)
1133 continue;
1134
1136 _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
1137 indnames[i],
1138 istat->num_pages,
1139 istat->pages_newly_deleted,
1140 istat->pages_deleted,
1141 istat->pages_free);
1142 }
1143 if (track_cost_delay_timing)
1144 {
1145 /*
1146 * We bypass the changecount mechanism because this value is
1147 * only updated by the calling process. We also rely on the
1148 * above call to pgstat_progress_end_command() to not clear
1149 * the st_progress_param array.
1150 */
1151 appendStringInfo(&buf, _("delay time: %.3f ms\n"),
1152 (double) MyBEEntry->st_progress_param[PROGRESS_VACUUM_DELAY_TIME] / 1000000.0);
1153 }
1154 if (track_io_timing)
1155 {
1156 double read_ms = (double) (pgStatBlockReadTime - startreadtime) / 1000;
1157 double write_ms = (double) (pgStatBlockWriteTime - startwritetime) / 1000;
1158
1159 appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
1160 read_ms, write_ms);
1161 }
1162 if (secs_dur > 0 || usecs_dur > 0)
1163 {
1164 read_rate = (double) BLCKSZ * total_blks_read /
1165 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
1166 write_rate = (double) BLCKSZ * total_blks_dirtied /
1167 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
1168 }
1169 appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
1170 read_rate, write_rate);
1172 _("buffer usage: %" PRId64 " hits, %" PRId64 " reads, %" PRId64 " dirtied\n"),
1173 total_blks_hit,
1174 total_blks_read,
1175 total_blks_dirtied);
1177 _("WAL usage: %" PRId64 " records, %" PRId64 " full page images, %" PRIu64 " bytes, %" PRIu64 " full page image bytes, %" PRId64 " buffers full\n"),
1178 walusage.wal_records,
1179 walusage.wal_fpi,
1180 walusage.wal_bytes,
1181 walusage.wal_fpi_bytes,
1182 walusage.wal_buffers_full);
1183
1184 /*
1185 * Report the dead items memory usage.
1186 *
1187 * The num_dead_items_resets counter increases when we reset the
1188 * collected dead items, so the counter is non-zero if at least
1189 * one dead item has been collected, even if index vacuuming is
1190 * disabled.
1191 */
1193 ngettext("memory usage: dead item storage %.2f MB accumulated across %d reset (limit %.2f MB each)\n",
1194 "memory usage: dead item storage %.2f MB accumulated across %d resets (limit %.2f MB each)\n",
1195 vacrel->num_dead_items_resets),
1196 (double) vacrel->total_dead_items_bytes / (1024 * 1024),
1197 vacrel->num_dead_items_resets,
1198 (double) dead_items_max_bytes / (1024 * 1024));
1199 appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
1200
1201 ereport(verbose ? INFO : LOG,
1202 (errmsg_internal("%s", buf.data)));
1203 pfree(buf.data);
1204 }
1205 }
1206
1207 /* Cleanup index statistics and index names */
1208 for (int i = 0; i < vacrel->nindexes; i++)
1209 {
1210 if (vacrel->indstats[i])
1211 pfree(vacrel->indstats[i]);
1212
1213 if (instrument)
1214 pfree(indnames[i]);
1215 }
1216}
void TimestampDifference(TimestampTz start_time, TimestampTz stop_time, long *secs, int *microsecs)
Definition: timestamp.c:1721
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1781
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1645
void pgstat_progress_start_command(ProgressCommandType cmdtype, Oid relid)
void pgstat_progress_update_param(int index, int64 val)
void pgstat_progress_end_command(void)
@ PROGRESS_COMMAND_VACUUM
PgBackendStatus * MyBEEntry
bool track_io_timing
Definition: bufmgr.c:169
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:294
#define ngettext(s, p, n)
Definition: c.h:1158
#define Max(x, y)
Definition: c.h:989
int32_t int32
Definition: c.h:548
int64 TimestampTz
Definition: timestamp.h:39
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1170
ErrorContextCallback * error_context_stack
Definition: elog.c:95
#define _(x)
Definition: elog.c:91
#define LOG
Definition: elog.h:31
#define palloc_array(type, count)
Definition: fe_memutils.h:76
#define palloc0_object(type)
Definition: fe_memutils.h:75
Oid MyDatabaseId
Definition: globals.c:94
int verbose
WalUsage pgWalUsage
Definition: instrument.c:22
void WalUsageAccumDiff(WalUsage *dst, const WalUsage *add, const WalUsage *sub)
Definition: instrument.c:288
BufferUsage pgBufferUsage
Definition: instrument.c:20
void BufferUsageAccumDiff(BufferUsage *dst, const BufferUsage *add, const BufferUsage *sub)
Definition: instrument.c:248
#define NoLock
Definition: lockdefs.h:34
#define RowExclusiveLock
Definition: lockdefs.h:38
char * get_database_name(Oid dbid)
Definition: lsyscache.c:1242
char * get_namespace_name(Oid nspid)
Definition: lsyscache.c:3516
char * pstrdup(const char *in)
Definition: mcxt.c:1781
void pfree(void *pointer)
Definition: mcxt.c:1616
void * palloc0(Size size)
Definition: mcxt.c:1417
bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:2847
#define InvalidMultiXactId
Definition: multixact.h:25
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
int64 PgStat_Counter
Definition: pgstat.h:67
PgStat_Counter pgStatBlockReadTime
PgStat_Counter pgStatBlockWriteTime
void pgstat_report_vacuum(Relation rel, PgStat_Counter livetuples, PgStat_Counter deadtuples, TimestampTz starttime)
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4067
#define PROGRESS_VACUUM_PHASE_FINAL_CLEANUP
Definition: progress.h:41
#define PROGRESS_VACUUM_MODE
Definition: progress.h:32
#define PROGRESS_VACUUM_MODE_NORMAL
Definition: progress.h:44
#define PROGRESS_VACUUM_STARTED_BY_AUTOVACUUM
Definition: progress.h:50
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
#define PROGRESS_VACUUM_DELAY_TIME
Definition: progress.h:31
#define PROGRESS_VACUUM_STARTED_BY_AUTOVACUUM_WRAPAROUND
Definition: progress.h:51
#define PROGRESS_VACUUM_STARTED_BY_MANUAL
Definition: progress.h:49
#define PROGRESS_VACUUM_STARTED_BY
Definition: progress.h:33
#define PROGRESS_VACUUM_MODE_AGGRESSIVE
Definition: progress.h:45
#define RelationGetRelationName(relation)
Definition: rel.h:549
#define RelationGetNamespace(relation)
Definition: rel.h:556
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:145
void appendStringInfoString(StringInfo str, const char *s)
Definition: stringinfo.c:230
void initStringInfo(StringInfo str)
Definition: stringinfo.c:97
int64 shared_blks_dirtied
Definition: instrument.h:28
int64 local_blks_hit
Definition: instrument.h:30
int64 shared_blks_read
Definition: instrument.h:27
int64 local_blks_read
Definition: instrument.h:31
int64 local_blks_dirtied
Definition: instrument.h:32
int64 shared_blks_hit
Definition: instrument.h:26
struct ErrorContextCallback * previous
Definition: elog.h:297
void(* callback)(void *arg)
Definition: elog.h:298
BlockNumber pages_deleted
Definition: genam.h:109
BlockNumber pages_newly_deleted
Definition: genam.h:108
BlockNumber pages_free
Definition: genam.h:110
BlockNumber num_pages
Definition: genam.h:104
BlockNumber vm_new_frozen_pages
Definition: vacuumlazy.c:336
int64 tuples_deleted
Definition: vacuumlazy.c:353
bool do_rel_truncate
Definition: vacuumlazy.c:279
BlockNumber scanned_pages
Definition: vacuumlazy.c:313
BlockNumber new_frozen_tuple_pages
Definition: vacuumlazy.c:322
GlobalVisState * vistest
Definition: vacuumlazy.c:283
BlockNumber removed_pages
Definition: vacuumlazy.c:321
int num_index_scans
Definition: vacuumlazy.c:349
double new_live_tuples
Definition: vacuumlazy.c:344
double new_rel_tuples
Definition: vacuumlazy.c:343
TransactionId NewRelfrozenXid
Definition: vacuumlazy.c:285
bool consider_bypass_optimization
Definition: vacuumlazy.c:274
int64 recently_dead_tuples
Definition: vacuumlazy.c:357
int64 tuples_frozen
Definition: vacuumlazy.c:354
char * dbname
Definition: vacuumlazy.c:290
BlockNumber missed_dead_pages
Definition: vacuumlazy.c:339
char * relnamespace
Definition: vacuumlazy.c:291
int64 live_tuples
Definition: vacuumlazy.c:356
int64 lpdead_items
Definition: vacuumlazy.c:355
BlockNumber lpdead_item_pages
Definition: vacuumlazy.c:338
BlockNumber eager_scanned_pages
Definition: vacuumlazy.c:319
bool do_index_cleanup
Definition: vacuumlazy.c:278
MultiXactId NewRelminMxid
Definition: vacuumlazy.c:286
int64 missed_dead_tuples
Definition: vacuumlazy.c:358
BlockNumber vm_new_visible_pages
Definition: vacuumlazy.c:325
VacErrPhase phase
Definition: vacuumlazy.c:296
char * indname
Definition: vacuumlazy.c:293
BlockNumber vm_new_visible_frozen_pages
Definition: vacuumlazy.c:333
int64 st_progress_param[PGSTAT_NUM_PROGRESS_PARAM]
TransactionId OldestXmin
Definition: vacuum.h:279
MultiXactId OldestMxact
Definition: vacuum.h:280
int nworkers
Definition: vacuum.h:251
VacOptValue truncate
Definition: vacuum.h:236
bits32 options
Definition: vacuum.h:219
int log_vacuum_min_duration
Definition: vacuum.h:227
bool is_wraparound
Definition: vacuum.h:226
VacOptValue index_cleanup
Definition: vacuum.h:235
int64 wal_buffers_full
Definition: instrument.h:57
uint64 wal_bytes
Definition: instrument.h:55
int64 wal_fpi
Definition: instrument.h:54
uint64 wal_fpi_bytes
Definition: instrument.h:56
int64 wal_records
Definition: instrument.h:53
static TransactionId ReadNextTransactionId(void)
Definition: transam.h:377
static bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
Definition: transam.h:282
bool track_cost_delay_timing
Definition: vacuum.c:82
void vac_open_indexes(Relation relation, LOCKMODE lockmode, int *nindexes, Relation **Irel)
Definition: vacuum.c:2362
void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
Definition: vacuum.c:2405
bool VacuumFailsafeActive
Definition: vacuum.c:110
void vac_update_relstats(Relation relation, BlockNumber num_pages, double num_tuples, BlockNumber num_all_visible_pages, BlockNumber num_all_frozen_pages, bool hasindex, TransactionId frozenxid, MultiXactId minmulti, bool *frozenxid_updated, bool *minmulti_updated, bool in_outer_xact)
Definition: vacuum.c:1426
bool vacuum_get_cutoffs(Relation rel, const VacuumParams params, struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:1100
#define VACOPT_VERBOSE
Definition: vacuum.h:182
@ VACOPTVALUE_AUTO
Definition: vacuum.h:203
@ VACOPTVALUE_ENABLED
Definition: vacuum.h:205
@ VACOPTVALUE_UNSPECIFIED
Definition: vacuum.h:202
@ VACOPTVALUE_DISABLED
Definition: vacuum.h:204
#define VACOPT_DISABLE_PAGE_SKIPPING
Definition: vacuum.h:188
static void dead_items_cleanup(LVRelState *vacrel)
Definition: vacuumlazy.c:3673
static void update_relstats_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:3879
static void heap_vacuum_eager_scan_setup(LVRelState *vacrel, const VacuumParams params)
Definition: vacuumlazy.c:502
static void vacuum_error_callback(void *arg)
Definition: vacuumlazy.c:3914
static void lazy_truncate_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:3286
static bool should_attempt_truncation(LVRelState *vacrel)
Definition: vacuumlazy.c:3266
static void lazy_scan_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:1255
static bool lazy_check_wraparound_failsafe(LVRelState *vacrel)
Definition: vacuumlazy.c:3035
static void dead_items_alloc(LVRelState *vacrel, int nworkers)
Definition: vacuumlazy.c:3560
bool IsInParallelMode(void)
Definition: xact.c:1090

References _, LVRelState::aggressive, AmAutoVacuumWorkerProcess, appendStringInfo(), appendStringInfoString(), ErrorContextCallback::arg, Assert(), LVRelState::bstrategy, buf, BufferUsageAccumDiff(), ErrorContextCallback::callback, LVRelState::consider_bypass_optimization, LVRelState::cutoffs, LVRelState::dbname, LVRelState::dead_items, dead_items_alloc(), dead_items_cleanup(), LVRelState::dead_items_info, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, LVRelState::eager_scanned_pages, ereport, errmsg(), errmsg_internal(), error_context_stack, VacuumCutoffs::FreezeLimit, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), GlobalVisTestFor(), heap_vacuum_eager_scan_setup(), i, VacuumParams::index_cleanup, LVRelState::indname, LVRelState::indrels, LVRelState::indstats, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, VacuumParams::is_wraparound, IsInParallelMode(), lazy_check_wraparound_failsafe(), lazy_scan_heap(), lazy_truncate_heap(), LVRelState::live_tuples, BufferUsage::local_blks_dirtied, BufferUsage::local_blks_hit, BufferUsage::local_blks_read, LOG, VacuumParams::log_vacuum_min_duration, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, Max, VacDeadItemsInfo::max_bytes, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, VacuumCutoffs::MultiXactCutoff, MultiXactIdPrecedesOrEquals(), MyBEEntry, MyDatabaseId, LVRelState::new_frozen_tuple_pages, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, ngettext, LVRelState::nindexes, NoLock, LVRelState::nonempty_pages, LVRelState::num_dead_items_resets, LVRelState::num_index_scans, IndexBulkDeleteResult::num_pages, VacuumParams::nworkers, VacuumCutoffs::OldestMxact, VacuumCutoffs::OldestXmin, VacuumParams::options, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, IndexBulkDeleteResult::pages_newly_deleted, palloc0(), palloc0_object, palloc_array, pfree(), pg_rusage_init(), pg_rusage_show(), pgBufferUsage, pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), pgStatBlockReadTime, pgStatBlockWriteTime, pgWalUsage, LVRelState::phase, ErrorContextCallback::previous, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_DELAY_TIME, PROGRESS_VACUUM_MODE, PROGRESS_VACUUM_MODE_AGGRESSIVE, PROGRESS_VACUUM_MODE_NORMAL, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, PROGRESS_VACUUM_STARTED_BY, PROGRESS_VACUUM_STARTED_BY_AUTOVACUUM, PROGRESS_VACUUM_STARTED_BY_AUTOVACUUM_WRAPAROUND, PROGRESS_VACUUM_STARTED_BY_MANUAL, pstrdup(), ReadNextTransactionId(), LVRelState::recently_dead_tuples, LVRelState::rel, LVRelState::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, VacuumCutoffs::relfrozenxid, VacuumCutoffs::relminmxid, LVRelState::relname, LVRelState::relnamespace, LVRelState::removed_pages, RowExclusiveLock, LVRelState::scanned_pages, BufferUsage::shared_blks_dirtied, BufferUsage::shared_blks_hit, BufferUsage::shared_blks_read, should_attempt_truncation(), LVRelState::skippedallvis, LVRelState::skipwithvm, PgBackendStatus::st_progress_param, TidStoreMemoryUsage(), TimestampDifference(), TimestampDifferenceExceeds(), LVRelState::total_dead_items_bytes, track_cost_delay_timing, track_io_timing, TransactionIdPrecedesOrEquals(), VacuumParams::truncate, LVRelState::tuples_deleted, LVRelState::tuples_frozen, update_relstats_all_indexes(), vac_close_indexes(), 
vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_VERBOSE, VACOPTVALUE_AUTO, VACOPTVALUE_DISABLED, VACOPTVALUE_ENABLED, VACOPTVALUE_UNSPECIFIED, VACUUM_ERRCB_PHASE_UNKNOWN, vacuum_error_callback(), vacuum_get_cutoffs(), VacuumFailsafeActive, LVRelState::verbose, verbose, visibilitymap_count(), LVRelState::vistest, LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, LVRelState::vm_new_visible_pages, WalUsage::wal_buffers_full, WalUsage::wal_bytes, WalUsage::wal_fpi, WalUsage::wal_fpi_bytes, WalUsage::wal_records, and WalUsageAccumDiff().

◆ lazy_check_wraparound_failsafe()

static bool lazy_check_wraparound_failsafe ( LVRelState vacrel)
static

Definition at line 3035 of file vacuumlazy.c.

3036{
3037 /* Don't warn more than once per VACUUM */
3038 if (VacuumFailsafeActive)
3039 return true;
3040
3041 if (unlikely(vacuum_xid_failsafe_check(&vacrel->cutoffs)))
3042 {
3043 const int progress_index[] = {
3044 PROGRESS_VACUUM_INDEXES_TOTAL,
3045 PROGRESS_VACUUM_INDEXES_PROCESSED,
3046 PROGRESS_VACUUM_MODE
3047 };
3048 int64 progress_val[3] = {0, 0, PROGRESS_VACUUM_MODE_FAILSAFE};
3049
3050 VacuumFailsafeActive = true;
3051
3052 /*
3053 * Abandon use of a buffer access strategy to allow use of all of
3054 * shared buffers. We assume the caller who allocated the memory for
3055 * the BufferAccessStrategy will free it.
3056 */
3057 vacrel->bstrategy = NULL;
3058
3059 /* Disable index vacuuming, index cleanup, and heap rel truncation */
3060 vacrel->do_index_vacuuming = false;
3061 vacrel->do_index_cleanup = false;
3062 vacrel->do_rel_truncate = false;
3063
3064 /* Reset the progress counters and set the failsafe mode */
3065 pgstat_progress_update_multi_param(3, progress_index, progress_val);
3066
3068 (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
3069 vacrel->dbname, vacrel->relnamespace, vacrel->relname,
3070 vacrel->num_index_scans),
3071 errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
3072 errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
3073 "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
3074
3075 /* Stop applying cost limits from this point on */
3076 VacuumCostActive = false;
3077 VacuumCostBalance = 0;
3078
3079 return true;
3080 }
3081
3082 return false;
3083}
#define unlikely(x)
Definition: c.h:418
int errdetail(const char *fmt,...)
Definition: elog.c:1216
int errhint(const char *fmt,...)
Definition: elog.c:1330
bool VacuumCostActive
Definition: globals.c:158
int VacuumCostBalance
Definition: globals.c:157
#define PROGRESS_VACUUM_MODE_FAILSAFE
Definition: progress.h:46
#define PROGRESS_VACUUM_INDEXES_PROCESSED
Definition: progress.h:30
#define PROGRESS_VACUUM_INDEXES_TOTAL
Definition: progress.h:29
bool vacuum_xid_failsafe_check(const struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:1268

References LVRelState::bstrategy, LVRelState::cutoffs, LVRelState::dbname, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errdetail(), errhint(), errmsg(), LVRelState::num_index_scans, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_MODE, PROGRESS_VACUUM_MODE_FAILSAFE, LVRelState::relname, LVRelState::relnamespace, unlikely, vacuum_xid_failsafe_check(), VacuumCostActive, VacuumCostBalance, VacuumFailsafeActive, and WARNING.

Referenced by heap_vacuum_rel(), lazy_scan_heap(), and lazy_vacuum_all_indexes().
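Once the failsafe fires it behaves as a one-way switch for the rest of the VACUUM: nonessential work (index vacuuming, index cleanup, heap truncation) is turned off, cost-based delays stop applying, and later calls return true immediately. A standalone sketch of that state transition is below; the struct and function names are illustrative stand-ins, not the actual LVRelState fields or backend API.

#include <stdbool.h>

typedef struct VacFlags
{
    bool failsafe_active;
    bool do_index_vacuuming;
    bool do_index_cleanup;
    bool do_rel_truncate;
    bool cost_delay_active;
} VacFlags;

/* Illustrative: trigger at most once, then disable nonessential work. */
static bool
maybe_trigger_failsafe(VacFlags *flags, bool xid_age_is_dangerous)
{
    if (flags->failsafe_active)
        return true;            /* already triggered earlier in this VACUUM */

    if (!xid_age_is_dangerous)
        return false;

    flags->failsafe_active = true;
    flags->do_index_vacuuming = false;
    flags->do_index_cleanup = false;
    flags->do_rel_truncate = false;
    flags->cost_delay_active = false;
    return true;
}

Because the check is cheap once triggered, callers can afford to re-run it periodically during the scan, as lazy_scan_heap() does every FAILSAFE_EVERY_PAGES blocks, without emitting repeated warnings.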

◆ lazy_cleanup_all_indexes()

static void lazy_cleanup_all_indexes ( LVRelState vacrel)
static

Definition at line 3089 of file vacuumlazy.c.

3090{
3091 double reltuples = vacrel->new_rel_tuples;
3092 bool estimated_count = vacrel->scanned_pages < vacrel->rel_pages;
3093 const int progress_start_index[] = {
3094 PROGRESS_VACUUM_PHASE,
3095 PROGRESS_VACUUM_INDEXES_TOTAL
3096 };
3097 const int progress_end_index[] = {
3098 PROGRESS_VACUUM_INDEXES_TOTAL,
3099 PROGRESS_VACUUM_INDEXES_PROCESSED
3100 };
3101 int64 progress_start_val[2];
3102 int64 progress_end_val[2] = {0, 0};
3103
3104 Assert(vacrel->do_index_cleanup);
3105 Assert(vacrel->nindexes > 0);
3106
3107 /*
3108 * Report that we are now cleaning up indexes and the number of indexes to
3109 * clean up.
3110 */
3111 progress_start_val[0] = PROGRESS_VACUUM_PHASE_INDEX_CLEANUP;
3112 progress_start_val[1] = vacrel->nindexes;
3113 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
3114
3115 if (!ParallelVacuumIsActive(vacrel))
3116 {
3117 for (int idx = 0; idx < vacrel->nindexes; idx++)
3118 {
3119 Relation indrel = vacrel->indrels[idx];
3120 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
3121
3122 vacrel->indstats[idx] =
3123 lazy_cleanup_one_index(indrel, istat, reltuples,
3124 estimated_count, vacrel);
3125
3126 /* Report the number of indexes cleaned up */
3127 pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
3128 idx + 1);
3129 }
3130 }
3131 else
3132 {
3133 /* Outsource everything to parallel variant */
3134 parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
3135 vacrel->num_index_scans,
3136 estimated_count);
3137 }
3138
3139 /* Reset the progress counters */
3140 pgstat_progress_update_multi_param(2, progress_end_index, progress_end_val);
3141}
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:262
#define PROGRESS_VACUUM_PHASE_INDEX_CLEANUP
Definition: progress.h:39
static IndexBulkDeleteResult * lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
Definition: vacuumlazy.c:3206
void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, bool estimated_count)

References Assert(), LVRelState::do_index_cleanup, idx(), LVRelState::indrels, LVRelState::indstats, lazy_cleanup_one_index(), LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::num_index_scans, parallel_vacuum_cleanup_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, LVRelState::pvs, LVRelState::rel_pages, and LVRelState::scanned_pages.

Referenced by lazy_scan_heap().
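In the serial path each index is handed the same reltuples figure, flagged as an estimate whenever some heap pages were skipped, and the indexes-processed progress counter is bumped after each index finishes. A standalone sketch of that bookkeeping, with illustrative callback names rather than backend APIs:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t my_BlockNumber;    /* illustrative stand-in for BlockNumber */

static void
cleanup_indexes_serially(int nindexes,
                         double reltuples,
                         my_BlockNumber scanned_pages,
                         my_BlockNumber rel_pages,
                         void (*cleanup_one) (int idx, double reltuples, bool estimated),
                         void (*report_processed) (int n_done))
{
    /* The tuple count is exact only if no heap pages were skipped. */
    bool estimated = scanned_pages < rel_pages;

    for (int idx = 0; idx < nindexes; idx++)
    {
        cleanup_one(idx, reltuples, estimated);
        report_processed(idx + 1);  /* counts completed indexes */
    }
}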

◆ lazy_cleanup_one_index()

static IndexBulkDeleteResult * lazy_cleanup_one_index ( Relation  indrel,
IndexBulkDeleteResult istat,
double  reltuples,
bool  estimated_count,
LVRelState vacrel 
)
static

Definition at line 3206 of file vacuumlazy.c.

3209{
3210 IndexVacuumInfo ivinfo;
3211 LVSavedErrInfo saved_err_info;
3212
3213 ivinfo.index = indrel;
3214 ivinfo.heaprel = vacrel->rel;
3215 ivinfo.analyze_only = false;
3216 ivinfo.report_progress = false;
3217 ivinfo.estimated_count = estimated_count;
3218 ivinfo.message_level = DEBUG2;
3219
3220 ivinfo.num_heap_tuples = reltuples;
3221 ivinfo.strategy = vacrel->bstrategy;
3222
3223 /*
3224 * Update error traceback information.
3225 *
3226 * The index name is saved during this phase and restored immediately
3227 * after this phase. See vacuum_error_callback.
3228 */
3229 Assert(vacrel->indname == NULL);
3230 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
3231 update_vacuum_error_info(vacrel, &saved_err_info,
3232 VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
3233 InvalidBlockNumber, InvalidOffsetNumber);
3234
3235 istat = vac_cleanup_one_index(&ivinfo, istat);
3236
3237 /* Revert to the previous phase information for error traceback */
3238 restore_vacuum_error_info(vacrel, &saved_err_info);
3239 pfree(vacrel->indname);
3240 vacrel->indname = NULL;
3241
3242 return istat;
3243}
Relation index
Definition: genam.h:73
double num_heap_tuples
Definition: genam.h:79
bool analyze_only
Definition: genam.h:75
BufferAccessStrategy strategy
Definition: genam.h:80
Relation heaprel
Definition: genam.h:74
bool report_progress
Definition: genam.h:76
int message_level
Definition: genam.h:78
bool estimated_count
Definition: genam.h:77
IndexBulkDeleteResult * vac_cleanup_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat)
Definition: vacuum.c:2654
static void restore_vacuum_error_info(LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
Definition: vacuumlazy.c:3997
static void update_vacuum_error_info(LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
Definition: vacuumlazy.c:3978

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_cleanup_one_index(), and VACUUM_ERRCB_PHASE_INDEX_CLEANUP.

Referenced by lazy_cleanup_all_indexes().
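Each per-index phase brackets its work with the same save-set-restore discipline for the error-traceback state: remember the caller's phase details, overwrite them for the duration of the nested operation, then restore them so the error context callback always describes the innermost step. A generic standalone sketch of that pattern follows; the struct and function names are illustrative, not the LVRelState/LVSavedErrInfo layout.

typedef struct ErrInfo
{
    int phase;
    unsigned blkno;
    unsigned offnum;
} ErrInfo;

/* Save the caller's values, then switch to the nested phase. */
static void
err_info_push(ErrInfo *cur, ErrInfo *saved, int phase, unsigned blkno, unsigned offnum)
{
    *saved = *cur;
    cur->phase = phase;
    cur->blkno = blkno;
    cur->offnum = offnum;
}

/* Put the caller's values back once the nested work is done. */
static void
err_info_pop(ErrInfo *cur, const ErrInfo *saved)
{
    *cur = *saved;
}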

◆ lazy_scan_heap()

static void lazy_scan_heap ( LVRelState vacrel)
static

Definition at line 1255 of file vacuumlazy.c.

1256{
1257 ReadStream *stream;
1258 BlockNumber rel_pages = vacrel->rel_pages,
1259 blkno = 0,
1260 next_fsm_block_to_vacuum = 0;
1261 BlockNumber orig_eager_scan_success_limit =
1262 vacrel->eager_scan_remaining_successes; /* for logging */
1263 Buffer vmbuffer = InvalidBuffer;
1264 const int initprog_index[] = {
1265 PROGRESS_VACUUM_PHASE,
1266 PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
1267 PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES
1268 };
1269 int64 initprog_val[3];
1270
1271 /* Report that we're scanning the heap, advertising total # of blocks */
1272 initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
1273 initprog_val[1] = rel_pages;
1274 initprog_val[2] = vacrel->dead_items_info->max_bytes;
1275 pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
1276
1277 /* Initialize for the first heap_vac_scan_next_block() call */
1278 vacrel->current_block = InvalidBlockNumber;
1279 vacrel->next_unskippable_block = InvalidBlockNumber;
1280 vacrel->next_unskippable_allvis = false;
1281 vacrel->next_unskippable_eager_scanned = false;
1282 vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1283
1284 /*
1285 * Set up the read stream for vacuum's first pass through the heap.
1286 *
1287 * This could be made safe for READ_STREAM_USE_BATCHING, but only with
1288 * explicit work in heap_vac_scan_next_block.
1289 */
1290 stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE,
1291 vacrel->bstrategy,
1292 vacrel->rel,
1293 MAIN_FORKNUM,
1294 heap_vac_scan_next_block,
1295 vacrel,
1296 sizeof(uint8));
1297
1298 while (true)
1299 {
1300 Buffer buf;
1301 Page page;
1302 uint8 blk_info = 0;
1303 int ndeleted = 0;
1304 bool has_lpdead_items;
1305 void *per_buffer_data = NULL;
1306 bool vm_page_frozen = false;
1307 bool got_cleanup_lock = false;
1308
1309 vacuum_delay_point(false);
1310
1311 /*
1312 * Regularly check if wraparound failsafe should trigger.
1313 *
1314 * There is a similar check inside lazy_vacuum_all_indexes(), but
1315 * relfrozenxid might start to look dangerously old before we reach
1316 * that point. This check also provides failsafe coverage for the
1317 * one-pass strategy, and the two-pass strategy with the index_cleanup
1318 * param set to 'off'.
1319 */
1320 if (vacrel->scanned_pages > 0 &&
1321 vacrel->scanned_pages % FAILSAFE_EVERY_PAGES == 0)
1322 lazy_check_wraparound_failsafe(vacrel);
1323
1324 /*
1325 * Consider if we definitely have enough space to process TIDs on page
1326 * already. If we are close to overrunning the available space for
1327 * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
1328 * this page. However, let's force at least one page-worth of tuples
1329 * to be stored, to ensure we do at least some work when the memory
1330 * configured is so low that we run out before storing anything.
1331 */
1332 if (vacrel->dead_items_info->num_items > 0 &&
1333 TidStoreMemoryUsage(vacrel->dead_items) > vacrel->dead_items_info->max_bytes)
1334 {
1335 /*
1336 * Before beginning index vacuuming, we release any pin we may
1337 * hold on the visibility map page. This isn't necessary for
1338 * correctness, but we do it anyway to avoid holding the pin
1339 * across a lengthy, unrelated operation.
1340 */
1341 if (BufferIsValid(vmbuffer))
1342 {
1343 ReleaseBuffer(vmbuffer);
1344 vmbuffer = InvalidBuffer;
1345 }
1346
1347 /* Perform a round of index and heap vacuuming */
1348 vacrel->consider_bypass_optimization = false;
1349 lazy_vacuum(vacrel);
1350
1351 /*
1352 * Vacuum the Free Space Map to make newly-freed space visible on
1353 * upper-level FSM pages. Note that blkno is the previously
1354 * processed block.
1355 */
1356 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1357 blkno + 1);
1358 next_fsm_block_to_vacuum = blkno;
1359
1360 /* Report that we are once again scanning the heap */
1361 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1362 PROGRESS_VACUUM_PHASE_SCAN_HEAP);
1363 }
1364
1365 buf = read_stream_next_buffer(stream, &per_buffer_data);
1366
1367 /* The relation is exhausted. */
1368 if (!BufferIsValid(buf))
1369 break;
1370
1371 blk_info = *((uint8 *) per_buffer_data);
1372 CheckBufferIsPinnedOnce(buf);
1373 page = BufferGetPage(buf);
1374 blkno = BufferGetBlockNumber(buf);
1375
1376 vacrel->scanned_pages++;
1377 if (blk_info & VAC_BLK_WAS_EAGER_SCANNED)
1378 vacrel->eager_scanned_pages++;
1379
1380 /* Report as block scanned, update error traceback information */
1381 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1382 update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_SCAN_HEAP,
1383 blkno, InvalidOffsetNumber);
1384
1385 /*
1386 * Pin the visibility map page in case we need to mark the page
1387 * all-visible. In most cases this will be very cheap, because we'll
1388 * already have the correct page pinned anyway.
1389 */
1390 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
1391
1392 /*
1393 * We need a buffer cleanup lock to prune HOT chains and defragment
1394 * the page in lazy_scan_prune. But when it's not possible to acquire
1395 * a cleanup lock right away, we may be able to settle for reduced
1396 * processing using lazy_scan_noprune.
1397 */
1398 got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
1399
1400 if (!got_cleanup_lock)
1401 LockBuffer(buf, BUFFER_LOCK_SHARE);
1402
1403 /* Check for new or empty pages before lazy_scan_[no]prune call */
1404 if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock,
1405 vmbuffer))
1406 {
1407 /* Processed as new/empty page (lock and pin released) */
1408 continue;
1409 }
1410
1411 /*
1412 * If we didn't get the cleanup lock, we can still collect LP_DEAD
1413 * items in the dead_items area for later vacuuming, count live and
1414 * recently dead tuples for vacuum logging, and determine if this
1415 * block could later be truncated. If we encounter any xid/mxids that
1416 * require advancing the relfrozenxid/relminxid, we'll have to wait
1417 * for a cleanup lock and call lazy_scan_prune().
1418 */
1419 if (!got_cleanup_lock &&
1420 !lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
1421 {
1422 /*
1423 * lazy_scan_noprune could not do all required processing. Wait
1424 * for a cleanup lock, and call lazy_scan_prune in the usual way.
1425 */
1426 Assert(vacrel->aggressive);
1427 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1428 LockBufferForCleanup(buf);
1429 got_cleanup_lock = true;
1430 }
1431
1432 /*
1433 * If we have a cleanup lock, we must now prune, freeze, and count
1434 * tuples. We may have acquired the cleanup lock originally, or we may
1435 * have gone back and acquired it after lazy_scan_noprune() returned
1436 * false. Either way, the page hasn't been processed yet.
1437 *
1438 * Like lazy_scan_noprune(), lazy_scan_prune() will count
1439 * recently_dead_tuples and live tuples for vacuum logging, determine
1440 * if the block can later be truncated, and accumulate the details of
1441 * remaining LP_DEAD line pointers on the page into dead_items. These
1442 * dead items include those pruned by lazy_scan_prune() as well as
1443 * line pointers previously marked LP_DEAD.
1444 */
1445 if (got_cleanup_lock)
1446 ndeleted = lazy_scan_prune(vacrel, buf, blkno, page,
1447 vmbuffer,
1448 blk_info & VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM,
1449 &has_lpdead_items, &vm_page_frozen);
1450
1451 /*
1452 * Count an eagerly scanned page as a failure or a success.
1453 *
1454 * Only lazy_scan_prune() freezes pages, so if we didn't get the
1455 * cleanup lock, we won't have frozen the page. However, we only count
1456 * pages that were too new to require freezing as eager freeze
1457 * failures.
1458 *
1459 * We could gather more information from lazy_scan_noprune() about
1460 * whether or not there were tuples with XIDs or MXIDs older than the
1461 * FreezeLimit or MultiXactCutoff. However, for simplicity, we simply
1462 * exclude pages skipped due to cleanup lock contention from eager
1463 * freeze algorithm caps.
1464 */
1465 if (got_cleanup_lock &&
1466 (blk_info & VAC_BLK_WAS_EAGER_SCANNED))
1467 {
1468 /* Aggressive vacuums do not eager scan. */
1469 Assert(!vacrel->aggressive);
1470
1471 if (vm_page_frozen)
1472 {
1473 if (vacrel->eager_scan_remaining_successes > 0)
1474 vacrel->eager_scan_remaining_successes--;
1475
1476 if (vacrel->eager_scan_remaining_successes == 0)
1477 {
1478 /*
1479 * Report only once that we disabled eager scanning. We
1480 * may eagerly read ahead blocks in excess of the success
1481 * or failure caps before attempting to freeze them, so we
1482 * could reach here even after disabling additional eager
1483 * scanning.
1484 */
1485 if (vacrel->eager_scan_max_fails_per_region > 0)
1486 ereport(vacrel->verbose ? INFO : DEBUG2,
1487 (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of relation \"%s.%s.%s\"",
1488 orig_eager_scan_success_limit,
1489 vacrel->dbname, vacrel->relnamespace,
1490 vacrel->relname)));
1491
1492 /*
1493 * If we hit our success cap, permanently disable eager
1494 * scanning by setting the other eager scan management
1495 * fields to their disabled values.
1496 */
1497 vacrel->eager_scan_remaining_fails = 0;
1498 vacrel->next_eager_scan_region_start = InvalidBlockNumber;
1499 vacrel->eager_scan_max_fails_per_region = 0;
1500 }
1501 }
1502 else if (vacrel->eager_scan_remaining_fails > 0)
1503 vacrel->eager_scan_remaining_fails--;
1504 }
1505
1506 /*
1507 * Now drop the buffer lock and, potentially, update the FSM.
1508 *
1509 * Our goal is to update the freespace map the last time we touch the
1510 * page. If we'll process a block in the second pass, we may free up
1511 * additional space on the page, so it is better to update the FSM
1512 * after the second pass. If the relation has no indexes, or if index
1513 * vacuuming is disabled, there will be no second heap pass; if this
1514 * particular page has no dead items, the second heap pass will not
1515 * touch this page. So, in those cases, update the FSM now.
1516 *
1517 * Note: In corner cases, it's possible to miss updating the FSM
1518 * entirely. If index vacuuming is currently enabled, we'll skip the
1519 * FSM update now. But if failsafe mode is later activated, or there
1520 * are so few dead tuples that index vacuuming is bypassed, there will
1521 * also be no opportunity to update the FSM later, because we'll never
1522 * revisit this page. Since updating the FSM is desirable but not
1523 * absolutely required, that's OK.
1524 */
1525 if (vacrel->nindexes == 0
1526 || !vacrel->do_index_vacuuming
1527 || !has_lpdead_items)
1528 {
1529 Size freespace = PageGetHeapFreeSpace(page);
1530
1531 UnlockReleaseBuffer(buf);
1532 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1533
1534 /*
1535 * Periodically perform FSM vacuuming to make newly-freed space
1536 * visible on upper FSM pages. This is done after vacuuming if the
1537 * table has indexes. There will only be newly-freed space if we
1538 * held the cleanup lock and lazy_scan_prune() was called.
1539 */
1540 if (got_cleanup_lock && vacrel->nindexes == 0 && ndeleted > 0 &&
1541 blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1542 {
1543 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1544 blkno);
1545 next_fsm_block_to_vacuum = blkno;
1546 }
1547 }
1548 else
1549 UnlockReleaseBuffer(buf);
1550 }
1551
1552 vacrel->blkno = InvalidBlockNumber;
1553 if (BufferIsValid(vmbuffer))
1554 ReleaseBuffer(vmbuffer);
1555
1556 /*
1557 * Report that everything is now scanned. We never skip scanning the last
1558 * block in the relation, so we can pass rel_pages here.
1559 */
1560 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED,
1561 rel_pages);
1562
1563 /* now we can compute the new value for pg_class.reltuples */
1564 vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, rel_pages,
1565 vacrel->scanned_pages,
1566 vacrel->live_tuples);
1567
1568 /*
1569 * Also compute the total number of surviving heap entries. In the
1570 * (unlikely) scenario that new_live_tuples is -1, take it as zero.
1571 */
1572 vacrel->new_rel_tuples =
1573 Max(vacrel->new_live_tuples, 0) + vacrel->recently_dead_tuples +
1574 vacrel->missed_dead_tuples;
1575
1576 read_stream_end(stream);
1577
1578 /*
1579 * Do index vacuuming (call each index's ambulkdelete routine), then do
1580 * related heap vacuuming
1581 */
1582 if (vacrel->dead_items_info->num_items > 0)
1583 lazy_vacuum(vacrel);
1584
1585 /*
1586 * Vacuum the remainder of the Free Space Map. We must do this whether or
1587 * not there were indexes, and whether or not we bypassed index vacuuming.
1588 * We can pass rel_pages here because we never skip scanning the last
1589 * block of the relation.
1590 */
1591 if (rel_pages > next_fsm_block_to_vacuum)
1592 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, rel_pages);
1593
1594 /* report all blocks vacuumed */
1596
1597 /* Do final index cleanup (call each index's amvacuumcleanup routine) */
1598 if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
1599 lazy_cleanup_all_indexes(vacrel);
1600}
void CheckBufferIsPinnedOnce(Buffer buffer)
Definition: bufmgr.c:5746
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5779
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5952
@ BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:205
Size PageGetHeapFreeSpace(const PageData *page)
Definition: bufpage.c:990
void FreeSpaceMapVacuumRange(Relation rel, BlockNumber start, BlockNumber end)
Definition: freespace.c:377
void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
Definition: freespace.c:194
#define PROGRESS_VACUUM_PHASE_SCAN_HEAP
Definition: progress.h:36
#define PROGRESS_VACUUM_TOTAL_HEAP_BLKS
Definition: progress.h:22
#define PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES
Definition: progress.h:26
#define PROGRESS_VACUUM_HEAP_BLKS_SCANNED
Definition: progress.h:23
#define PROGRESS_VACUUM_HEAP_BLKS_VACUUMED
Definition: progress.h:24
Buffer read_stream_next_buffer(ReadStream *stream, void **per_buffer_data)
Definition: read_stream.c:791
ReadStream * read_stream_begin_relation(int flags, BufferAccessStrategy strategy, Relation rel, ForkNumber forknum, ReadStreamBlockNumberCB callback, void *callback_private_data, size_t per_buffer_data_size)
Definition: read_stream.c:737
void read_stream_end(ReadStream *stream)
Definition: read_stream.c:1089
#define READ_STREAM_MAINTENANCE
Definition: read_stream.h:28
BlockNumber blkno
Definition: vacuumlazy.c:294
void vacuum_delay_point(bool is_analyze)
Definition: vacuum.c:2426
double vac_estimate_reltuples(Relation relation, BlockNumber total_pages, BlockNumber scanned_pages, double scanned_tuples)
Definition: vacuum.c:1330
static int lazy_scan_prune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool all_visible_according_to_vm, bool *has_lpdead_items, bool *vm_page_frozen)
Definition: vacuumlazy.c:1999
static BlockNumber heap_vac_scan_next_block(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition: vacuumlazy.c:1627
static void lazy_vacuum(LVRelState *vacrel)
Definition: vacuumlazy.c:2516
static void lazy_cleanup_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:3089
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
Definition: vacuumlazy.c:2305
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
Definition: vacuumlazy.c:1864
#define FAILSAFE_EVERY_PAGES
Definition: vacuumlazy.c:192
#define VACUUM_FSM_EVERY_PAGES
Definition: vacuumlazy.c:201
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)

References LVRelState::aggressive, Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), CheckBufferIsPinnedOnce(), ConditionalLockBufferForCleanup(), LVRelState::consider_bypass_optimization, LVRelState::current_block, LVRelState::dbname, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::eager_scan_max_fails_per_region, LVRelState::eager_scan_remaining_fails, LVRelState::eager_scan_remaining_successes, LVRelState::eager_scanned_pages, ereport, errmsg(), FAILSAFE_EVERY_PAGES, FreeSpaceMapVacuumRange(), heap_vac_scan_next_block(), INFO, InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_check_wraparound_failsafe(), lazy_cleanup_all_indexes(), lazy_scan_new_or_empty(), lazy_scan_noprune(), lazy_scan_prune(), lazy_vacuum(), LVRelState::live_tuples, LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, Max, VacDeadItemsInfo::max_bytes, LVRelState::missed_dead_tuples, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::next_eager_scan_region_start, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::nindexes, VacDeadItemsInfo::num_items, PageGetHeapFreeSpace(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, read_stream_begin_relation(), read_stream_end(), READ_STREAM_MAINTENANCE, read_stream_next_buffer(), LVRelState::recently_dead_tuples, RecordPageWithFreeSpace(), LVRelState::rel, LVRelState::rel_pages, ReleaseBuffer(), LVRelState::relname, LVRelState::relnamespace, LVRelState::scanned_pages, TidStoreMemoryUsage(), UnlockReleaseBuffer(), update_vacuum_error_info(), VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM, VAC_BLK_WAS_EAGER_SCANNED, vac_estimate_reltuples(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_FSM_EVERY_PAGES, LVRelState::verbose, and visibilitymap_pin().

Referenced by heap_vacuum_rel().
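One bookkeeping pattern above is easy to miss in the long listing: the first pass rolls free-space updates up to the higher FSM levels in ranges, remembering the first block whose free space has not yet been propagated, and it flushes such a range either after a round of index/heap vacuuming or, for tables with no indexes, once enough blocks have accumulated. A standalone sketch of that cadence, with an illustrative callback standing in for FreeSpaceMapVacuumRange():

#include <stdint.h>

typedef uint32_t my_BlockNumber;    /* illustrative stand-in for BlockNumber */

/*
 * Illustrative: vacuum the FSM for [*next_to_vacuum, current_blkno) once
 * enough blocks have accumulated, then restart the range at current_blkno.
 */
static void
maybe_vacuum_fsm_range(my_BlockNumber current_blkno,
                       my_BlockNumber *next_to_vacuum,
                       my_BlockNumber every_pages,
                       void (*vacuum_range) (my_BlockNumber start, my_BlockNumber end))
{
    if (current_blkno - *next_to_vacuum >= every_pages)
    {
        vacuum_range(*next_to_vacuum, current_blkno);
        *next_to_vacuum = current_blkno;
    }
}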

◆ lazy_scan_new_or_empty()

static bool lazy_scan_new_or_empty ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool  sharelock,
Buffer  vmbuffer 
)
static

Definition at line 1864 of file vacuumlazy.c.

1866{
1867 Size freespace;
1868
1869 if (PageIsNew(page))
1870 {
1871 /*
1872 * All-zeroes pages can be left over if either a backend extends the
1873 * relation by a single page, but crashes before the newly initialized
1874 * page has been written out, or when bulk-extending the relation
1875 * (which creates a number of empty pages at the tail end of the
1876 * relation), and then enters them into the FSM.
1877 *
1878 * Note we do not enter the page into the visibilitymap. That has the
1879 * downside that we repeatedly visit this page in subsequent vacuums,
1880 * but otherwise we'll never discover the space on a promoted standby.
1881 * The harm of repeated checking ought to normally not be too bad. The
1882 * space usually should be used at some point, otherwise there
1883 * wouldn't be any regular vacuums.
1884 *
1885 * Make sure these pages are in the FSM, to ensure they can be reused.
1886 * Do that by testing if there's any space recorded for the page. If
1887 * not, enter it. We do so after releasing the lock on the heap page;
1888 * the FSM is approximate, after all.
1889 */
1890 UnlockReleaseBuffer(buf);
1891
1892 if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
1893 {
1894 freespace = BLCKSZ - SizeOfPageHeaderData;
1895
1896 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1897 }
1898
1899 return true;
1900 }
1901
1902 if (PageIsEmpty(page))
1903 {
1904 /*
1905 * It seems likely that caller will always be able to get a cleanup
1906 * lock on an empty page. But don't take any chances -- escalate to
1907 * an exclusive lock (still don't need a cleanup lock, though).
1908 */
1909 if (sharelock)
1910 {
1911 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1912 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
1913
1914 if (!PageIsEmpty(page))
1915 {
1916 /* page isn't new or empty -- keep lock and pin for now */
1917 return false;
1918 }
1919 }
1920 else
1921 {
1922 /* Already have a full cleanup lock (which is more than enough) */
1923 }
1924
1925 /*
1926 * Unlike new pages, empty pages are always set all-visible and
1927 * all-frozen.
1928 */
1929 if (!PageIsAllVisible(page))
1930 {
1931 START_CRIT_SECTION();
1932
1933 /* mark buffer dirty before writing a WAL record */
1934 MarkBufferDirty(buf);
1935
1936 /*
1937 * It's possible that another backend has extended the heap,
1938 * initialized the page, and then failed to WAL-log the page due
1939 * to an ERROR. Since heap extension is not WAL-logged, recovery
1940 * might try to replay our record setting the page all-visible and
1941 * find that the page isn't initialized, which will cause a PANIC.
1942 * To prevent that, check whether the page has been previously
1943 * WAL-logged, and if not, do that now.
1944 */
1945 if (RelationNeedsWAL(vacrel->rel) &&
1946 !XLogRecPtrIsValid(PageGetLSN(page)))
1947 log_newpage_buffer(buf, true);
1948
1949 PageSetAllVisible(page);
1950 visibilitymap_set(vacrel->rel, blkno, buf,
1951 InvalidXLogRecPtr,
1952 vmbuffer, InvalidTransactionId,
1953 VISIBILITYMAP_ALL_VISIBLE |
1954 VISIBILITYMAP_ALL_FROZEN);
1955 END_CRIT_SECTION();
1956
1957 /* Count the newly all-frozen pages for logging */
1958 vacrel->vm_new_visible_pages++;
1959 vacrel->vm_new_visible_frozen_pages++;
1960
1961
1962 freespace = PageGetHeapFreeSpace(page);
1963 UnlockReleaseBuffer(buf);
1964 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1965 return true;
1966 }
1967
1968 /* page isn't new or empty -- keep lock and pin */
1969 return false;
1970}
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:3037
@ BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:207
static bool PageIsAllVisible(const PageData *page)
Definition: bufpage.h:428
#define SizeOfPageHeaderData
Definition: bufpage.h:216
static void PageSetAllVisible(Page page)
Definition: bufpage.h:433
static XLogRecPtr PageGetLSN(const PageData *page)
Definition: bufpage.h:385
Size GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk)
Definition: freespace.c:244
#define START_CRIT_SECTION()
Definition: miscadmin.h:150
#define END_CRIT_SECTION()
Definition: miscadmin.h:152
#define RelationNeedsWAL(relation)
Definition: rel.h:638
uint8 visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
#define XLogRecPtrIsValid(r)
Definition: xlogdefs.h:29
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
Definition: xloginsert.c:1259

References buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, END_CRIT_SECTION, GetRecordedFreeSpace(), InvalidTransactionId, InvalidXLogRecPtr, LockBuffer(), log_newpage_buffer(), MarkBufferDirty(), PageGetHeapFreeSpace(), PageGetLSN(), PageIsAllVisible(), PageIsEmpty(), PageIsNew(), PageSetAllVisible(), RecordPageWithFreeSpace(), LVRelState::rel, RelationNeedsWAL, SizeOfPageHeaderData, START_CRIT_SECTION, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), LVRelState::vm_new_visible_frozen_pages, LVRelState::vm_new_visible_pages, and XLogRecPtrIsValid.

Referenced by lazy_scan_heap().
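For an all-zeroes page the recorded free space is simply the whole block minus the page header, since nothing else is on the page yet. A tiny runnable sketch of that arithmetic; the 8192-byte block size and 24-byte header size are assumptions matching the stock build defaults, not values taken from this file.

#include <stdio.h>
#include <stddef.h>

#define MY_BLCKSZ 8192              /* assumed default block size */
#define MY_PAGE_HEADER_SIZE 24      /* assumed SizeOfPageHeaderData equivalent */

int
main(void)
{
    size_t freespace = MY_BLCKSZ - MY_PAGE_HEADER_SIZE;

    /* With the assumptions above this prints 8168 bytes. */
    printf("free space recorded for an all-zero page: %zu bytes\n", freespace);
    return 0;
}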

◆ lazy_scan_noprune()

static bool lazy_scan_noprune ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool *  has_lpdead_items 
)
static

Definition at line 2305 of file vacuumlazy.c.

2310{
2311 OffsetNumber offnum,
2312 maxoff;
2313 int lpdead_items,
2314 live_tuples,
2315 recently_dead_tuples,
2316 missed_dead_tuples;
2317 bool hastup;
2318 HeapTupleHeader tupleheader;
2319 TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
2320 MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid;
2321 OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
2322
2323 Assert(BufferGetBlockNumber(buf) == blkno);
2324
2325 hastup = false; /* for now */
2326
2327 lpdead_items = 0;
2328 live_tuples = 0;
2329 recently_dead_tuples = 0;
2330 missed_dead_tuples = 0;
2331
2332 maxoff = PageGetMaxOffsetNumber(page);
2333 for (offnum = FirstOffsetNumber;
2334 offnum <= maxoff;
2335 offnum = OffsetNumberNext(offnum))
2336 {
2337 ItemId itemid;
2338 HeapTupleData tuple;
2339
2340 vacrel->offnum = offnum;
2341 itemid = PageGetItemId(page, offnum);
2342
2343 if (!ItemIdIsUsed(itemid))
2344 continue;
2345
2346 if (ItemIdIsRedirected(itemid))
2347 {
2348 hastup = true;
2349 continue;
2350 }
2351
2352 if (ItemIdIsDead(itemid))
2353 {
2354 /*
2355 * Deliberately don't set hastup=true here. See same point in
2356 * lazy_scan_prune for an explanation.
2357 */
2358 deadoffsets[lpdead_items++] = offnum;
2359 continue;
2360 }
2361
2362 hastup = true; /* page prevents rel truncation */
2363 tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
2364 if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
2365 &NoFreezePageRelfrozenXid,
2366 &NoFreezePageRelminMxid))
2367 {
2368 /* Tuple with XID < FreezeLimit (or MXID < MultiXactCutoff) */
2369 if (vacrel->aggressive)
2370 {
2371 /*
2372 * Aggressive VACUUMs must always be able to advance rel's
2373 * relfrozenxid to a value >= FreezeLimit (and be able to
2374 * advance rel's relminmxid to a value >= MultiXactCutoff).
2375 * The ongoing aggressive VACUUM won't be able to do that
2376 * unless it can freeze an XID (or MXID) from this tuple now.
2377 *
2378 * The only safe option is to have caller perform processing
2379 * of this page using lazy_scan_prune. Caller might have to
2380 * wait a while for a cleanup lock, but it can't be helped.
2381 */
2382 vacrel->offnum = InvalidOffsetNumber;
2383 return false;
2384 }
2385
2386 /*
2387 * Non-aggressive VACUUMs are under no obligation to advance
2388 * relfrozenxid (even by one XID). We can be much laxer here.
2389 *
2390 * Currently we always just accept an older final relfrozenxid
2391 * and/or relminmxid value. We never make caller wait or work a
2392 * little harder, even when it likely makes sense to do so.
2393 */
2394 }
2395
2396 ItemPointerSet(&(tuple.t_self), blkno, offnum);
2397 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2398 tuple.t_len = ItemIdGetLength(itemid);
2399 tuple.t_tableOid = RelationGetRelid(vacrel->rel);
2400
2401 switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
2402 buf))
2403 {
2404 case HEAPTUPLE_DELETE_IN_PROGRESS:
2405 case HEAPTUPLE_LIVE:
2406
2407 /*
2408 * Count both cases as live, just like lazy_scan_prune
2409 */
2410 live_tuples++;
2411
2412 break;
2413 case HEAPTUPLE_DEAD:
2414
2415 /*
2416 * There is some useful work for pruning to do, that won't be
2417 * done due to failure to get a cleanup lock.
2418 */
2419 missed_dead_tuples++;
2420 break;
2421 case HEAPTUPLE_RECENTLY_DEAD:
2422
2423 /*
2424 * Count in recently_dead_tuples, just like lazy_scan_prune
2425 */
2426 recently_dead_tuples++;
2427 break;
2428 case HEAPTUPLE_INSERT_IN_PROGRESS:
2429
2430 /*
2431 * Do not count these rows as live, just like lazy_scan_prune
2432 */
2433 break;
2434 default:
2435 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2436 break;
2437 }
2438 }
2439
2440 vacrel->offnum = InvalidOffsetNumber;
2441
2442 /*
2443 * By here we know for sure that caller can put off freezing and pruning
2444 * this particular page until the next VACUUM. Remember its details now.
2445 * (lazy_scan_prune expects a clean slate, so we have to do this last.)
2446 */
2447 vacrel->NewRelfrozenXid = NoFreezePageRelfrozenXid;
2448 vacrel->NewRelminMxid = NoFreezePageRelminMxid;
2449
2450 /* Save any LP_DEAD items found on the page in dead_items */
2451 if (vacrel->nindexes == 0)
2452 {
2453 /* Using one-pass strategy (since table has no indexes) */
2454 if (lpdead_items > 0)
2455 {
2456 /*
2457 * Perfunctory handling for the corner case where a single pass
2458 * strategy VACUUM cannot get a cleanup lock, and it turns out
2459 * that there are one or more LP_DEAD items: just count the LP_DEAD
2460 * items as missed_dead_tuples instead. (This is a bit dishonest,
2461 * but it beats having to maintain specialized heap vacuuming code
2462 * forever, for vanishingly little benefit.)
2463 */
2464 hastup = true;
2465 missed_dead_tuples += lpdead_items;
2466 }
2467 }
2468 else if (lpdead_items > 0)
2469 {
2470 /*
2471 * Page has LP_DEAD items, and so any references/TIDs that remain in
2472 * indexes will be deleted during index vacuuming (and then marked
2473 * LP_UNUSED in the heap)
2474 */
2475 vacrel->lpdead_item_pages++;
2476
2477 dead_items_add(vacrel, blkno, deadoffsets, lpdead_items);
2478
2479 vacrel->lpdead_items += lpdead_items;
2480 }
2481
2482 /*
2483 * Finally, add relevant page-local counts to whole-VACUUM counts
2484 */
2485 vacrel->live_tuples += live_tuples;
2486 vacrel->recently_dead_tuples += recently_dead_tuples;
2487 vacrel->missed_dead_tuples += missed_dead_tuples;
2488 if (missed_dead_tuples > 0)
2489 vacrel->missed_dead_pages++;
2490
2491 /* Can't truncate this page */
2492 if (hastup)
2493 vacrel->nonempty_pages = blkno + 1;
2494
2495 /* Did we find LP_DEAD items? */
2496 *has_lpdead_items = (lpdead_items > 0);
2497
2498 /* Caller won't need to call lazy_scan_prune with same page */
2499 return true;
2500}
TransactionId MultiXactId
Definition: c.h:681
bool heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid)
Definition: heapam.c:7910
#define MaxHeapTuplesPerPage
Definition: htup_details.h:624
OffsetNumber offnum
Definition: vacuumlazy.c:295
static void dead_items_add(LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
Definition: vacuumlazy.c:3625

References LVRelState::aggressive, Assert(), buf, BufferGetBlockNumber(), LVRelState::cutoffs, dead_items_add(), elog, ERROR, FirstOffsetNumber, heap_tuple_should_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, ItemIdGetLength, ItemIdIsDead, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MaxHeapTuplesPerPage, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::recently_dead_tuples, LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by lazy_scan_heap().
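The first half of the scan loop above only classifies line pointers; tuple visibility is consulted afterwards, and only for normal items. A standalone sketch of that first-level classification, using an illustrative enum in place of the ItemId flag tests:

/* Illustrative line-pointer states standing in for the ItemId macros. */
typedef enum
{
    SLOT_UNUSED,
    SLOT_REDIRECT,
    SLOT_DEAD,
    SLOT_NORMAL
} SlotState;

typedef struct PageCounts
{
    int lpdead_items;   /* dead line pointers remembered for index vacuuming */
    int normal_items;   /* tuples that still need a visibility check */
    int hastup;         /* page keeps the relation from being truncated */
} PageCounts;

/* Classify one slot the way the top of the loop does. */
static void
classify_slot(SlotState state, PageCounts *counts)
{
    switch (state)
    {
        case SLOT_UNUSED:
            break;                      /* nothing to record */
        case SLOT_REDIRECT:
            counts->hastup = 1;         /* a redirect blocks truncation */
            break;
        case SLOT_DEAD:
            counts->lpdead_items++;     /* deliberately does not set hastup */
            break;
        case SLOT_NORMAL:
            counts->hastup = 1;
            counts->normal_items++;     /* goes on to the visibility check */
            break;
    }
}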

◆ lazy_scan_prune()

static int lazy_scan_prune ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
Buffer  vmbuffer,
bool  all_visible_according_to_vm,
bool *  has_lpdead_items,
bool *  vm_page_frozen 
)
static

Definition at line 1999 of file vacuumlazy.c.

2007{
2008 Relation rel = vacrel->rel;
2009 PruneFreezeResult presult;
2010 PruneFreezeParams params = {
2011 .relation = rel,
2012 .buffer = buf,
2013 .reason = PRUNE_VACUUM_SCAN,
2014 .options = HEAP_PAGE_PRUNE_FREEZE,
2015 .vistest = vacrel->vistest,
2016 .cutoffs = &vacrel->cutoffs,
2017 };
2018
2019 Assert(BufferGetBlockNumber(buf) == blkno);
2020
2021 /*
2022 * Prune all HOT-update chains and potentially freeze tuples on this page.
2023 *
2024 * If the relation has no indexes, we can immediately mark would-be dead
2025 * items LP_UNUSED.
2026 *
2027 * The number of tuples removed from the page is returned in
2028 * presult.ndeleted. It should not be confused with presult.lpdead_items;
2029 * presult.lpdead_items's final value can be thought of as the number of
2030 * tuples that were deleted from indexes.
2031 *
2032 * We will update the VM after collecting LP_DEAD items and freezing
2033 * tuples. Pruning will have determined whether or not the page is
2034 * all-visible.
2035 */
2036 if (vacrel->nindexes == 0)
2038
2040 &presult,
2041 &vacrel->offnum,
2042 &vacrel->NewRelfrozenXid, &vacrel->NewRelminMxid);
2043
2046
2047 if (presult.nfrozen > 0)
2048 {
2049 /*
2050 * We don't increment the new_frozen_tuple_pages instrumentation
2051 * counter when nfrozen == 0, since it only counts pages with newly
2052 * frozen tuples (don't confuse that with pages newly set all-frozen
2053 * in VM).
2054 */
2055 vacrel->new_frozen_tuple_pages++;
2056 }
2057
2058 /*
2059 * VACUUM will call heap_page_is_all_visible() during the second pass over
2060 * the heap to determine all_visible and all_frozen for the page -- this
2061 * is a specialized version of the logic from this function. Now that
2062 * we've finished pruning and freezing, make sure that we're in total
2063 * agreement with heap_page_is_all_visible() using an assertion.
2064 */
2065#ifdef USE_ASSERT_CHECKING
2066 if (presult.all_visible)
2067 {
2068 TransactionId debug_cutoff;
2069 bool debug_all_frozen;
2070
2071 Assert(presult.lpdead_items == 0);
2072
2073 Assert(heap_page_is_all_visible(vacrel->rel, buf,
2074 vacrel->cutoffs.OldestXmin, &debug_all_frozen,
2075 &debug_cutoff, &vacrel->offnum));
2076
2077 Assert(presult.all_frozen == debug_all_frozen);
2078
2079 Assert(!TransactionIdIsValid(debug_cutoff) ||
2080 debug_cutoff == presult.vm_conflict_horizon);
2081 }
2082#endif
2083
2084 /*
2085 * Now save details of the LP_DEAD items from the page in vacrel
2086 */
2087 if (presult.lpdead_items > 0)
2088 {
2089 vacrel->lpdead_item_pages++;
2090
2091 /*
2092 * deadoffsets are collected incrementally in
2093 * heap_page_prune_and_freeze() as each dead line pointer is recorded,
2094 * with an indeterminate order, but dead_items_add requires them to be
2095 * sorted.
2096 */
2097 qsort(presult.deadoffsets, presult.lpdead_items, sizeof(OffsetNumber),
2098 cmpOffsetNumbers);
2099
2100 dead_items_add(vacrel, blkno, presult.deadoffsets, presult.lpdead_items);
2101 }
2102
2103 /* Finally, add page-local counts to whole-VACUUM counts */
2104 vacrel->tuples_deleted += presult.ndeleted;
2105 vacrel->tuples_frozen += presult.nfrozen;
2106 vacrel->lpdead_items += presult.lpdead_items;
2107 vacrel->live_tuples += presult.live_tuples;
2108 vacrel->recently_dead_tuples += presult.recently_dead_tuples;
2109
2110 /* Can't truncate this page */
2111 if (presult.hastup)
2112 vacrel->nonempty_pages = blkno + 1;
2113
2114 /* Did we find LP_DEAD items? */
2115 *has_lpdead_items = (presult.lpdead_items > 0);
2116
2117 Assert(!presult.all_visible || !(*has_lpdead_items));
2118 Assert(!presult.all_frozen || presult.all_visible);
2119
2120 /*
2121 * Handle setting visibility map bit based on information from the VM (as
2122 * of last heap_vac_scan_next_block() call), and from all_visible and
2123 * all_frozen variables
2124 */
2125 if (!all_visible_according_to_vm && presult.all_visible)
2126 {
2127 uint8 old_vmbits;
2128 uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
2129
2130 if (presult.all_frozen)
2131 {
2132 Assert(!TransactionIdIsValid(presult.vm_conflict_horizon));
2133 flags |= VISIBILITYMAP_ALL_FROZEN;
2134 }
2135
2136 /*
2137 * It should never be the case that the visibility map page is set
2138 * while the page-level bit is clear, but the reverse is allowed (if
2139 * checksums are not enabled). Regardless, set both bits so that we
2140 * get back in sync.
2141 *
2142 * NB: If the heap page is all-visible but the VM bit is not set, we
2143 * don't need to dirty the heap page. However, if checksums are
2144 * enabled, we do need to make sure that the heap page is dirtied
2145 * before passing it to visibilitymap_set(), because it may be logged.
2146 * Given that this situation should only happen in rare cases after a
2147 * crash, it is not worth optimizing.
2148 */
2149 PageSetAllVisible(page);
2150 MarkBufferDirty(buf);
2151 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
2152 InvalidXLogRecPtr,
2153 vmbuffer, presult.vm_conflict_horizon,
2154 flags);
2155
2156 /*
2157 * If the page wasn't already set all-visible and/or all-frozen in the
2158 * VM, count it as newly set for logging.
2159 */
2160 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2161 {
2162 vacrel->vm_new_visible_pages++;
2163 if (presult.all_frozen)
2164 {
2165 vacrel->vm_new_visible_frozen_pages++;
2166 *vm_page_frozen = true;
2167 }
2168 }
2169 else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
2170 presult.all_frozen)
2171 {
2172 vacrel->vm_new_frozen_pages++;
2173 *vm_page_frozen = true;
2174 }
2175 }
2176
2177 /*
2178 * As of PostgreSQL 9.2, the visibility map bit should never be set if the
2179 * page-level bit is clear. However, it's possible that the bit got
2180 * cleared after heap_vac_scan_next_block() was called, so we must recheck
2181 * with buffer lock before concluding that the VM is corrupt.
2182 */
2183 else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
2184 visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
2185 {
2186 ereport(WARNING,
2187 (errcode(ERRCODE_DATA_CORRUPTED),
2188 errmsg("page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
2189 vacrel->relname, blkno)));
2190
2191 visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
2192 VISIBILITYMAP_VALID_BITS);
2193 }
2194
2195 /*
2196 * It's possible for the value returned by
2197 * GetOldestNonRemovableTransactionId() to move backwards, so it's not
2198 * wrong for us to see tuples that appear to not be visible to everyone
2199 * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value
2200 * never moves backwards, but GetOldestNonRemovableTransactionId() is
2201 * conservative and sometimes returns a value that's unnecessarily small,
2202 * so if we see that contradiction it just means that the tuples that we
2203 * think are not visible to everyone yet actually are, and the
2204 * PD_ALL_VISIBLE flag is correct.
2205 *
2206 * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
2207 * however.
2208 */
2209 else if (presult.lpdead_items > 0 && PageIsAllVisible(page))
2210 {
2211 ereport(WARNING,
2212 (errcode(ERRCODE_DATA_CORRUPTED),
2213 errmsg("page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
2214 vacrel->relname, blkno)));
2215
2216 PageClearAllVisible(page);
2217 MarkBufferDirty(buf);
2218 visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
2219 VISIBILITYMAP_VALID_BITS);
2220 }
2221
2222 /*
2223 * If the all-visible page is all-frozen but not marked as such yet, mark
2224 * it as all-frozen.
2225 */
2226 else if (all_visible_according_to_vm && presult.all_frozen &&
2227 !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
2228 {
2229 uint8 old_vmbits;
2230
2231 /*
2232 * Avoid relying on all_visible_according_to_vm as a proxy for the
2233 * page-level PD_ALL_VISIBLE bit being set, since it might have become
2234 * stale -- even when all_visible is set
2235 */
2236 if (!PageIsAllVisible(page))
2237 {
2238 PageSetAllVisible(page);
2239 MarkBufferDirty(buf);
2240 }
2241
2242 /*
2243 * Set the page all-frozen (and all-visible) in the VM.
2244 *
2245 * We can pass InvalidTransactionId as our cutoff_xid, since a
2246 * snapshotConflictHorizon sufficient to make everything safe for REDO
2247 * was logged when the page's tuples were frozen.
2248 */
2250 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
2251 InvalidXLogRecPtr,
2252 vmbuffer, InvalidTransactionId,
2253 VISIBILITYMAP_ALL_VISIBLE |
2254 VISIBILITYMAP_ALL_FROZEN);
2255
2256 /*
2257 * The page was likely already set all-visible in the VM. However,
2258 * there is a small chance that it was modified sometime between
2259 * setting all_visible_according_to_vm and checking the visibility
2260 * during pruning. Check the return value of old_vmbits anyway to
2261 * ensure the visibility map counters used for logging are accurate.
2262 */
2263 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2264 {
2265 vacrel->vm_new_visible_pages++;
2266 vacrel->vm_new_visible_frozen_pages++;
2267 *vm_page_frozen = true;
2268 }
2269
2270 /*
2271 * We already checked that the page was not set all-frozen in the VM
2272 * above, so we don't need to test the value of old_vmbits.
2273 */
2274 else
2275 {
2276 vacrel->vm_new_frozen_pages++;
2277 *vm_page_frozen = true;
2278 }
2279 }
2280
2281 return presult.ndeleted;
2282}
static void PageClearAllVisible(Page page)
Definition: bufpage.h:438
int errcode(int sqlerrcode)
Definition: elog.c:863
#define HEAP_PAGE_PRUNE_FREEZE
Definition: heapam.h:44
@ PRUNE_VACUUM_SCAN
Definition: heapam.h:229
#define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW
Definition: heapam.h:43
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:42
#define qsort(a, b, c, d)
Definition: port.h:499
void heap_page_prune_and_freeze(PruneFreezeParams *params, PruneFreezeResult *presult, OffsetNumber *off_loc, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
Definition: pruneheap.c:819
Relation relation
Definition: heapam.h:238
int recently_dead_tuples
Definition: heapam.h:285
TransactionId vm_conflict_horizon
Definition: heapam.h:300
OffsetNumber deadoffsets[MaxHeapTuplesPerPage]
Definition: heapam.h:314
bool all_visible
Definition: heapam.h:298
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static int cmpOffsetNumbers(const void *a, const void *b)
Definition: vacuumlazy.c:1974
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
#define VM_ALL_FROZEN(r, b, v)
Definition: visibilitymap.h:27
#define VISIBILITYMAP_VALID_BITS

References PruneFreezeResult::all_frozen, PruneFreezeResult::all_visible, Assert(), buf, BufferGetBlockNumber(), cmpOffsetNumbers(), LVRelState::cutoffs, dead_items_add(), PruneFreezeResult::deadoffsets, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), PruneFreezeResult::hastup, heap_page_prune_and_freeze(), HEAP_PAGE_PRUNE_FREEZE, HEAP_PAGE_PRUNE_MARK_UNUSED_NOW, InvalidTransactionId, InvalidXLogRecPtr, LVRelState::live_tuples, PruneFreezeResult::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, PruneFreezeResult::lpdead_items, MarkBufferDirty(), MultiXactIdIsValid, PruneFreezeResult::ndeleted, LVRelState::new_frozen_tuple_pages, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, PruneFreezeResult::nfrozen, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::offnum, VacuumCutoffs::OldestXmin, PruneFreezeParams::options, PageClearAllVisible(), PageIsAllVisible(), PageSetAllVisible(), PRUNE_VACUUM_SCAN, qsort, LVRelState::recently_dead_tuples, PruneFreezeResult::recently_dead_tuples, LVRelState::rel, PruneFreezeParams::relation, LVRelState::relname, TransactionIdIsValid, LVRelState::tuples_deleted, LVRelState::tuples_frozen, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, LVRelState::vistest, VM_ALL_FROZEN, PruneFreezeResult::vm_conflict_horizon, LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, LVRelState::vm_new_visible_pages, and WARNING.

Referenced by lazy_scan_heap().
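
The branching at the end of this function can be condensed into a small standalone model. The sketch below is illustrative only and is not backend code: the bit values mirror VISIBILITYMAP_ALL_VISIBLE and VISIBILITYMAP_ALL_FROZEN from visibilitymap.h, and the counter names are stand-ins for the vm_new_visible_pages / vm_new_frozen_pages instrumentation, showing which counter is bumped depending on the bits already recorded in the visibility map.

/*
 * Standalone model (not backend code) of how lazy_scan_prune() composes
 * visibility-map flags and picks an instrumentation counter to bump.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_ALL_VISIBLE 0x01
#define MODEL_ALL_FROZEN  0x02

int
main(void)
{
	bool		all_visible = true;	/* what pruning decided for the page */
	bool		all_frozen = true;
	uint8_t		old_vmbits = MODEL_ALL_VISIBLE; /* what the VM already said */
	uint8_t		flags = MODEL_ALL_VISIBLE;
	long		vm_new_visible_pages = 0;
	long		vm_new_frozen_pages = 0;

	if (all_frozen)
		flags |= MODEL_ALL_FROZEN;

	if (all_visible)
	{
		/* Page newly became all-visible in the VM? */
		if ((old_vmbits & MODEL_ALL_VISIBLE) == 0)
			vm_new_visible_pages++;
		/* Already visible, but only now marked all-frozen? */
		else if ((old_vmbits & MODEL_ALL_FROZEN) == 0 && all_frozen)
			vm_new_frozen_pages++;
	}

	printf("flags=0x%02x new_visible=%ld new_frozen=%ld\n",
		   (unsigned) flags, vm_new_visible_pages, vm_new_frozen_pages);
	return 0;
}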

◆ lazy_truncate_heap()

static void lazy_truncate_heap ( LVRelState *vacrel)
static

Definition at line 3286 of file vacuumlazy.c.

3287{
3288 BlockNumber orig_rel_pages = vacrel->rel_pages;
3289 BlockNumber new_rel_pages;
3290 bool lock_waiter_detected;
3291 int lock_retry;
3292
3293 /* Report that we are now truncating */
3294 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
3295 PROGRESS_VACUUM_PHASE_TRUNCATE);
3296
3297 /* Update error traceback information one last time */
3298 update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
3299 vacrel->nonempty_pages, InvalidOffsetNumber);
3300
3301 /*
3302 * Loop until no more truncating can be done.
3303 */
3304 do
3305 {
3306 /*
3307 * We need full exclusive lock on the relation in order to do
3308 * truncation. If we can't get it, give up rather than waiting --- we
3309 * don't want to block other backends, and we don't want to deadlock
3310 * (which is quite possible considering we already hold a lower-grade
3311 * lock).
3312 */
3313 lock_waiter_detected = false;
3314 lock_retry = 0;
3315 while (true)
3316 {
3317 if (ConditionalLockRelation(vacrel->rel, AccessExclusiveLock))
3318 break;
3319
3320 /*
3321 * Check for interrupts while trying to (re-)acquire the exclusive
3322 * lock.
3323 */
3324 CHECK_FOR_INTERRUPTS();
3325
3326 if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
3327 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
3328 {
3329 /*
3330 * We failed to establish the lock in the specified number of
3331 * retries. This means we give up truncating.
3332 */
3333 ereport(vacrel->verbose ? INFO : DEBUG2,
3334 (errmsg("\"%s\": stopping truncate due to conflicting lock request",
3335 vacrel->relname)));
3336 return;
3337 }
3338
3339 (void) WaitLatch(MyLatch,
3340 WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
3341 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL,
3342 WAIT_EVENT_VACUUM_TRUNCATE);
3343 ResetLatch(MyLatch);
3344 }
3345
3346 /*
3347 * Now that we have exclusive lock, look to see if the rel has grown
3348 * whilst we were vacuuming with non-exclusive lock. If so, give up;
3349 * the newly added pages presumably contain non-deletable tuples.
3350 */
3351 new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
3352 if (new_rel_pages != orig_rel_pages)
3353 {
3354 /*
3355 * Note: we intentionally don't update vacrel->rel_pages with the
3356 * new rel size here. If we did, it would amount to assuming that
3357 * the new pages are empty, which is unlikely. Leaving the numbers
3358 * alone amounts to assuming that the new pages have the same
3359 * tuple density as existing ones, which is less unlikely.
3360 */
3361 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3362 return;
3363 }
3364
3365 /*
3366 * Scan backwards from the end to verify that the end pages actually
3367 * contain no tuples. This is *necessary*, not optional, because
3368 * other backends could have added tuples to these pages whilst we
3369 * were vacuuming.
3370 */
3371 new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
3372 vacrel->blkno = new_rel_pages;
3373
3374 if (new_rel_pages >= orig_rel_pages)
3375 {
3376 /* can't do anything after all */
3377 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3378 return;
3379 }
3380
3381 /*
3382 * Okay to truncate.
3383 */
3384 RelationTruncate(vacrel->rel, new_rel_pages);
3385
3386 /*
3387 * We can release the exclusive lock as soon as we have truncated.
3388 * Other backends can't safely access the relation until they have
3389 * processed the smgr invalidation that smgrtruncate sent out ... but
3390 * that should happen as part of standard invalidation processing once
3391 * they acquire lock on the relation.
3392 */
3393 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3394
3395 /*
3396 * Update statistics. Here, it *is* correct to adjust rel_pages
3397 * without also touching reltuples, since the tuple count wasn't
3398 * changed by the truncation.
3399 */
3400 vacrel->removed_pages += orig_rel_pages - new_rel_pages;
3401 vacrel->rel_pages = new_rel_pages;
3402
3403 ereport(vacrel->verbose ? INFO : DEBUG2,
3404 (errmsg("table \"%s\": truncated %u to %u pages",
3405 vacrel->relname,
3406 orig_rel_pages, new_rel_pages)));
3407 orig_rel_pages = new_rel_pages;
3408 } while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
3409}
struct Latch * MyLatch
Definition: globals.c:63
void ResetLatch(Latch *latch)
Definition: latch.c:374
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:172
void UnlockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:314
bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:278
#define PROGRESS_VACUUM_PHASE_TRUNCATE
Definition: progress.h:40
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:289
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL
Definition: vacuumlazy.c:179
#define VACUUM_TRUNCATE_LOCK_TIMEOUT
Definition: vacuumlazy.c:180
static BlockNumber count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
Definition: vacuumlazy.c:3417
#define WL_TIMEOUT
Definition: waiteventset.h:37
#define WL_EXIT_ON_PM_DEATH
Definition: waiteventset.h:39
#define WL_LATCH_SET
Definition: waiteventset.h:34

References AccessExclusiveLock, LVRelState::blkno, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), DEBUG2, ereport, errmsg(), INFO, InvalidOffsetNumber, MyLatch, LVRelState::nonempty_pages, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelState::rel, LVRelState::rel_pages, RelationGetNumberOfBlocks, RelationTruncate(), LVRelState::relname, LVRelState::removed_pages, ResetLatch(), UnlockRelation(), update_vacuum_error_info(), VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_TRUNCATE_LOCK_TIMEOUT, VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL, LVRelState::verbose, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

Referenced by heap_vacuum_rel().
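
The lock-retry loop above is bounded by a simple budget. The standalone illustration below is not backend code; it assumes the defaults of a 50 ms wait per attempt and a 5000 ms overall timeout defined near the top of vacuumlazy.c, and shows how many failed ConditionalLockRelation() attempts are tolerated before truncation is abandoned.

/*
 * Standalone illustration (not backend code) of the lock-retry budget in
 * lazy_truncate_heap(), using assumed default values.
 */
#include <stdio.h>

#define TRUNCATE_LOCK_WAIT_INTERVAL 50		/* ms, assumed default */
#define TRUNCATE_LOCK_TIMEOUT       5000	/* ms, assumed default */

int
main(void)
{
	int			max_retries = TRUNCATE_LOCK_TIMEOUT / TRUNCATE_LOCK_WAIT_INTERVAL;

	/* ++lock_retry > max_retries is what makes the function give up */
	printf("give up after %d failed lock attempts (about %d ms of waiting)\n",
		   max_retries, max_retries * TRUNCATE_LOCK_WAIT_INTERVAL);
	return 0;
}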

◆ lazy_vacuum()

static void lazy_vacuum ( LVRelState *vacrel)
static

Definition at line 2516 of file vacuumlazy.c.

2517{
2518 bool bypass;
2519
2520 /* Should not end up here with no indexes */
2521 Assert(vacrel->nindexes > 0);
2522 Assert(vacrel->lpdead_item_pages > 0);
2523
2524 if (!vacrel->do_index_vacuuming)
2525 {
2526 Assert(!vacrel->do_index_cleanup);
2527 dead_items_reset(vacrel);
2528 return;
2529 }
2530
2531 /*
2532 * Consider bypassing index vacuuming (and heap vacuuming) entirely.
2533 *
2534 * We currently only do this in cases where the number of LP_DEAD items
2535 * for the entire VACUUM operation is close to zero. This avoids sharp
2536 * discontinuities in the duration and overhead of successive VACUUM
2537 * operations that run against the same table with a fixed workload.
2538 * Ideally, successive VACUUM operations will behave as if there are
2539 * exactly zero LP_DEAD items in cases where there are close to zero.
2540 *
2541 * This is likely to be helpful with a table that is continually affected
2542 * by UPDATEs that can mostly apply the HOT optimization, but occasionally
2543 * have small aberrations that lead to just a few heap pages retaining
2544 * only one or two LP_DEAD items. This is pretty common; even when the
2545 * DBA goes out of their way to make UPDATEs use HOT, it is practically
2546 * impossible to predict whether HOT will be applied in 100% of cases.
2547 * It's far easier to ensure that 99%+ of all UPDATEs against a table use
2548 * HOT through careful tuning.
2549 */
2550 bypass = false;
2551 if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
2552 {
2553 BlockNumber threshold;
2554
2555 Assert(vacrel->num_index_scans == 0);
2556 Assert(vacrel->lpdead_items == vacrel->dead_items_info->num_items);
2557 Assert(vacrel->do_index_vacuuming);
2558 Assert(vacrel->do_index_cleanup);
2559
2560 /*
2561 * This crossover point at which we'll start to do index vacuuming is
2562 * expressed as a percentage of the total number of heap pages in the
2563 * table that are known to have at least one LP_DEAD item. This is
2564 * much more important than the total number of LP_DEAD items, since
2565 * it's a proxy for the number of heap pages whose visibility map bits
2566 * cannot be set on account of bypassing index and heap vacuuming.
2567 *
2568 * We apply one further precautionary test: the space currently used
2569 * to store the TIDs (TIDs that now all point to LP_DEAD items) must
2570 * not exceed 32MB. This limits the risk that we will bypass index
2571 * vacuuming again and again until eventually there is a VACUUM whose
2572 * dead_items space is not CPU cache resident.
2573 *
2574 * We don't take any special steps to remember the LP_DEAD items (such
2575 * as counting them in our final update to the stats system) when the
2576 * optimization is applied. Though the accounting used in analyze.c's
2577 * acquire_sample_rows() will recognize the same LP_DEAD items as dead
2578 * rows in its own stats report, that's okay. The discrepancy should
2579 * be negligible. If this optimization is ever expanded to cover more
2580 * cases then this may need to be reconsidered.
2581 */
2582 threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
2583 bypass = (vacrel->lpdead_item_pages < threshold &&
2584 TidStoreMemoryUsage(vacrel->dead_items) < 32 * 1024 * 1024);
2585 }
2586
2587 if (bypass)
2588 {
2589 /*
2590 * There are almost zero TIDs. Behave as if there were precisely
2591 * zero: bypass index vacuuming, but do index cleanup.
2592 *
2593 * We expect that the ongoing VACUUM operation will finish very
2594 * quickly, so there is no point in considering speeding up as a
2595 * failsafe against wraparound failure. (Index cleanup is expected to
2596 * finish very quickly in cases where there were no ambulkdelete()
2597 * calls.)
2598 */
2599 vacrel->do_index_vacuuming = false;
2600 }
2601 else if (lazy_vacuum_all_indexes(vacrel))
2602 {
2603 /*
2604 * We successfully completed a round of index vacuuming. Do related
2605 * heap vacuuming now.
2606 */
2607 lazy_vacuum_heap_rel(vacrel);
2608 }
2609 else
2610 {
2611 /*
2612 * Failsafe case.
2613 *
2614 * We attempted index vacuuming, but didn't finish a full round/full
2615 * index scan. This happens when relfrozenxid or relminmxid is too
2616 * far in the past.
2617 *
2618 * From this point on the VACUUM operation will do no further index
2619 * vacuuming or heap vacuuming. This VACUUM operation won't end up
2620 * back here again.
2621 */
2622 Assert(VacuumFailsafeActive);
2623 }
2624
2625 /*
2626 * Forget the LP_DEAD items that we just vacuumed (or just decided to not
2627 * vacuum)
2628 */
2629 dead_items_reset(vacrel);
2630}
static void dead_items_reset(LVRelState *vacrel)
Definition: vacuumlazy.c:3647
#define BYPASS_THRESHOLD_PAGES
Definition: vacuumlazy.c:186
static bool lazy_vacuum_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2641
static void lazy_vacuum_heap_rel(LVRelState *vacrel)
Definition: vacuumlazy.c:2786

References Assert(), BYPASS_THRESHOLD_PAGES, LVRelState::consider_bypass_optimization, LVRelState::dead_items, LVRelState::dead_items_info, dead_items_reset(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, lazy_vacuum_all_indexes(), lazy_vacuum_heap_rel(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, LVRelState::rel_pages, TidStoreMemoryUsage(), and VacuumFailsafeActive.

Referenced by lazy_scan_heap().
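
The bypass decision reduces to a small amount of arithmetic. The sketch below is a standalone model, not backend code: the 2% figure corresponds to BYPASS_THRESHOLD_PAGES, the 32 MB cap to the TID-store memory test in the function, and the concrete page counts are made-up inputs chosen purely for illustration.

/*
 * Standalone sketch (not backend code) of the bypass decision in
 * lazy_vacuum().
 */
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	long		rel_pages = 100000;			/* heap size in blocks */
	long		lpdead_item_pages = 1500;	/* pages with at least one LP_DEAD item */
	long		dead_items_mem = 4L * 1024 * 1024;	/* TID store usage, bytes */

	double		threshold = rel_pages * 0.02;
	bool		bypass = lpdead_item_pages < threshold &&
						 dead_items_mem < 32L * 1024 * 1024;

	printf("threshold=%.0f pages, bypass index vacuuming: %s\n",
		   threshold, bypass ? "yes" : "no");
	return 0;
}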

◆ lazy_vacuum_all_indexes()

static bool lazy_vacuum_all_indexes ( LVRelState *vacrel)
static

Definition at line 2641 of file vacuumlazy.c.

2642{
2643 bool allindexes = true;
2644 double old_live_tuples = vacrel->rel->rd_rel->reltuples;
2645 const int progress_start_index[] = {
2646 PROGRESS_VACUUM_PHASE,
2647 PROGRESS_VACUUM_INDEXES_TOTAL
2648 };
2649 const int progress_end_index[] = {
2650 PROGRESS_VACUUM_INDEXES_TOTAL,
2651 PROGRESS_VACUUM_INDEXES_PROCESSED,
2652 PROGRESS_VACUUM_NUM_INDEX_VACUUMS
2653 };
2654 int64 progress_start_val[2];
2655 int64 progress_end_val[3];
2656
2657 Assert(vacrel->nindexes > 0);
2658 Assert(vacrel->do_index_vacuuming);
2659 Assert(vacrel->do_index_cleanup);
2660
2661 /* Precheck for XID wraparound emergencies */
2662 if (lazy_check_wraparound_failsafe(vacrel))
2663 {
2664 /* Wraparound emergency -- don't even start an index scan */
2665 return false;
2666 }
2667
2668 /*
2669 * Report that we are now vacuuming indexes and the number of indexes to
2670 * vacuum.
2671 */
2672 progress_start_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_INDEX;
2673 progress_start_val[1] = vacrel->nindexes;
2674 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
2675
2676 if (!ParallelVacuumIsActive(vacrel))
2677 {
2678 for (int idx = 0; idx < vacrel->nindexes; idx++)
2679 {
2680 Relation indrel = vacrel->indrels[idx];
2681 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2682
2683 vacrel->indstats[idx] = lazy_vacuum_one_index(indrel, istat,
2684 old_live_tuples,
2685 vacrel);
2686
2687 /* Report the number of indexes vacuumed */
2688 pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
2689 idx + 1);
2690
2691 if (lazy_check_wraparound_failsafe(vacrel))
2692 {
2693 /* Wraparound emergency -- end current index scan */
2694 allindexes = false;
2695 break;
2696 }
2697 }
2698 }
2699 else
2700 {
2701 /* Outsource everything to parallel variant */
2702 parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples,
2703 vacrel->num_index_scans);
2704
2705 /*
2706 * Do a postcheck to consider applying wraparound failsafe now. Note
2707 * that parallel VACUUM only gets the precheck and this postcheck.
2708 */
2709 if (lazy_check_wraparound_failsafe(vacrel))
2710 allindexes = false;
2711 }
2712
2713 /*
2714 * We delete all LP_DEAD items from the first heap pass in all indexes on
2715 * each call here (except calls where we choose to do the failsafe). This
2716 * makes the next call to lazy_vacuum_heap_rel() safe (except in the event
2717 * of the failsafe triggering, which prevents the next call from taking
2718 * place).
2719 */
2720 Assert(vacrel->num_index_scans > 0 ||
2721 vacrel->dead_items_info->num_items == vacrel->lpdead_items);
2722 Assert(allindexes || VacuumFailsafeActive);
2723
2724 /*
2725 * Increase and report the number of index scans. Also, we reset
2726 * PROGRESS_VACUUM_INDEXES_TOTAL and PROGRESS_VACUUM_INDEXES_PROCESSED.
2727 *
2728 * We deliberately include the case where we started a round of bulk
2729 * deletes that we weren't able to finish due to the failsafe triggering.
2730 */
2731 vacrel->num_index_scans++;
2732 progress_end_val[0] = 0;
2733 progress_end_val[1] = 0;
2734 progress_end_val[2] = vacrel->num_index_scans;
2735 pgstat_progress_update_multi_param(3, progress_end_index, progress_end_val);
2736
2737 return allindexes;
2738}
#define PROGRESS_VACUUM_NUM_INDEX_VACUUMS
Definition: progress.h:25
#define PROGRESS_VACUUM_PHASE_VACUUM_INDEX
Definition: progress.h:37
Form_pg_class rd_rel
Definition: rel.h:111
static IndexBulkDeleteResult * lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
Definition: vacuumlazy.c:3157
void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans)

References Assert(), LVRelState::dead_items_info, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, idx(), LVRelState::indrels, LVRelState::indstats, lazy_check_wraparound_failsafe(), lazy_vacuum_one_index(), LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, parallel_vacuum_bulkdel_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, LVRelState::pvs, RelationData::rd_rel, LVRelState::rel, and VacuumFailsafeActive.

Referenced by lazy_vacuum().
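
The paired multi-parameter progress updates follow a simple pattern: one update announces the phase and index count up front, and a second one zeroes the per-round counters while bumping the number of completed index scans. The code below is a standalone model with hypothetical names; it only mimics the shape of pgstat_progress_update_multi_param() and is not the backend implementation.

/*
 * Standalone model (not backend code) of the paired progress updates in
 * lazy_vacuum_all_indexes().
 */
#include <stdio.h>

enum {PROG_PHASE, PROG_INDEXES_TOTAL, PROG_INDEXES_PROCESSED, PROG_NUM_INDEX_VACUUMS, PROG_NPARAMS};

static long progress[PROG_NPARAMS];

static void
update_multi(int nparam, const int *index, const long *val)
{
	for (int i = 0; i < nparam; i++)
		progress[index[i]] = val[i];
}

int
main(void)
{
	const int	start_index[] = {PROG_PHASE, PROG_INDEXES_TOTAL};
	const long	start_val[] = {2 /* "vacuuming indexes" */, 3 /* nindexes */};
	const int	end_index[] = {PROG_INDEXES_TOTAL, PROG_INDEXES_PROCESSED, PROG_NUM_INDEX_VACUUMS};
	const long	end_val[] = {0, 0, 1};

	update_multi(2, start_index, start_val);
	printf("during: total=%ld processed=%ld\n",
		   progress[PROG_INDEXES_TOTAL], progress[PROG_INDEXES_PROCESSED]);

	update_multi(3, end_index, end_val);
	printf("after: total=%ld processed=%ld index_scans=%ld\n",
		   progress[PROG_INDEXES_TOTAL], progress[PROG_INDEXES_PROCESSED],
		   progress[PROG_NUM_INDEX_VACUUMS]);
	return 0;
}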

◆ lazy_vacuum_heap_page()

static void lazy_vacuum_heap_page ( LVRelState *vacrel,
BlockNumber  blkno,
Buffer  buffer,
OffsetNumber *deadoffsets,
int  num_offsets,
Buffer  vmbuffer 
)
static

Definition at line 2904 of file vacuumlazy.c.

2907{
2908 Page page = BufferGetPage(buffer);
2909 OffsetNumber unused[MaxHeapTuplesPerPage];
2910 int nunused = 0;
2911 TransactionId visibility_cutoff_xid;
2912 TransactionId conflict_xid = InvalidTransactionId;
2913 bool all_frozen;
2914 LVSavedErrInfo saved_err_info;
2915 uint8 vmflags = 0;
2916
2917 Assert(vacrel->do_index_vacuuming);
2918
2919 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
2920
2921 /* Update error traceback information */
2922 update_vacuum_error_info(vacrel, &saved_err_info,
2923 VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
2924 InvalidOffsetNumber);
2925
2926 /*
2927 * Before marking dead items unused, check whether the page will become
2928 * all-visible once that change is applied. This lets us reap the tuples
2929 * and mark the page all-visible within the same critical section,
2930 * enabling both changes to be emitted in a single WAL record. Since the
2931 * visibility checks may perform I/O and allocate memory, they must be
2932 * done outside the critical section.
2933 */
2934 if (heap_page_would_be_all_visible(vacrel->rel, buffer,
2935 vacrel->cutoffs.OldestXmin,
2936 deadoffsets, num_offsets,
2937 &all_frozen, &visibility_cutoff_xid,
2938 &vacrel->offnum))
2939 {
2940 vmflags |= VISIBILITYMAP_ALL_VISIBLE;
2941 if (all_frozen)
2942 {
2943 vmflags |= VISIBILITYMAP_ALL_FROZEN;
2944 Assert(!TransactionIdIsValid(visibility_cutoff_xid));
2945 }
2946
2947 /*
2948 * Take the lock on the vmbuffer before entering a critical section.
2949 * The heap page lock must also be held while updating the VM to
2950 * ensure consistency.
2951 */
2952 LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
2953 }
2954
2955 START_CRIT_SECTION();
2956
2957 for (int i = 0; i < num_offsets; i++)
2958 {
2959 ItemId itemid;
2960 OffsetNumber toff = deadoffsets[i];
2961
2962 itemid = PageGetItemId(page, toff);
2963
2964 Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
2965 ItemIdSetUnused(itemid);
2966 unused[nunused++] = toff;
2967 }
2968
2969 Assert(nunused > 0);
2970
2971 /* Attempt to truncate line pointer array now */
2972 PageTruncateLinePointerArray(page);
2973
2974 if ((vmflags & VISIBILITYMAP_VALID_BITS) != 0)
2975 {
2976 /*
2977 * The page is guaranteed to have had dead line pointers, so we always
2978 * set PD_ALL_VISIBLE.
2979 */
2980 PageSetAllVisible(page);
2981 visibilitymap_set_vmbits(blkno,
2982 vmbuffer, vmflags,
2983 vacrel->rel->rd_locator);
2984 conflict_xid = visibility_cutoff_xid;
2985 }
2986
2987 /*
2988 * Mark buffer dirty before we write WAL.
2989 */
2990 MarkBufferDirty(buffer);
2991
2992 /* XLOG stuff */
2993 if (RelationNeedsWAL(vacrel->rel))
2994 {
2995 log_heap_prune_and_freeze(vacrel->rel, buffer,
2996 vmflags != 0 ? vmbuffer : InvalidBuffer,
2997 vmflags,
2998 conflict_xid,
2999 false, /* no cleanup lock required */
3000 PRUNE_VACUUM_CLEANUP,
3001 NULL, 0, /* frozen */
3002 NULL, 0, /* redirected */
3003 NULL, 0, /* dead */
3004 unused, nunused);
3005 }
3006
3007 END_CRIT_SECTION();
3008
3009 if ((vmflags & VISIBILITYMAP_ALL_VISIBLE) != 0)
3010 {
3011 /* Count the newly set VM page for logging */
3012 LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
3013 vacrel->vm_new_visible_pages++;
3014 if (all_frozen)
3015 vacrel->vm_new_visible_frozen_pages++;
3016 }
3017
3018 /* Revert to the previous phase information for error traceback */
3019 restore_vacuum_error_info(vacrel, &saved_err_info);
3020}
void PageTruncateLinePointerArray(Page page)
Definition: bufpage.c:834
@ PRUNE_VACUUM_CLEANUP
Definition: heapam.h:230
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
void log_heap_prune_and_freeze(Relation relation, Buffer buffer, Buffer vmbuffer, uint8 vmflags, TransactionId conflict_xid, bool cleanup_lock, PruneReason reason, HeapTupleFreeze *frozen, int nfrozen, OffsetNumber *redirected, int nredirected, OffsetNumber *dead, int ndead, OffsetNumber *unused, int nunused)
Definition: pruneheap.c:2167
RelFileLocator rd_locator
Definition: rel.h:57
static bool heap_page_would_be_all_visible(Relation rel, Buffer buf, TransactionId OldestXmin, OffsetNumber *deadoffsets, int ndeadoffsets, bool *all_frozen, TransactionId *visibility_cutoff_xid, OffsetNumber *logging_offnum)
Definition: vacuumlazy.c:3741
uint8 visibilitymap_set_vmbits(BlockNumber heapBlk, Buffer vmBuf, uint8 flags, const RelFileLocator rlocator)

References Assert(), BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage(), LVRelState::cutoffs, LVRelState::do_index_vacuuming, END_CRIT_SECTION, heap_page_would_be_all_visible(), i, InvalidBuffer, InvalidOffsetNumber, InvalidTransactionId, ItemIdHasStorage, ItemIdIsDead, ItemIdSetUnused, LockBuffer(), log_heap_prune_and_freeze(), MarkBufferDirty(), MaxHeapTuplesPerPage, LVRelState::offnum, VacuumCutoffs::OldestXmin, PageGetItemId(), PageSetAllVisible(), PageTruncateLinePointerArray(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PRUNE_VACUUM_CLEANUP, RelationData::rd_locator, LVRelState::rel, RelationNeedsWAL, restore_vacuum_error_info(), START_CRIT_SECTION, TransactionIdIsValid, update_vacuum_error_info(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set_vmbits(), VISIBILITYMAP_VALID_BITS, LVRelState::vm_new_visible_frozen_pages, and LVRelState::vm_new_visible_pages.

Referenced by lazy_vacuum_heap_rel().
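
The two page-local effects of this function can be illustrated with a standalone model: the listed dead line pointers become unused, after which trailing unused line pointers can be trimmed, which is roughly what PageTruncateLinePointerArray() achieves on a real heap page. The model below is not backend code; its line-pointer array, offsets, and states are all hypothetical.

/*
 * Standalone model (not backend code) of reaping dead line pointers and
 * trimming the trailing unused entries.
 */
#include <stdio.h>

enum lp_state {LP_NORMAL, LP_DEAD, LP_UNUSED};

int
main(void)
{
	enum lp_state lp[6] = {LP_NORMAL, LP_DEAD, LP_NORMAL, LP_DEAD, LP_DEAD, LP_DEAD};
	int			deadoffsets[] = {2, 4, 5, 6};	/* 1-based offsets to reap */
	int			nitems = 6;

	for (int i = 0; i < 4; i++)
		lp[deadoffsets[i] - 1] = LP_UNUSED;

	/* trim trailing unused entries, as the line pointer array truncation does */
	while (nitems > 0 && lp[nitems - 1] == LP_UNUSED)
		nitems--;

	printf("line pointer array shrinks from 6 to %d entries\n", nitems);
	return 0;
}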

◆ lazy_vacuum_heap_rel()

static void lazy_vacuum_heap_rel ( LVRelState *vacrel)
static

Definition at line 2786 of file vacuumlazy.c.

2787{
2788 ReadStream *stream;
2789 BlockNumber vacuumed_pages = 0;
2790 Buffer vmbuffer = InvalidBuffer;
2791 LVSavedErrInfo saved_err_info;
2792 TidStoreIter *iter;
2793
2794 Assert(vacrel->do_index_vacuuming);
2795 Assert(vacrel->do_index_cleanup);
2796 Assert(vacrel->num_index_scans > 0);
2797
2798 /* Report that we are now vacuuming the heap */
2799 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2800 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
2801
2802 /* Update error traceback information */
2803 update_vacuum_error_info(vacrel, &saved_err_info,
2804 VACUUM_ERRCB_PHASE_VACUUM_HEAP,
2805 InvalidBlockNumber, InvalidOffsetNumber);
2806
2807 iter = TidStoreBeginIterate(vacrel->dead_items);
2808
2809 /*
2810 * Set up the read stream for vacuum's second pass through the heap.
2811 *
2812 * It is safe to use batchmode, as vacuum_reap_lp_read_stream_next() does
2813 * not need to wait for IO and does not perform locking. Once we support
2814 * parallelism it should still be fine, as presumably the holder of locks
2815 * would never be blocked by IO while holding the lock.
2816 */
2817 stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE |
2818 READ_STREAM_USE_BATCHING,
2819 vacrel->bstrategy,
2820 vacrel->rel,
2821 MAIN_FORKNUM,
2822 vacuum_reap_lp_read_stream_next,
2823 iter,
2824 sizeof(TidStoreIterResult));
2825
2826 while (true)
2827 {
2828 BlockNumber blkno;
2829 Buffer buf;
2830 Page page;
2831 TidStoreIterResult *iter_result;
2832 Size freespace;
2833 OffsetNumber offsets[MaxOffsetNumber];
2834 int num_offsets;
2835
2836 vacuum_delay_point(false);
2837
2838 buf = read_stream_next_buffer(stream, (void **) &iter_result);
2839
2840 /* The relation is exhausted */
2841 if (!BufferIsValid(buf))
2842 break;
2843
2844 vacrel->blkno = blkno = BufferGetBlockNumber(buf);
2845
2846 Assert(iter_result);
2847 num_offsets = TidStoreGetBlockOffsets(iter_result, offsets, lengthof(offsets));
2848 Assert(num_offsets <= lengthof(offsets));
2849
2850 /*
2851 * Pin the visibility map page in case we need to mark the page
2852 * all-visible. In most cases this will be very cheap, because we'll
2853 * already have the correct page pinned anyway.
2854 */
2855 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
2856
2857 /* We need a non-cleanup exclusive lock to mark dead_items unused */
2858 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2859 lazy_vacuum_heap_page(vacrel, blkno, buf, offsets,
2860 num_offsets, vmbuffer);
2861
2862 /* Now that we've vacuumed the page, record its available space */
2863 page = BufferGetPage(buf);
2864 freespace = PageGetHeapFreeSpace(page);
2865
2866 UnlockReleaseBuffer(buf);
2867 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
2868 vacuumed_pages++;
2869 }
2870
2871 read_stream_end(stream);
2872 TidStoreEndIterate(iter);
2873
2874 vacrel->blkno = InvalidBlockNumber;
2875 if (BufferIsValid(vmbuffer))
2876 ReleaseBuffer(vmbuffer);
2877
2878 /*
2879 * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
2880 * the second heap pass. No more, no less.
2881 */
2882 Assert(vacrel->num_index_scans > 1 ||
2883 (vacrel->dead_items_info->num_items == vacrel->lpdead_items &&
2884 vacuumed_pages == vacrel->lpdead_item_pages));
2885
2886 ereport(DEBUG2,
2887 (errmsg("table \"%s\": removed %" PRId64 " dead item identifiers in %u pages",
2888 vacrel->relname, vacrel->dead_items_info->num_items,
2889 vacuumed_pages)));
2890
2891 /* Revert to the previous phase information for error traceback */
2892 restore_vacuum_error_info(vacrel, &saved_err_info);
2893}
#define lengthof(array)
Definition: c.h:801
#define MaxOffsetNumber
Definition: off.h:28
#define PROGRESS_VACUUM_PHASE_VACUUM_HEAP
Definition: progress.h:38
#define READ_STREAM_USE_BATCHING
Definition: read_stream.h:64
TidStoreIter * TidStoreBeginIterate(TidStore *ts)
Definition: tidstore.c:471
void TidStoreEndIterate(TidStoreIter *iter)
Definition: tidstore.c:518
int TidStoreGetBlockOffsets(TidStoreIterResult *result, OffsetNumber *offsets, int max_offsets)
Definition: tidstore.c:566
static BlockNumber vacuum_reap_lp_read_stream_next(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition: vacuumlazy.c:2748
static void lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
Definition: vacuumlazy.c:2904

References Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_EXCLUSIVE, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, ereport, errmsg(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_vacuum_heap_page(), lengthof, LockBuffer(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAIN_FORKNUM, MaxOffsetNumber, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, PageGetHeapFreeSpace(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, read_stream_begin_relation(), read_stream_end(), READ_STREAM_MAINTENANCE, read_stream_next_buffer(), READ_STREAM_USE_BATCHING, RecordPageWithFreeSpace(), LVRelState::rel, ReleaseBuffer(), LVRelState::relname, restore_vacuum_error_info(), TidStoreBeginIterate(), TidStoreEndIterate(), TidStoreGetBlockOffsets(), UnlockReleaseBuffer(), update_vacuum_error_info(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, vacuum_reap_lp_read_stream_next(), and visibilitymap_pin().

Referenced by lazy_vacuum().
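
The control flow here follows a producer/consumer pattern: a callback pulls the next block number from an iterator and stashes per-buffer data, and the consumer loop runs until the callback reports that the iterator is exhausted. The code below is a standalone model of that control flow only; it does not use the real read_stream API, and all of its names and types are hypothetical.

/*
 * Standalone model (not the real read_stream API) of the loop structure in
 * lazy_vacuum_heap_rel().
 */
#include <stdio.h>

#define INVALID_BLOCK (-1L)

struct demo_iter
{
	long		blocks[3];
	int			pos;
	int			nblocks;
};

/* Counterpart of vacuum_reap_lp_read_stream_next(): next block or sentinel */
static long
next_block(struct demo_iter *iter, long *per_buffer_data)
{
	if (iter->pos >= iter->nblocks)
		return INVALID_BLOCK;
	*per_buffer_data = iter->blocks[iter->pos];	/* keep the "iter result" around */
	return iter->blocks[iter->pos++];
}

int
main(void)
{
	struct demo_iter iter = {{7, 42, 99}, 0, 3};
	long		per_buffer_data;
	long		blkno;

	/* Counterpart of the while (true) loop over read_stream_next_buffer() */
	while ((blkno = next_block(&iter, &per_buffer_data)) != INVALID_BLOCK)
		printf("vacuum heap block %ld (per-buffer data %ld)\n", blkno, per_buffer_data);
	return 0;
}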

◆ lazy_vacuum_one_index()

static IndexBulkDeleteResult * lazy_vacuum_one_index ( Relation  indrel,
IndexBulkDeleteResult *istat,
double  reltuples,
LVRelState *vacrel 
)
static

Definition at line 3157 of file vacuumlazy.c.

3159{
3160 IndexVacuumInfo ivinfo;
3161 LVSavedErrInfo saved_err_info;
3162
3163 ivinfo.index = indrel;
3164 ivinfo.heaprel = vacrel->rel;
3165 ivinfo.analyze_only = false;
3166 ivinfo.report_progress = false;
3167 ivinfo.estimated_count = true;
3168 ivinfo.message_level = DEBUG2;
3169 ivinfo.num_heap_tuples = reltuples;
3170 ivinfo.strategy = vacrel->bstrategy;
3171
3172 /*
3173 * Update error traceback information.
3174 *
3175 * The index name is saved during this phase and restored immediately
3176 * after this phase. See vacuum_error_callback.
3177 */
3178 Assert(vacrel->indname == NULL);
3179 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
3180 update_vacuum_error_info(vacrel, &saved_err_info,
3181 VACUUM_ERRCB_PHASE_VACUUM_INDEX,
3182 InvalidBlockNumber, InvalidOffsetNumber);
3183
3184 /* Do bulk deletion */
3185 istat = vac_bulkdel_one_index(&ivinfo, istat, vacrel->dead_items,
3186 vacrel->dead_items_info);
3187
3188 /* Revert to the previous phase information for error traceback */
3189 restore_vacuum_error_info(vacrel, &saved_err_info);
3190 pfree(vacrel->indname);
3191 vacrel->indname = NULL;
3192
3193 return istat;
3194}
IndexBulkDeleteResult * vac_bulkdel_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat, TidStore *dead_items, VacDeadItemsInfo *dead_items_info)
Definition: vacuum.c:2633

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_bulkdel_one_index(), and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by lazy_vacuum_all_indexes().

◆ restore_vacuum_error_info()

static void restore_vacuum_error_info ( LVRelState *vacrel,
const LVSavedErrInfo *saved_vacrel 
)
static

Definition at line 3997 of file vacuumlazy.c.

3999{
4000 vacrel->blkno = saved_vacrel->blkno;
4001 vacrel->offnum = saved_vacrel->offnum;
4002 vacrel->phase = saved_vacrel->phase;
4003}
BlockNumber blkno
Definition: vacuumlazy.c:418
VacErrPhase phase
Definition: vacuumlazy.c:420
OffsetNumber offnum
Definition: vacuumlazy.c:419

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().

◆ should_attempt_truncation()

static bool should_attempt_truncation ( LVRelState *vacrel)
static

Definition at line 3266 of file vacuumlazy.c.

3267{
3268 BlockNumber possibly_freeable;
3269
3270 if (!vacrel->do_rel_truncate || VacuumFailsafeActive)
3271 return false;
3272
3273 possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
3274 if (possibly_freeable > 0 &&
3275 (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
3276 possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
3277 return true;
3278
3279 return false;
3280}
#define REL_TRUNCATE_MINIMUM
Definition: vacuumlazy.c:168
#define REL_TRUNCATE_FRACTION
Definition: vacuumlazy.c:169

References LVRelState::do_rel_truncate, LVRelState::nonempty_pages, LVRelState::rel_pages, REL_TRUNCATE_FRACTION, REL_TRUNCATE_MINIMUM, and VacuumFailsafeActive.

Referenced by heap_vacuum_rel().
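
A worked example makes the heuristic concrete. The standalone program below is not backend code; it assumes the defaults of 1000 for REL_TRUNCATE_MINIMUM and 16 for REL_TRUNCATE_FRACTION defined near the top of the file, and uses made-up page counts to show how the decision falls out.

/*
 * Standalone illustration (not backend code) of the should_attempt_truncation()
 * heuristic.
 */
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	long		rel_pages = 8192;		/* table size in blocks */
	long		nonempty_pages = 7900;	/* one past the last block with tuples */
	long		possibly_freeable = rel_pages - nonempty_pages;

	bool		attempt = possibly_freeable > 0 &&
						  (possibly_freeable >= 1000 ||
						   possibly_freeable >= rel_pages / 16);

	printf("%ld freeable pages -> %s\n",
		   possibly_freeable, attempt ? "attempt truncation" : "skip truncation");
	return 0;
}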

◆ update_relstats_all_indexes()

static void update_relstats_all_indexes ( LVRelState *vacrel)
static

Definition at line 3879 of file vacuumlazy.c.

3880{
3881 Relation *indrels = vacrel->indrels;
3882 int nindexes = vacrel->nindexes;
3883 IndexBulkDeleteResult **indstats = vacrel->indstats;
3884
3885 Assert(vacrel->do_index_cleanup);
3886
3887 for (int idx = 0; idx < nindexes; idx++)
3888 {
3889 Relation indrel = indrels[idx];
3890 IndexBulkDeleteResult *istat = indstats[idx];
3891
3892 if (istat == NULL || istat->estimated_count)
3893 continue;
3894
3895 /* Update index statistics */
3896 vac_update_relstats(indrel,
3897 istat->num_pages,
3898 istat->num_index_tuples,
3899 0, 0,
3900 false,
3901 InvalidTransactionId,
3902 InvalidMultiXactId,
3903 NULL, NULL, false);
3904 }
3905}
double num_index_tuples
Definition: genam.h:106

References Assert(), LVRelState::do_index_cleanup, IndexBulkDeleteResult::estimated_count, idx(), LVRelState::indrels, LVRelState::indstats, InvalidMultiXactId, InvalidTransactionId, LVRelState::nindexes, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, and vac_update_relstats().

Referenced by heap_vacuum_rel().

◆ update_vacuum_error_info()

static void update_vacuum_error_info ( LVRelState *vacrel,
LVSavedErrInfo *saved_vacrel,
int  phase,
BlockNumber  blkno,
OffsetNumber  offnum 
)
static

Definition at line 3978 of file vacuumlazy.c.

3980{
3981 if (saved_vacrel)
3982 {
3983 saved_vacrel->offnum = vacrel->offnum;
3984 saved_vacrel->blkno = vacrel->blkno;
3985 saved_vacrel->phase = vacrel->phase;
3986 }
3987
3988 vacrel->blkno = blkno;
3989 vacrel->offnum = offnum;
3990 vacrel->phase = phase;
3991}

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_scan_heap(), lazy_truncate_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().
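
The function is always used in a save/override/restore pairing with restore_vacuum_error_info(), as the callers listed above do around each nested phase. The code below is a standalone model of that pairing, not backend code; its struct and phase values are hypothetical.

/*
 * Standalone model (not backend code) of the save/restore pairing around a
 * nested vacuum phase.
 */
#include <stdio.h>

struct errpos
{
	int			phase;
	long		blkno;
};

static void
update_info(struct errpos *cur, struct errpos *saved, int phase, long blkno)
{
	if (saved)
		*saved = *cur;			/* remember where we were */
	cur->phase = phase;
	cur->blkno = blkno;
}

static void
restore_info(struct errpos *cur, const struct errpos *saved)
{
	*cur = *saved;
}

int
main(void)
{
	struct errpos cur = {1 /* scan heap */, 10};
	struct errpos saved;

	update_info(&cur, &saved, 3 /* vacuum heap */, 42);
	printf("during nested phase: phase=%d blkno=%ld\n", cur.phase, cur.blkno);
	restore_info(&cur, &saved);
	printf("after restore: phase=%d blkno=%ld\n", cur.phase, cur.blkno);
	return 0;
}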

◆ vacuum_error_callback()

static void vacuum_error_callback ( void *  arg)
static

Definition at line 3914 of file vacuumlazy.c.

3915{
3916 LVRelState *errinfo = arg;
3917
3918 switch (errinfo->phase)
3919 {
3920 case VACUUM_ERRCB_PHASE_SCAN_HEAP:
3921 if (BlockNumberIsValid(errinfo->blkno))
3922 {
3923 if (OffsetNumberIsValid(errinfo->offnum))
3924 errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
3925 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3926 else
3927 errcontext("while scanning block %u of relation \"%s.%s\"",
3928 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3929 }
3930 else
3931 errcontext("while scanning relation \"%s.%s\"",
3932 errinfo->relnamespace, errinfo->relname);
3933 break;
3934
3935 case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
3936 if (BlockNumberIsValid(errinfo->blkno))
3937 {
3938 if (OffsetNumberIsValid(errinfo->offnum))
3939 errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
3940 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3941 else
3942 errcontext("while vacuuming block %u of relation \"%s.%s\"",
3943 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3944 }
3945 else
3946 errcontext("while vacuuming relation \"%s.%s\"",
3947 errinfo->relnamespace, errinfo->relname);
3948 break;
3949
3950 case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
3951 errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
3952 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3953 break;
3954
3955 case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
3956 errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
3957 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3958 break;
3959
3960 case VACUUM_ERRCB_PHASE_TRUNCATE:
3961 if (BlockNumberIsValid(errinfo->blkno))
3962 errcontext("while truncating relation \"%s.%s\" to %u blocks",
3963 errinfo->relnamespace, errinfo->relname, errinfo->blkno);
3964 break;
3965
3966 case VACUUM_ERRCB_PHASE_UNKNOWN:
3967 default:
3968 return; /* do nothing; the errinfo may not be
3969 * initialized */
3970 }
3971}
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
#define errcontext
Definition: elog.h:198
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
void * arg

References arg, LVRelState::blkno, BlockNumberIsValid(), errcontext, LVRelState::indname, LVRelState::offnum, OffsetNumberIsValid, LVRelState::phase, LVRelState::relname, LVRelState::relnamespace, VACUUM_ERRCB_PHASE_INDEX_CLEANUP, VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_ERRCB_PHASE_UNKNOWN, VACUUM_ERRCB_PHASE_VACUUM_HEAP, and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by heap_vacuum_rel().
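
This callback only produces CONTEXT lines; the registration itself happens in heap_vacuum_rel(), which is not shown on this page. The code below is a standalone model of the general error-context-stack pattern such a callback participates in: callbacks are pushed while a phase runs, and each one appends a context line when an error is reported. All names here are hypothetical and it is not the backend implementation.

/*
 * Standalone model (not backend code) of an error-context callback stack.
 */
#include <stdio.h>

struct err_cb
{
	void		(*callback) (void *arg);
	void	   *arg;
	struct err_cb *previous;
};

static struct err_cb *err_stack = NULL;

struct demo_state
{
	const char *relname;
	long		blkno;
};

static void
demo_callback(void *arg)
{
	struct demo_state *st = arg;

	printf("CONTEXT: while vacuuming block %ld of relation \"%s\"\n",
		   st->blkno, st->relname);
}

static void
report_error(const char *msg)
{
	printf("ERROR: %s\n", msg);
	for (struct err_cb *cb = err_stack; cb != NULL; cb = cb->previous)
		cb->callback(cb->arg);
}

int
main(void)
{
	struct demo_state st = {"pgbench_accounts", 1234};
	struct err_cb cb = {demo_callback, &st, err_stack};

	err_stack = &cb;			/* push while the phase runs */
	report_error("could not read block");
	err_stack = cb.previous;	/* pop before returning */
	return 0;
}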

◆ vacuum_reap_lp_read_stream_next()

static BlockNumber vacuum_reap_lp_read_stream_next ( ReadStream *stream,
void *  callback_private_data,
void *  per_buffer_data 
)
static

Definition at line 2748 of file vacuumlazy.c.

2751{
2752 TidStoreIter *iter = callback_private_data;
2753 TidStoreIterResult *iter_result;
2754
2755 iter_result = TidStoreIterateNext(iter);
2756 if (iter_result == NULL)
2757 return InvalidBlockNumber;
2758
2759 /*
2760 * Save the TidStoreIterResult for later, so we can extract the offsets.
2761 * It is safe to copy the result, according to TidStoreIterateNext().
2762 */
2763 memcpy(per_buffer_data, iter_result, sizeof(*iter_result));
2764
2765 return iter_result->blkno;
2766}
BlockNumber blkno
Definition: tidstore.h:29
TidStoreIterResult * TidStoreIterateNext(TidStoreIter *iter)
Definition: tidstore.c:493

References TidStoreIterResult::blkno, InvalidBlockNumber, and TidStoreIterateNext().

Referenced by lazy_vacuum_heap_rel().