PostgreSQL Source Code git master
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/genam.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/tidstore.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "catalog/storage.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "common/int.h"
#include "common/pg_prng.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/read_stream.h"
#include "utils/lsyscache.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"

Data Structures

struct  LVRelState
 
struct  LVSavedErrInfo
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */
 
#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define ParallelVacuumIsActive(vacrel)   ((vacrel)->pvs != NULL)
 
#define MAX_EAGER_FREEZE_SUCCESS_RATE   0.2
 
#define EAGER_SCAN_REGION_SIZE   4096
 
#define VAC_BLK_WAS_EAGER_SCANNED   (1 << 0)
 
#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM   (1 << 1)
 

Typedefs

typedef struct LVRelState LVRelState
 
typedef struct LVSavedErrInfo LVSavedErrInfo
 

Enumerations

enum  VacErrPhase {
  VACUUM_ERRCB_PHASE_UNKNOWN , VACUUM_ERRCB_PHASE_SCAN_HEAP , VACUUM_ERRCB_PHASE_VACUUM_INDEX , VACUUM_ERRCB_PHASE_VACUUM_HEAP ,
  VACUUM_ERRCB_PHASE_INDEX_CLEANUP , VACUUM_ERRCB_PHASE_TRUNCATE
}
 

Functions

static void lazy_scan_heap (LVRelState *vacrel)
 
static void heap_vacuum_eager_scan_setup (LVRelState *vacrel, const VacuumParams params)
 
static BlockNumber heap_vac_scan_next_block (ReadStream *stream, void *callback_private_data, void *per_buffer_data)
 
static void find_next_unskippable_block (LVRelState *vacrel, bool *skipsallvis)
 
static bool lazy_scan_new_or_empty (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
 
static int lazy_scan_prune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool all_visible_according_to_vm, bool *has_lpdead_items, bool *vm_page_frozen)
 
static bool lazy_scan_noprune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
 
static void lazy_vacuum (LVRelState *vacrel)
 
static bool lazy_vacuum_all_indexes (LVRelState *vacrel)
 
static void lazy_vacuum_heap_rel (LVRelState *vacrel)
 
static void lazy_vacuum_heap_page (LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
 
static bool lazy_check_wraparound_failsafe (LVRelState *vacrel)
 
static void lazy_cleanup_all_indexes (LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_vacuum_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_cleanup_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
 
static bool should_attempt_truncation (LVRelState *vacrel)
 
static void lazy_truncate_heap (LVRelState *vacrel)
 
static BlockNumber count_nondeletable_pages (LVRelState *vacrel, bool *lock_waiter_detected)
 
static void dead_items_alloc (LVRelState *vacrel, int nworkers)
 
static void dead_items_add (LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
 
static void dead_items_reset (LVRelState *vacrel)
 
static void dead_items_cleanup (LVRelState *vacrel)
 
static bool heap_page_would_be_all_visible (Relation rel, Buffer buf, TransactionId OldestXmin, OffsetNumber *deadoffsets, int ndeadoffsets, bool *all_frozen, TransactionId *visibility_cutoff_xid, OffsetNumber *logging_offnum)
 
static void update_relstats_all_indexes (LVRelState *vacrel)
 
static void vacuum_error_callback (void *arg)
 
static void update_vacuum_error_info (LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
 
static void restore_vacuum_error_info (LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
 
void heap_vacuum_rel (Relation rel, const VacuumParams params, BufferAccessStrategy bstrategy)
 
static int cmpOffsetNumbers (const void *a, const void *b)
 
static BlockNumber vacuum_reap_lp_read_stream_next (ReadStream *stream, void *callback_private_data, void *per_buffer_data)
 

Macro Definition Documentation

◆ BYPASS_THRESHOLD_PAGES

#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */

Definition at line 186 of file vacuumlazy.c.
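
For orientation, the sketch below (not the exact test in lazy_vacuum(), which also weighs how much dead-item memory has been consumed) shows how a 2% threshold of rel_pages can gate the index-vacuuming bypass; bypass_candidate() is a hypothetical helper, not part of vacuumlazy.c:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t BlockNumber;        /* stand-in for PostgreSQL's BlockNumber */

#define BYPASS_THRESHOLD_PAGES 0.02  /* i.e. 2% of rel_pages */

/* Hypothetical helper: do few enough pages carry LP_DEAD items to bypass? */
static bool
bypass_candidate(BlockNumber rel_pages, BlockNumber lpdead_item_pages)
{
    BlockNumber threshold = (BlockNumber) (rel_pages * BYPASS_THRESHOLD_PAGES);

    /* e.g. rel_pages = 100000 gives threshold = 2000 pages */
    return lpdead_item_pages < threshold;
}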

◆ EAGER_SCAN_REGION_SIZE

#define EAGER_SCAN_REGION_SIZE   4096

Definition at line 249 of file vacuumlazy.c.
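
With the default 8 kB BLCKSZ, an eager-scan region of 4096 blocks corresponds to 32 MB of heap.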

◆ FAILSAFE_EVERY_PAGES

#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 192 of file vacuumlazy.c.
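
With the default 8 kB BLCKSZ this works out to 4 GB / 8 kB = 524,288 blocks, i.e. the wraparound failsafe condition is rechecked after roughly every 4 GB of heap processed.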

◆ MAX_EAGER_FREEZE_SUCCESS_RATE

#define MAX_EAGER_FREEZE_SUCCESS_RATE   0.2

Definition at line 240 of file vacuumlazy.c.
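
Worked example: if visibilitymap_count() reports 100,000 all-visible pages of which 60,000 are also all-frozen, the eager-freeze success cap is 0.2 * (100,000 - 60,000) = 8,000 pages for this vacuum.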

◆ ParallelVacuumIsActive

#define ParallelVacuumIsActive (   vacrel)    ((vacrel)->pvs != NULL)

Definition at line 220 of file vacuumlazy.c.

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 214 of file vacuumlazy.c.

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 169 of file vacuumlazy.c.

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 168 of file vacuumlazy.c.

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 208 of file vacuumlazy.c.
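
In other words, heap_vac_scan_next_block() only jumps over an all-visible range when at least 32 consecutive blocks (256 kB at the default 8 kB BLCKSZ) could be skipped; shorter runs are read anyway to keep the scan sequential.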

◆ VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM

#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM   (1 << 1)

Definition at line 256 of file vacuumlazy.c.

◆ VAC_BLK_WAS_EAGER_SCANNED

#define VAC_BLK_WAS_EAGER_SCANNED   (1 << 0)

Definition at line 255 of file vacuumlazy.c.

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 201 of file vacuumlazy.c.
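
At the default 8 kB BLCKSZ this is 8 GB / 8 kB = 1,048,576 blocks between FSM vacuuming passes.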

◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 178 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 180 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 179 of file vacuumlazy.c.

Typedef Documentation

◆ LVRelState

typedef struct LVRelState LVRelState

◆ LVSavedErrInfo

typedef struct LVSavedErrInfo LVSavedErrInfo

Enumeration Type Documentation

◆ VacErrPhase

Enumerator
VACUUM_ERRCB_PHASE_UNKNOWN 
VACUUM_ERRCB_PHASE_SCAN_HEAP 
VACUUM_ERRCB_PHASE_VACUUM_INDEX 
VACUUM_ERRCB_PHASE_VACUUM_HEAP 
VACUUM_ERRCB_PHASE_INDEX_CLEANUP 
VACUUM_ERRCB_PHASE_TRUNCATE 

Definition at line 223 of file vacuumlazy.c.

224{
225  VACUUM_ERRCB_PHASE_UNKNOWN,
226  VACUUM_ERRCB_PHASE_SCAN_HEAP,
227  VACUUM_ERRCB_PHASE_VACUUM_INDEX,
228  VACUUM_ERRCB_PHASE_VACUUM_HEAP,
229  VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
230  VACUUM_ERRCB_PHASE_TRUNCATE,
231} VacErrPhase;

Function Documentation

◆ cmpOffsetNumbers()

static int cmpOffsetNumbers ( const void *  a,
const void *  b 
)
static

Definition at line 1946 of file vacuumlazy.c.

1947{
1948 return pg_cmp_u16(*(const OffsetNumber *) a, *(const OffsetNumber *) b);
1949}
static int pg_cmp_u16(uint16 a, uint16 b)
Definition: int.h:707
int b
Definition: isn.c:74
int a
Definition: isn.c:73
uint16 OffsetNumber
Definition: off.h:24

References a, b, and pg_cmp_u16().

Referenced by lazy_scan_prune().
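
The function has the standard qsort() comparator shape. A minimal standalone sketch (cmp_offset_numbers and the local array are illustrative stand-ins, not code from vacuumlazy.c):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef uint16_t OffsetNumber;   /* stand-in for PostgreSQL's OffsetNumber */

/* Ascending comparator with the same shape as cmpOffsetNumbers() */
static int
cmp_offset_numbers(const void *a, const void *b)
{
    OffsetNumber oa = *(const OffsetNumber *) a;
    OffsetNumber ob = *(const OffsetNumber *) b;

    return (oa > ob) - (oa < ob);
}

int
main(void)
{
    OffsetNumber deadoffsets[] = {7, 2, 5, 3};

    qsort(deadoffsets, 4, sizeof(OffsetNumber), cmp_offset_numbers);
    for (int i = 0; i < 4; i++)
        printf("%u\n", (unsigned) deadoffsets[i]);   /* prints 2 3 5 7 */
    return 0;
}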

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages ( LVRelState vacrel,
bool *  lock_waiter_detected 
)
static

Definition at line 3389 of file vacuumlazy.c.

3390{
3391 StaticAssertDecl((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
3392 "prefetch size must be power of 2");
3393
3394 BlockNumber blkno;
3395 BlockNumber prefetchedUntil;
3396 instr_time starttime;
3397
3398 /* Initialize the starttime if we check for conflicting lock requests */
3399 INSTR_TIME_SET_CURRENT(starttime);
3400
3401 /*
3402 * Start checking blocks at what we believe relation end to be and move
3403 * backwards. (Strange coding of loop control is needed because blkno is
3404 * unsigned.) To make the scan faster, we prefetch a few blocks at a time
3405 * in forward direction, so that OS-level readahead can kick in.
3406 */
3407 blkno = vacrel->rel_pages;
3408 prefetchedUntil = InvalidBlockNumber;
3409 while (blkno > vacrel->nonempty_pages)
3410 {
3411 Buffer buf;
3412 Page page;
3413 OffsetNumber offnum,
3414 maxoff;
3415 bool hastup;
3416
3417 /*
3418 * Check if another process requests a lock on our relation. We are
3419 * holding an AccessExclusiveLock here, so they will be waiting. We
3420 * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
3421 * only check if that interval has elapsed once every 32 blocks to
3422 * keep the number of system calls and actual shared lock table
3423 * lookups to a minimum.
3424 */
3425 if ((blkno % 32) == 0)
3426 {
3427 instr_time currenttime;
3428 instr_time elapsed;
3429
3430 INSTR_TIME_SET_CURRENT(currenttime);
3431 elapsed = currenttime;
3432 INSTR_TIME_SUBTRACT(elapsed, starttime);
3433 if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
3434 >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
3435 {
3436 if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
3437 {
3438 ereport(vacrel->verbose ? INFO : DEBUG2,
3439 (errmsg("table \"%s\": suspending truncate due to conflicting lock request",
3440 vacrel->relname)));
3441
3442 *lock_waiter_detected = true;
3443 return blkno;
3444 }
3445 starttime = currenttime;
3446 }
3447 }
3448
3449 /*
3450 * We don't insert a vacuum delay point here, because we have an
3451 * exclusive lock on the table which we want to hold for as short a
3452 * time as possible. We still need to check for interrupts however.
3453 */
3454 CHECK_FOR_INTERRUPTS();
3455
3456 blkno--;
3457
3458 /* If we haven't prefetched this lot yet, do so now. */
3459 if (prefetchedUntil > blkno)
3460 {
3461 BlockNumber prefetchStart;
3462 BlockNumber pblkno;
3463
3464 prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
3465 for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
3466 {
3467 PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
3468 CHECK_FOR_INTERRUPTS();
3469 }
3470 prefetchedUntil = prefetchStart;
3471 }
3472
3473 buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
3474 vacrel->bstrategy);
3475
3476 /* In this phase we only need shared access to the buffer */
3477 LockBuffer(buf, BUFFER_LOCK_SHARE);
3478
3479 page = BufferGetPage(buf);
3480
3481 if (PageIsNew(page) || PageIsEmpty(page))
3482 {
3483 UnlockReleaseBuffer(buf);
3484 continue;
3485 }
3486
3487 hastup = false;
3488 maxoff = PageGetMaxOffsetNumber(page);
3489 for (offnum = FirstOffsetNumber;
3490 offnum <= maxoff;
3491 offnum = OffsetNumberNext(offnum))
3492 {
3493 ItemId itemid;
3494
3495 itemid = PageGetItemId(page, offnum);
3496
3497 /*
3498 * Note: any non-unused item should be taken as a reason to keep
3499 * this page. Even an LP_DEAD item makes truncation unsafe, since
3500 * we must not have cleaned out its index entries.
3501 */
3502 if (ItemIdIsUsed(itemid))
3503 {
3504 hastup = true;
3505 break; /* can stop scanning */
3506 }
3507 } /* scan along page */
3508
3509 UnlockReleaseBuffer(buf);
3510
3511 /* Done scanning if we found a tuple here */
3512 if (hastup)
3513 return blkno + 1;
3514 }
3515
3516 /*
3517 * If we fall out of the loop, all the previously-thought-to-be-empty
3518 * pages still are; we need not bother to look at the last known-nonempty
3519 * page.
3520 */
3521 return vacrel->nonempty_pages;
3522}
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
int Buffer
Definition: buf.h:23
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:653
void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition: bufmgr.c:5604
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:5383
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:792
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:436
@ BUFFER_LOCK_SHARE
Definition: bufmgr.h:206
@ RBM_NORMAL
Definition: bufmgr.h:46
static bool PageIsEmpty(const PageData *page)
Definition: bufpage.h:223
static bool PageIsNew(const PageData *page)
Definition: bufpage.h:233
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:243
PageData * Page
Definition: bufpage.h:81
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition: bufpage.h:371
#define StaticAssertDecl(condition, errmessage)
Definition: c.h:948
int errmsg(const char *fmt,...)
Definition: elog.c:1080
#define DEBUG2
Definition: elog.h:29
#define INFO
Definition: elog.h:34
#define ereport(elevel,...)
Definition: elog.h:150
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:122
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:181
#define INSTR_TIME_GET_MICROSEC(t)
Definition: instr_time.h:194
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:367
#define AccessExclusiveLock
Definition: lockdefs.h:43
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:123
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
#define FirstOffsetNumber
Definition: off.h:27
static char buf[DEFAULT_XLOG_SEG_SIZE]
Definition: pg_test_fsync.c:71
@ MAIN_FORKNUM
Definition: relpath.h:58
bool verbose
Definition: vacuumlazy.c:297
BlockNumber nonempty_pages
Definition: vacuumlazy.c:340
Relation rel
Definition: vacuumlazy.c:261
BlockNumber rel_pages
Definition: vacuumlazy.c:312
BufferAccessStrategy bstrategy
Definition: vacuumlazy.c:266
char * relname
Definition: vacuumlazy.c:292
#define PREFETCH_SIZE
Definition: vacuumlazy.c:214
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL
Definition: vacuumlazy.c:178

References AccessExclusiveLock, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BufferGetPage(), CHECK_FOR_INTERRUPTS, DEBUG2, ereport, errmsg(), FirstOffsetNumber, INFO, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelState::nonempty_pages, OffsetNumberNext, PageGetItemId(), PageGetMaxOffsetNumber(), PageIsEmpty(), PageIsNew(), PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelState::rel, LVRelState::rel_pages, LVRelState::relname, StaticAssertDecl, UnlockReleaseBuffer(), VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and LVRelState::verbose.

Referenced by lazy_truncate_heap().
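
Because PREFETCH_SIZE is a power of two (32), prefetchStart = blkno & ~(PREFETCH_SIZE - 1) rounds the current block down to a multiple of 32; for example, when the backwards scan reaches block 1000 it prefetches blocks 992-1000 in forward order before reading them, letting OS readahead help despite the reverse scan direction.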

◆ dead_items_add()

static void dead_items_add ( LVRelState vacrel,
BlockNumber  blkno,
OffsetNumber offsets,
int  num_offsets 
)
static

Definition at line 3597 of file vacuumlazy.c.

3599{
3600 const int prog_index[2] = {
3601 PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS,
3602 PROGRESS_VACUUM_DEAD_TUPLE_BYTES
3603 };
3604 int64 prog_val[2];
3605
3606 TidStoreSetBlockOffsets(vacrel->dead_items, blkno, offsets, num_offsets);
3607 vacrel->dead_items_info->num_items += num_offsets;
3608
3609 /* update the progress information */
3610 prog_val[0] = vacrel->dead_items_info->num_items;
3611 prog_val[1] = TidStoreMemoryUsage(vacrel->dead_items);
3612 pgstat_progress_update_multi_param(2, prog_index, prog_val);
3613}
void pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val)
int64_t int64
Definition: c.h:549
#define PROGRESS_VACUUM_DEAD_TUPLE_BYTES
Definition: progress.h:27
#define PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS
Definition: progress.h:28
VacDeadItemsInfo * dead_items_info
Definition: vacuumlazy.c:310
TidStore * dead_items
Definition: vacuumlazy.c:309
int64 num_items
Definition: vacuum.h:300
void TidStoreSetBlockOffsets(TidStore *ts, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
Definition: tidstore.c:345
size_t TidStoreMemoryUsage(TidStore *ts)
Definition: tidstore.c:532

References LVRelState::dead_items, LVRelState::dead_items_info, VacDeadItemsInfo::num_items, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS, TidStoreMemoryUsage(), and TidStoreSetBlockOffsets().

Referenced by lazy_scan_noprune(), and lazy_scan_prune().
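
Callers pass a page's dead item offsets in ascending order (lazy_scan_prune(), the main caller, sorts its collected offsets first; see cmpOffsetNumbers() above), and both progress counters are pushed in a single pgstat_progress_update_multi_param() call so the reported dead-item count and memory usage stay in step.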

◆ dead_items_alloc()

static void dead_items_alloc ( LVRelState vacrel,
int  nworkers 
)
static

Definition at line 3532 of file vacuumlazy.c.

3533{
3534 VacDeadItemsInfo *dead_items_info;
3535 int vac_work_mem = AmAutoVacuumWorkerProcess() &&
3536 autovacuum_work_mem != -1 ?
3537 autovacuum_work_mem : maintenance_work_mem;
3538
3539 /*
3540 * Initialize state for a parallel vacuum. As of now, only one worker can
3541 * be used for an index, so we invoke parallelism only if there are at
3542 * least two indexes on a table.
3543 */
3544 if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)
3545 {
3546 /*
3547 * Since parallel workers cannot access data in temporary tables, we
3548 * can't perform parallel vacuum on them.
3549 */
3550 if (RelationUsesLocalBuffers(vacrel->rel))
3551 {
3552 /*
3553 * Give warning only if the user explicitly tries to perform a
3554 * parallel vacuum on the temporary table.
3555 */
3556 if (nworkers > 0)
3557 ereport(WARNING,
3558 (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
3559 vacrel->relname)));
3560 }
3561 else
3562 vacrel->pvs = parallel_vacuum_init(vacrel->rel, vacrel->indrels,
3563 vacrel->nindexes, nworkers,
3564 vac_work_mem,
3565 vacrel->verbose ? INFO : DEBUG2,
3566 vacrel->bstrategy);
3567
3568 /*
3569 * If parallel mode started, dead_items and dead_items_info spaces are
3570 * allocated in DSM.
3571 */
3572 if (ParallelVacuumIsActive(vacrel))
3573 {
3574 vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs,
3575 &vacrel->dead_items_info);
3576 return;
3577 }
3578 }
3579
3580 /*
3581 * Serial VACUUM case. Allocate both dead_items and dead_items_info
3582 * locally.
3583 */
3584
3585 dead_items_info = palloc_object(VacDeadItemsInfo);
3586 dead_items_info->max_bytes = vac_work_mem * (Size) 1024;
3587 dead_items_info->num_items = 0;
3588 vacrel->dead_items_info = dead_items_info;
3589
3590 vacrel->dead_items = TidStoreCreateLocal(dead_items_info->max_bytes, true);
3591}
int autovacuum_work_mem
Definition: autovacuum.c:120
size_t Size
Definition: c.h:624
#define WARNING
Definition: elog.h:36
#define palloc_object(type)
Definition: fe_memutils.h:74
int maintenance_work_mem
Definition: globals.c:133
#define AmAutoVacuumWorkerProcess()
Definition: miscadmin.h:383
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:647
ParallelVacuumState * pvs
Definition: vacuumlazy.c:267
int nindexes
Definition: vacuumlazy.c:263
Relation * indrels
Definition: vacuumlazy.c:262
bool do_index_vacuuming
Definition: vacuumlazy.c:277
size_t max_bytes
Definition: vacuum.h:299
TidStore * TidStoreCreateLocal(size_t max_bytes, bool insert_only)
Definition: tidstore.c:162
#define ParallelVacuumIsActive(vacrel)
Definition: vacuumlazy.c:220
TidStore * parallel_vacuum_get_dead_items(ParallelVacuumState *pvs, VacDeadItemsInfo **dead_items_info_p)
ParallelVacuumState * parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, int nrequested_workers, int vac_work_mem, int elevel, BufferAccessStrategy bstrategy)

References AmAutoVacuumWorkerProcess, autovacuum_work_mem, LVRelState::bstrategy, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_vacuuming, ereport, errmsg(), LVRelState::indrels, INFO, maintenance_work_mem, VacDeadItemsInfo::max_bytes, LVRelState::nindexes, VacDeadItemsInfo::num_items, palloc_object, parallel_vacuum_get_dead_items(), parallel_vacuum_init(), ParallelVacuumIsActive, LVRelState::pvs, LVRelState::rel, RelationUsesLocalBuffers, LVRelState::relname, TidStoreCreateLocal(), LVRelState::verbose, and WARNING.

Referenced by heap_vacuum_rel().
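
Note that vac_work_mem is in kilobytes, as autovacuum_work_mem and maintenance_work_mem are memory GUCs measured in kB: for example, maintenance_work_mem = 65536 (64 MB) yields dead_items_info->max_bytes = 65536 * 1024 = 67,108,864 bytes for the serial-VACUUM TidStore.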

◆ dead_items_cleanup()

static void dead_items_cleanup ( LVRelState vacrel)
static

Definition at line 3641 of file vacuumlazy.c.

3642{
3643 if (!ParallelVacuumIsActive(vacrel))
3644 {
3645 /* Don't bother with pfree here */
3646 return;
3647 }
3648
3649 /* End parallel mode */
3650 parallel_vacuum_end(vacrel->pvs, vacrel->indstats);
3651 vacrel->pvs = NULL;
3652}
IndexBulkDeleteResult ** indstats
Definition: vacuumlazy.c:346
void parallel_vacuum_end(ParallelVacuumState *pvs, IndexBulkDeleteResult **istats)

References LVRelState::indstats, parallel_vacuum_end(), ParallelVacuumIsActive, and LVRelState::pvs.

Referenced by heap_vacuum_rel().

◆ dead_items_reset()

static void dead_items_reset ( LVRelState vacrel)
static

Definition at line 3619 of file vacuumlazy.c.

3620{
3621 if (ParallelVacuumIsActive(vacrel))
3622 {
3623 parallel_vacuum_reset_dead_items(vacrel->pvs);
3624 vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs,
3625 &vacrel->dead_items_info);
3626 return;
3627 }
3628
3629 /* Recreate the tidstore with the same max_bytes limitation */
3630 TidStoreDestroy(vacrel->dead_items);
3631 vacrel->dead_items = TidStoreCreateLocal(vacrel->dead_items_info->max_bytes, true);
3632
3633 /* Reset the counter */
3634 vacrel->dead_items_info->num_items = 0;
3635}
void TidStoreDestroy(TidStore *ts)
Definition: tidstore.c:317
void parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs)

References LVRelState::dead_items, LVRelState::dead_items_info, VacDeadItemsInfo::max_bytes, VacDeadItemsInfo::num_items, parallel_vacuum_get_dead_items(), parallel_vacuum_reset_dead_items(), ParallelVacuumIsActive, LVRelState::pvs, TidStoreCreateLocal(), and TidStoreDestroy().

Referenced by lazy_vacuum().

◆ find_next_unskippable_block()

static void find_next_unskippable_block ( LVRelState vacrel,
bool *  skipsallvis 
)
static

Definition at line 1704 of file vacuumlazy.c.

1705{
1706 BlockNumber rel_pages = vacrel->rel_pages;
1707 BlockNumber next_unskippable_block = vacrel->next_unskippable_block + 1;
1708 Buffer next_unskippable_vmbuffer = vacrel->next_unskippable_vmbuffer;
1709 bool next_unskippable_eager_scanned = false;
1710 bool next_unskippable_allvis;
1711
1712 *skipsallvis = false;
1713
1714 for (;; next_unskippable_block++)
1715 {
1716 uint8 mapbits = visibilitymap_get_status(vacrel->rel,
1717 next_unskippable_block,
1718 &next_unskippable_vmbuffer);
1719
1720 next_unskippable_allvis = (mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0;
1721
1722 /*
1723 * At the start of each eager scan region, normal vacuums with eager
1724 * scanning enabled reset the failure counter, allowing vacuum to
1725 * resume eager scanning if it had been suspended in the previous
1726 * region.
1727 */
1728 if (next_unskippable_block >= vacrel->next_eager_scan_region_start)
1729 {
1730 vacrel->next_eager_scan_region_start += EAGER_SCAN_REGION_SIZE;
1731 vacrel->eager_scan_remaining_fails =
1732 vacrel->eager_scan_max_fails_per_region;
1733 }
1734
1735 /*
1736 * A block is unskippable if it is not all visible according to the
1737 * visibility map.
1738 */
1739 if (!next_unskippable_allvis)
1740 {
1741 Assert((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0);
1742 break;
1743 }
1744
1745 /*
1746 * Caller must scan the last page to determine whether it has tuples
1747 * (caller must have the opportunity to set vacrel->nonempty_pages).
1748 * This rule avoids having lazy_truncate_heap() take access-exclusive
1749 * lock on rel to attempt a truncation that fails anyway, just because
1750 * there are tuples on the last page (it is likely that there will be
1751 * tuples on other nearby pages as well, but those can be skipped).
1752 *
1753 * Implement this by always treating the last block as unsafe to skip.
1754 */
1755 if (next_unskippable_block == rel_pages - 1)
1756 break;
1757
1758 /* DISABLE_PAGE_SKIPPING makes all skipping unsafe */
1759 if (!vacrel->skipwithvm)
1760 break;
1761
1762 /*
1763 * All-frozen pages cannot contain XIDs < OldestXmin (XIDs that aren't
1764 * already frozen by now), so this page can be skipped.
1765 */
1766 if ((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0)
1767 continue;
1768
1769 /*
1770 * Aggressive vacuums cannot skip any all-visible pages that are not
1771 * also all-frozen.
1772 */
1773 if (vacrel->aggressive)
1774 break;
1775
1776 /*
1777 * Normal vacuums with eager scanning enabled only skip all-visible
1778 * but not all-frozen pages if they have hit the failure limit for the
1779 * current eager scan region.
1780 */
1781 if (vacrel->eager_scan_remaining_fails > 0)
1782 {
1783 next_unskippable_eager_scanned = true;
1784 break;
1785 }
1786
1787 /*
1788 * All-visible blocks are safe to skip in a normal vacuum. But
1789 * remember that the final range contains such a block for later.
1790 */
1791 *skipsallvis = true;
1792 }
1793
1794 /* write the local variables back to vacrel */
1795 vacrel->next_unskippable_block = next_unskippable_block;
1796 vacrel->next_unskippable_allvis = next_unskippable_allvis;
1797 vacrel->next_unskippable_eager_scanned = next_unskippable_eager_scanned;
1798 vacrel->next_unskippable_vmbuffer = next_unskippable_vmbuffer;
1799}
uint8_t uint8
Definition: c.h:550
Assert(PointerIsAligned(start, uint64))
BlockNumber next_eager_scan_region_start
Definition: vacuumlazy.c:377
bool next_unskippable_eager_scanned
Definition: vacuumlazy.c:362
Buffer next_unskippable_vmbuffer
Definition: vacuumlazy.c:363
BlockNumber eager_scan_remaining_fails
Definition: vacuumlazy.c:409
bool aggressive
Definition: vacuumlazy.c:270
BlockNumber next_unskippable_block
Definition: vacuumlazy.c:360
bool skipwithvm
Definition: vacuumlazy.c:272
bool next_unskippable_allvis
Definition: vacuumlazy.c:361
BlockNumber eager_scan_max_fails_per_region
Definition: vacuumlazy.c:399
#define EAGER_SCAN_REGION_SIZE
Definition: vacuumlazy.c:249
uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE

References LVRelState::aggressive, Assert(), LVRelState::eager_scan_max_fails_per_region, EAGER_SCAN_REGION_SIZE, LVRelState::eager_scan_remaining_fails, LVRelState::next_eager_scan_region_start, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::rel, LVRelState::rel_pages, LVRelState::skipwithvm, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_get_status().

Referenced by heap_vac_scan_next_block().

◆ heap_page_would_be_all_visible()

static bool heap_page_would_be_all_visible ( Relation  rel,
Buffer  buf,
TransactionId  OldestXmin,
OffsetNumber deadoffsets,
int  ndeadoffsets,
bool *  all_frozen,
TransactionId visibility_cutoff_xid,
OffsetNumber logging_offnum 
)
static

Definition at line 3709 of file vacuumlazy.c.

3716{
3717 Page page = BufferGetPage(buf);
3718 BlockNumber blockno = BufferGetBlockNumber(buf);
3719 OffsetNumber offnum,
3720 maxoff;
3721 bool all_visible = true;
3722 int matched_dead_count = 0;
3723
3724 *visibility_cutoff_xid = InvalidTransactionId;
3725 *all_frozen = true;
3726
3727 Assert(ndeadoffsets == 0 || deadoffsets);
3728
3729#ifdef USE_ASSERT_CHECKING
3730 /* Confirm input deadoffsets[] is strictly sorted */
3731 if (ndeadoffsets > 1)
3732 {
3733 for (int i = 1; i < ndeadoffsets; i++)
3734 Assert(deadoffsets[i - 1] < deadoffsets[i]);
3735 }
3736#endif
3737
3738 maxoff = PageGetMaxOffsetNumber(page);
3739 for (offnum = FirstOffsetNumber;
3740 offnum <= maxoff && all_visible;
3741 offnum = OffsetNumberNext(offnum))
3742 {
3743 ItemId itemid;
3744 HeapTupleData tuple;
3745
3746 /*
3747 * Set the offset number so that we can display it along with any
3748 * error that occurred while processing this tuple.
3749 */
3750 *logging_offnum = offnum;
3751 itemid = PageGetItemId(page, offnum);
3752
3753 /* Unused or redirect line pointers are of no interest */
3754 if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
3755 continue;
3756
3757 ItemPointerSet(&(tuple.t_self), blockno, offnum);
3758
3759 /*
3760 * Dead line pointers can have index pointers pointing to them. So
3761 * they can't be treated as visible
3762 */
3763 if (ItemIdIsDead(itemid))
3764 {
3765 if (!deadoffsets ||
3766 matched_dead_count >= ndeadoffsets ||
3767 deadoffsets[matched_dead_count] != offnum)
3768 {
3769 *all_frozen = all_visible = false;
3770 break;
3771 }
3772 matched_dead_count++;
3773 continue;
3774 }
3775
3776 Assert(ItemIdIsNormal(itemid));
3777
3778 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
3779 tuple.t_len = ItemIdGetLength(itemid);
3780 tuple.t_tableOid = RelationGetRelid(rel);
3781
3782 /* Visibility checks may do IO or allocate memory */
3783 Assert(CritSectionCount == 0);
3784 switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
3785 {
3786 case HEAPTUPLE_LIVE:
3787 {
3788 TransactionId xmin;
3789
3790 /* Check comments in lazy_scan_prune. */
3791 if (!HeapTupleHeaderXminCommitted(tuple.t_data))
3792 {
3793 all_visible = false;
3794 *all_frozen = false;
3795 break;
3796 }
3797
3798 /*
3799 * The inserter definitely committed. But is it old enough
3800 * that everyone sees it as committed?
3801 */
3802 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
3803 if (!TransactionIdPrecedes(xmin, OldestXmin))
3804 {
3805 all_visible = false;
3806 *all_frozen = false;
3807 break;
3808 }
3809
3810 /* Track newest xmin on page. */
3811 if (TransactionIdFollows(xmin, *visibility_cutoff_xid) &&
3812 TransactionIdIsNormal(xmin))
3813 *visibility_cutoff_xid = xmin;
3814
3815 /* Check whether this tuple is already frozen or not */
3816 if (all_visible && *all_frozen &&
3817 heap_tuple_needs_eventual_freeze(tuple.t_data))
3818 *all_frozen = false;
3819 }
3820 break;
3821
3822 case HEAPTUPLE_DEAD:
3823 case HEAPTUPLE_RECENTLY_DEAD:
3824 case HEAPTUPLE_INSERT_IN_PROGRESS:
3825 case HEAPTUPLE_DELETE_IN_PROGRESS:
3826 {
3827 all_visible = false;
3828 *all_frozen = false;
3829 break;
3830 }
3831 default:
3832 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
3833 break;
3834 }
3835 } /* scan along page */
3836
3837 /* Clear the offset information once we have processed the given page. */
3838 *logging_offnum = InvalidOffsetNumber;
3839
3840 return all_visible;
3841}
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:4223
static void * PageGetItem(const PageData *page, const ItemIdData *itemId)
Definition: bufpage.h:353
uint32 TransactionId
Definition: c.h:671
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
volatile uint32 CritSectionCount
Definition: globals.c:45
bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
Definition: heapam.c:7836
@ HEAPTUPLE_RECENTLY_DEAD
Definition: heapam.h:128
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition: heapam.h:129
@ HEAPTUPLE_LIVE
Definition: heapam.h:127
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition: heapam.h:130
@ HEAPTUPLE_DEAD
Definition: heapam.h:126
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static TransactionId HeapTupleHeaderGetXmin(const HeapTupleHeaderData *tup)
Definition: htup_details.h:324
static bool HeapTupleHeaderXminCommitted(const HeapTupleHeaderData *tup)
Definition: htup_details.h:337
int i
Definition: isn.c:77
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition: itemptr.h:135
#define InvalidOffsetNumber
Definition: off.h:26
#define RelationGetRelid(relation)
Definition: rel.h:515
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
static bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition: transam.h:297
#define InvalidTransactionId
Definition: transam.h:31
#define TransactionIdIsNormal(xid)
Definition: transam.h:42
static bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.h:263

References Assert(), buf, BufferGetBlockNumber(), BufferGetPage(), CritSectionCount, elog, ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin(), HeapTupleHeaderXminCommitted(), HeapTupleSatisfiesVacuum(), i, InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), OffsetNumberNext, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdIsNormal, and TransactionIdPrecedes().

Referenced by lazy_vacuum_heap_page().

◆ heap_vac_scan_next_block()

static BlockNumber heap_vac_scan_next_block ( ReadStream stream,
void *  callback_private_data,
void *  per_buffer_data 
)
static

Definition at line 1599 of file vacuumlazy.c.

1602{
1603 BlockNumber next_block;
1604 LVRelState *vacrel = callback_private_data;
1605 uint8 blk_info = 0;
1606
1607 /* relies on InvalidBlockNumber + 1 overflowing to 0 on first call */
1608 next_block = vacrel->current_block + 1;
1609
1610 /* Have we reached the end of the relation? */
1611 if (next_block >= vacrel->rel_pages)
1612 {
1613 if (BufferIsValid(vacrel->next_unskippable_vmbuffer))
1614 {
1615 ReleaseBuffer(vacrel->next_unskippable_vmbuffer);
1616 vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1617 }
1618 return InvalidBlockNumber;
1619 }
1620
1621 /*
1622 * We must be in one of the three following states:
1623 */
1624 if (next_block > vacrel->next_unskippable_block ||
1626 {
1627 /*
1628 * 1. We have just processed an unskippable block (or we're at the
1629 * beginning of the scan). Find the next unskippable block using the
1630 * visibility map.
1631 */
1632 bool skipsallvis;
1633
1634 find_next_unskippable_block(vacrel, &skipsallvis);
1635
1636 /*
1637 * We now know the next block that we must process. It can be the
1638 * next block after the one we just processed, or something further
1639 * ahead. If it's further ahead, we can jump to it, but we choose to
1640 * do so only if we can skip at least SKIP_PAGES_THRESHOLD consecutive
1641 * pages. Since we're reading sequentially, the OS should be doing
1642 * readahead for us, so there's no gain in skipping a page now and
1643 * then. Skipping such a range might even discourage sequential
1644 * detection.
1645 *
1646 * This test also enables more frequent relfrozenxid advancement
1647 * during non-aggressive VACUUMs. If the range has any all-visible
1648 * pages then skipping makes updating relfrozenxid unsafe, which is a
1649 * real downside.
1650 */
1651 if (vacrel->next_unskippable_block - next_block >= SKIP_PAGES_THRESHOLD)
1652 {
1653 next_block = vacrel->next_unskippable_block;
1654 if (skipsallvis)
1655 vacrel->skippedallvis = true;
1656 }
1657 }
1658
1659 /* Now we must be in one of the two remaining states: */
1660 if (next_block < vacrel->next_unskippable_block)
1661 {
1662 /*
1663 * 2. We are processing a range of blocks that we could have skipped
1664 * but chose not to. We know that they are all-visible in the VM,
1665 * otherwise they would've been unskippable.
1666 */
1667 vacrel->current_block = next_block;
1668 blk_info |= VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM;
1669 *((uint8 *) per_buffer_data) = blk_info;
1670 return vacrel->current_block;
1671 }
1672 else
1673 {
1674 /*
1675 * 3. We reached the next unskippable block. Process it. On next
1676 * iteration, we will be back in state 1.
1677 */
1678 Assert(next_block == vacrel->next_unskippable_block);
1679
1680 vacrel->current_block = next_block;
1681 if (vacrel->next_unskippable_allvis)
1682 blk_info |= VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM;
1683 if (vacrel->next_unskippable_eager_scanned)
1684 blk_info |= VAC_BLK_WAS_EAGER_SCANNED;
1685 *((uint8 *) per_buffer_data) = blk_info;
1686 return vacrel->current_block;
1687 }
1688}
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:5366
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:387
BlockNumber current_block
Definition: vacuumlazy.c:359
bool skippedallvis
Definition: vacuumlazy.c:287
#define VAC_BLK_WAS_EAGER_SCANNED
Definition: vacuumlazy.c:255
static void find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis)
Definition: vacuumlazy.c:1704
#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM
Definition: vacuumlazy.c:256
#define SKIP_PAGES_THRESHOLD
Definition: vacuumlazy.c:208

References Assert(), BufferIsValid(), LVRelState::current_block, find_next_unskippable_block(), InvalidBlockNumber, InvalidBuffer, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::rel_pages, ReleaseBuffer(), SKIP_PAGES_THRESHOLD, LVRelState::skippedallvis, VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM, and VAC_BLK_WAS_EAGER_SCANNED.

Referenced by lazy_scan_heap().
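
This is the block-number callback of the read stream that lazy_scan_heap() sets up: each call hands back the next block to read, the single per-buffer byte carries the VAC_BLK_WAS_EAGER_SCANNED / VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM flags to the stream's consumer, and returning InvalidBlockNumber ends the stream once rel_pages is reached.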

◆ heap_vacuum_eager_scan_setup()

static void heap_vacuum_eager_scan_setup ( LVRelState vacrel,
const VacuumParams  params 
)
static

Definition at line 500 of file vacuumlazy.c.

501{
502 uint32 randseed;
503 BlockNumber allvisible;
504 BlockNumber allfrozen;
505 float first_region_ratio;
506 bool oldest_unfrozen_before_cutoff = false;
507
508 /*
509 * Initialize eager scan management fields to their disabled values.
510 * Aggressive vacuums, normal vacuums of small tables, and normal vacuums
511 * of tables without sufficiently old tuples disable eager scanning.
512 */
513 vacrel->next_eager_scan_region_start = InvalidBlockNumber;
514 vacrel->eager_scan_max_fails_per_region = 0;
515 vacrel->eager_scan_remaining_fails = 0;
516 vacrel->eager_scan_remaining_successes = 0;
517
518 /* If eager scanning is explicitly disabled, just return. */
519 if (params.max_eager_freeze_failure_rate == 0)
520 return;
521
522 /*
523 * The caller will have determined whether or not an aggressive vacuum is
524 * required by either the vacuum parameters or the relative age of the
525 * oldest unfrozen transaction IDs. An aggressive vacuum must scan every
526 * all-visible page to safely advance the relfrozenxid and/or relminmxid,
527 * so scans of all-visible pages are not considered eager.
528 */
529 if (vacrel->aggressive)
530 return;
531
532 /*
533 * Aggressively vacuuming a small relation shouldn't take long, so it
534 * isn't worth amortizing. We use two times the region size as the size
535 * cutoff because the eager scan start block is a random spot somewhere in
536 * the first region, making the second region the first to be eager
537 * scanned normally.
538 */
539 if (vacrel->rel_pages < 2 * EAGER_SCAN_REGION_SIZE)
540 return;
541
542 /*
543 * We only want to enable eager scanning if we are likely to be able to
544 * freeze some of the pages in the relation.
545 *
546 * Tuples with XIDs older than OldestXmin or MXIDs older than OldestMxact
547 * are technically freezable, but we won't freeze them unless the criteria
548 * for opportunistic freezing is met. Only tuples with XIDs/MXIDs older
549 * than the FreezeLimit/MultiXactCutoff are frozen in the common case.
550 *
551 * So, as a heuristic, we wait until the FreezeLimit has advanced past the
552 * relfrozenxid or the MultiXactCutoff has advanced past the relminmxid to
553 * enable eager scanning.
554 */
555 if (TransactionIdIsNormal(vacrel->cutoffs.relfrozenxid) &&
556 TransactionIdPrecedes(vacrel->cutoffs.relfrozenxid,
557 vacrel->cutoffs.FreezeLimit))
558 oldest_unfrozen_before_cutoff = true;
559
560 if (!oldest_unfrozen_before_cutoff &&
561 MultiXactIdIsValid(vacrel->cutoffs.relminmxid) &&
562 MultiXactIdPrecedes(vacrel->cutoffs.relminmxid,
563 vacrel->cutoffs.MultiXactCutoff))
564 oldest_unfrozen_before_cutoff = true;
565
566 if (!oldest_unfrozen_before_cutoff)
567 return;
568
569 /* We have met the criteria to eagerly scan some pages. */
570
571 /*
572 * Our success cap is MAX_EAGER_FREEZE_SUCCESS_RATE of the number of
573 * all-visible but not all-frozen blocks in the relation.
574 */
575 visibilitymap_count(vacrel->rel, &allvisible, &allfrozen);
576
579 (allvisible - allfrozen));
580
581 /* If every all-visible page is frozen, eager scanning is disabled. */
582 if (vacrel->eager_scan_remaining_successes == 0)
583 return;
584
585 /*
586 * Now calculate the bounds of the first eager scan region. Its end block
587 * will be a random spot somewhere in the first EAGER_SCAN_REGION_SIZE
588 * blocks. This affects the bounds of all subsequent regions and avoids
589 * eager scanning and failing to freeze the same blocks each vacuum of the
590 * relation.
591 */
592 randseed = pg_prng_uint32(&pg_global_prng_state);
593
594 vacrel->next_eager_scan_region_start = randseed % EAGER_SCAN_REGION_SIZE;
595
596 Assert(params.max_eager_freeze_failure_rate > 0 &&
597 params.max_eager_freeze_failure_rate <= 1);
598
599 vacrel->eager_scan_max_fails_per_region =
600 params.max_eager_freeze_failure_rate *
601 EAGER_SCAN_REGION_SIZE;
602
603 /*
604 * The first region will be smaller than subsequent regions. As such,
605 * adjust the eager freeze failures tolerated for this region.
606 */
607 first_region_ratio = 1 - (float) vacrel->next_eager_scan_region_start /
608 EAGER_SCAN_REGION_SIZE;
609
610 vacrel->eager_scan_remaining_fails =
611 vacrel->eager_scan_max_fails_per_region *
612 first_region_ratio;
613}
uint32_t uint32
Definition: c.h:552
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:2828
#define MultiXactIdIsValid(multi)
Definition: multixact.h:29
uint32 pg_prng_uint32(pg_prng_state *state)
Definition: pg_prng.c:227
pg_prng_state pg_global_prng_state
Definition: pg_prng.c:34
BlockNumber eager_scan_remaining_successes
Definition: vacuumlazy.c:388
struct VacuumCutoffs cutoffs
Definition: vacuumlazy.c:282
TransactionId FreezeLimit
Definition: vacuum.h:289
TransactionId relfrozenxid
Definition: vacuum.h:263
MultiXactId relminmxid
Definition: vacuum.h:264
MultiXactId MultiXactCutoff
Definition: vacuum.h:290
double max_eager_freeze_failure_rate
Definition: vacuum.h:244
#define MAX_EAGER_FREEZE_SUCCESS_RATE
Definition: vacuumlazy.c:240
void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)

References LVRelState::aggressive, Assert(), LVRelState::cutoffs, LVRelState::eager_scan_max_fails_per_region, EAGER_SCAN_REGION_SIZE, LVRelState::eager_scan_remaining_fails, LVRelState::eager_scan_remaining_successes, VacuumCutoffs::FreezeLimit, InvalidBlockNumber, VacuumParams::max_eager_freeze_failure_rate, MAX_EAGER_FREEZE_SUCCESS_RATE, VacuumCutoffs::MultiXactCutoff, MultiXactIdIsValid, MultiXactIdPrecedes(), LVRelState::next_eager_scan_region_start, pg_global_prng_state, pg_prng_uint32(), LVRelState::rel, LVRelState::rel_pages, VacuumCutoffs::relfrozenxid, VacuumCutoffs::relminmxid, TransactionIdIsNormal, TransactionIdPrecedes(), and visibilitymap_count().

Referenced by heap_vacuum_rel().
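
Worked example of the first-region adjustment: if the random start block lands at 1024, the first eager-scan region spans only 4096 - 1024 = 3072 blocks, so first_region_ratio = 1 - 1024/4096 = 0.75 and that region tolerates 75% of eager_scan_max_fails_per_region failed freeze attempts; later regions are full EAGER_SCAN_REGION_SIZE regions with the full allowance.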

◆ heap_vacuum_rel()

void heap_vacuum_rel ( Relation  rel,
const VacuumParams  params,
BufferAccessStrategy  bstrategy 
)

Definition at line 627 of file vacuumlazy.c.

629{
630 LVRelState *vacrel;
631 bool verbose,
632 instrument,
633 skipwithvm,
634 frozenxid_updated,
635 minmulti_updated;
636 BlockNumber orig_rel_pages,
637 new_rel_pages,
638 new_rel_allvisible,
639 new_rel_allfrozen;
640 PGRUsage ru0;
641 TimestampTz starttime = 0;
642 PgStat_Counter startreadtime = 0,
643 startwritetime = 0;
644 WalUsage startwalusage = pgWalUsage;
645 BufferUsage startbufferusage = pgBufferUsage;
646 ErrorContextCallback errcallback;
647 char **indnames = NULL;
648
649 verbose = (params.options & VACOPT_VERBOSE) != 0;
650 instrument = (verbose || (AmAutoVacuumWorkerProcess() &&
651 params.log_vacuum_min_duration >= 0));
652 if (instrument)
653 {
654 pg_rusage_init(&ru0);
655 if (track_io_timing)
656 {
657 startreadtime = pgStatBlockReadTime;
658 startwritetime = pgStatBlockWriteTime;
659 }
660 }
661
662 /* Used for instrumentation and stats report */
663 starttime = GetCurrentTimestamp();
664
666 RelationGetRelid(rel));
669 params.is_wraparound
672 else
675
676 /*
677 * Setup error traceback support for ereport() first. The idea is to set
678 * up an error context callback to display additional information on any
679 * error during a vacuum. During different phases of vacuum, we update
680 * the state so that the error context callback always display current
681 * information.
682 *
683 * Copy the names of heap rel into local memory for error reporting
684 * purposes, too. It isn't always safe to assume that we can get the name
685 * of each rel. It's convenient for code in lazy_scan_heap to always use
686 * these temp copies.
687 */
688 vacrel = palloc0_object(LVRelState);
692 vacrel->indname = NULL;
694 vacrel->verbose = verbose;
695 errcallback.callback = vacuum_error_callback;
696 errcallback.arg = vacrel;
697 errcallback.previous = error_context_stack;
698 error_context_stack = &errcallback;
699
700 /* Set up high level stuff about rel and its indexes */
701 vacrel->rel = rel;
702 vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes,
703 &vacrel->indrels);
704 vacrel->bstrategy = bstrategy;
705 if (instrument && vacrel->nindexes > 0)
706 {
707 /* Copy index names used by instrumentation (not error reporting) */
708 indnames = palloc_array(char *, vacrel->nindexes);
709 for (int i = 0; i < vacrel->nindexes; i++)
710 indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
711 }
712
713 /*
714 * The index_cleanup param either disables index vacuuming and cleanup or
715 * forces it to go ahead when we would otherwise apply the index bypass
716 * optimization. The default is 'auto', which leaves the final decision
717 * up to lazy_vacuum().
718 *
719 * The truncate param allows user to avoid attempting relation truncation,
720 * though it can't force truncation to happen.
721 */
724 params.truncate != VACOPTVALUE_AUTO);
725
726 /*
727 * While VacuumFailSafeActive is reset to false before calling this, we
728 * still need to reset it here due to recursive calls.
729 */
730 VacuumFailsafeActive = false;
731 vacrel->consider_bypass_optimization = true;
732 vacrel->do_index_vacuuming = true;
733 vacrel->do_index_cleanup = true;
734 vacrel->do_rel_truncate = (params.truncate != VACOPTVALUE_DISABLED);
735 if (params.index_cleanup == VACOPTVALUE_DISABLED)
736 {
737 /* Force disable index vacuuming up-front */
738 vacrel->do_index_vacuuming = false;
739 vacrel->do_index_cleanup = false;
740 }
741 else if (params.index_cleanup == VACOPTVALUE_ENABLED)
742 {
743 /* Force index vacuuming. Note that failsafe can still bypass. */
744 vacrel->consider_bypass_optimization = false;
745 }
746 else
747 {
748 /* Default/auto, make all decisions dynamically */
750 }
751
752 /* Initialize page counters explicitly (be tidy) */
753 vacrel->scanned_pages = 0;
754 vacrel->eager_scanned_pages = 0;
755 vacrel->removed_pages = 0;
756 vacrel->new_frozen_tuple_pages = 0;
757 vacrel->lpdead_item_pages = 0;
758 vacrel->missed_dead_pages = 0;
759 vacrel->nonempty_pages = 0;
760 /* dead_items_alloc allocates vacrel->dead_items later on */
761
762 /* Allocate/initialize output statistics state */
763 vacrel->new_rel_tuples = 0;
764 vacrel->new_live_tuples = 0;
765 vacrel->indstats = (IndexBulkDeleteResult **)
766 palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
767
768 /* Initialize remaining counters (be tidy) */
769 vacrel->num_index_scans = 0;
770 vacrel->tuples_deleted = 0;
771 vacrel->tuples_frozen = 0;
772 vacrel->lpdead_items = 0;
773 vacrel->live_tuples = 0;
774 vacrel->recently_dead_tuples = 0;
775 vacrel->missed_dead_tuples = 0;
776
777 vacrel->vm_new_visible_pages = 0;
778 vacrel->vm_new_visible_frozen_pages = 0;
779 vacrel->vm_new_frozen_pages = 0;
780
781 /*
782 * Get cutoffs that determine which deleted tuples are considered DEAD,
783 * not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze. Then determine
784 * the extent of the blocks that we'll scan in lazy_scan_heap. It has to
785 * happen in this order to ensure that the OldestXmin cutoff field works
786 * as an upper bound on the XIDs stored in the pages we'll actually scan
787 * (NewRelfrozenXid tracking must never be allowed to miss unfrozen XIDs).
788 *
789 * Next acquire vistest, a related cutoff that's used in pruning. We use
790 * vistest in combination with OldestXmin to ensure that
791 * heap_page_prune_and_freeze() always removes any deleted tuple whose
792 * xmax is < OldestXmin. lazy_scan_prune must never become confused about
793 * whether a tuple should be frozen or removed. (In the future we might
794 * want to teach lazy_scan_prune to recompute vistest from time to time,
795 * to increase the number of dead tuples it can prune away.)
796 */
797 vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
798 vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
799 vacrel->vistest = GlobalVisTestFor(rel);
800
801 /* Initialize state used to track oldest extant XID/MXID */
802 vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin;
803 vacrel->NewRelminMxid = vacrel->cutoffs.OldestMxact;
804
805 /*
806 * Initialize state related to tracking all-visible page skipping. This is
807 * very important to determine whether or not it is safe to advance the
808 * relfrozenxid/relminmxid.
809 */
810 vacrel->skippedallvis = false;
811 skipwithvm = true;
812 if (params.options & VACOPT_DISABLE_PAGE_SKIPPING)
813 {
814 /*
815 * Force aggressive mode, and disable skipping blocks using the
816 * visibility map (even those set all-frozen)
817 */
818 vacrel->aggressive = true;
819 skipwithvm = false;
820 }
821
822 vacrel->skipwithvm = skipwithvm;
823
824 /*
825 * Set up eager scan tracking state. This must happen after determining
826 * whether or not the vacuum must be aggressive, because only normal
827 * vacuums use the eager scan algorithm.
828 */
829 heap_vacuum_eager_scan_setup(vacrel, params);
830
831 /* Report the vacuum mode: 'normal' or 'aggressive' */
833 vacrel->aggressive
836
837 if (verbose)
838 {
839 if (vacrel->aggressive)
841 (errmsg("aggressively vacuuming \"%s.%s.%s\"",
842 vacrel->dbname, vacrel->relnamespace,
843 vacrel->relname)));
844 else
846 (errmsg("vacuuming \"%s.%s.%s\"",
847 vacrel->dbname, vacrel->relnamespace,
848 vacrel->relname)));
849 }
850
851 /*
852 * Allocate dead_items memory using dead_items_alloc. This handles
853 * parallel VACUUM initialization as part of allocating shared memory
854 * space used for dead_items. (But do a failsafe precheck first, to
855 * ensure that parallel VACUUM won't be attempted at all when relfrozenxid
856 * is already dangerously old.)
857 */
858 lazy_check_wraparound_failsafe(vacrel);
859 dead_items_alloc(vacrel, params.nworkers);
860
861 /*
862 * Call lazy_scan_heap to perform all required heap pruning, index
863 * vacuuming, and heap vacuuming (plus related processing)
864 */
865 lazy_scan_heap(vacrel);
866
867 /*
868 * Free resources managed by dead_items_alloc. This ends parallel mode in
869 * passing when necessary.
870 */
871 dead_items_cleanup(vacrel);
873
874 /*
875 * Update pg_class entries for each of rel's indexes where appropriate.
876 *
877 * Unlike the later update to rel's pg_class entry, this is not critical.
878 * Maintains relpages/reltuples statistics used by the planner only.
879 */
880 if (vacrel->do_index_cleanup)
881 update_relstats_all_indexes(vacrel);
882
883 /* Done with rel's indexes */
884 vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
885
886 /* Optionally truncate rel */
887 if (should_attempt_truncation(vacrel))
888 lazy_truncate_heap(vacrel);
889
890 /* Pop the error context stack */
891 error_context_stack = errcallback.previous;
892
893 /* Report that we are now doing final cleanup */
894 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
895 PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
896
897 /*
898 * Prepare to update rel's pg_class entry.
899 *
900 * Aggressive VACUUMs must always be able to advance relfrozenxid to a
901 * value >= FreezeLimit, and relminmxid to a value >= MultiXactCutoff.
902 * Non-aggressive VACUUMs may advance them by any amount, or not at all.
903 */
904 Assert(vacrel->NewRelfrozenXid == vacrel->cutoffs.OldestXmin ||
906 vacrel->cutoffs.relfrozenxid,
907 vacrel->NewRelfrozenXid));
908 Assert(vacrel->NewRelminMxid == vacrel->cutoffs.OldestMxact ||
910 vacrel->cutoffs.relminmxid,
911 vacrel->NewRelminMxid));
912 if (vacrel->skippedallvis)
913 {
914 /*
915 * Must keep original relfrozenxid in a non-aggressive VACUUM that
916 * chose to skip an all-visible page range. The state that tracks new
917 * values will have missed unfrozen XIDs from the pages we skipped.
918 */
919 Assert(!vacrel->aggressive);
920 vacrel->NewRelfrozenXid = vacrel->cutoffs.relfrozenxid;
921 vacrel->NewRelminMxid = vacrel->cutoffs.relminmxid;
922 }
923
924 /*
925 * For safety, clamp relallvisible to be not more than what we're setting
926 * pg_class.relpages to
927 */
928 new_rel_pages = vacrel->rel_pages; /* After possible rel truncation */
929 visibilitymap_count(rel, &new_rel_allvisible, &new_rel_allfrozen);
930 if (new_rel_allvisible > new_rel_pages)
931 new_rel_allvisible = new_rel_pages;
932
933 /*
934 * An all-frozen block _must_ be all-visible. As such, clamp the count of
935 * all-frozen blocks to the count of all-visible blocks. This matches the
936 * clamping of relallvisible above.
937 */
938 if (new_rel_allfrozen > new_rel_allvisible)
939 new_rel_allfrozen = new_rel_allvisible;
940
941 /*
942 * Now actually update rel's pg_class entry.
943 *
944 * In principle new_live_tuples could be -1 indicating that we (still)
945 * don't know the tuple count. In practice that can't happen, since we
946 * scan every page that isn't skipped using the visibility map.
947 */
948 vac_update_relstats(rel, new_rel_pages, vacrel->new_live_tuples,
949 new_rel_allvisible, new_rel_allfrozen,
950 vacrel->nindexes > 0,
951 vacrel->NewRelfrozenXid, vacrel->NewRelminMxid,
952 &frozenxid_updated, &minmulti_updated, false);
953
954 /*
955 * Report results to the cumulative stats system, too.
956 *
957 * Deliberately avoid telling the stats system about LP_DEAD items that
958 * remain in the table due to VACUUM bypassing index and heap vacuuming.
959 * ANALYZE will consider the remaining LP_DEAD items to be dead "tuples".
960 * It seems like a good idea to err on the side of not vacuuming again too
961 * soon in cases where the failsafe prevented significant amounts of heap
962 * vacuuming.
963 */
965 rel->rd_rel->relisshared,
966 Max(vacrel->new_live_tuples, 0),
967 vacrel->recently_dead_tuples +
968 vacrel->missed_dead_tuples,
969 starttime);
970 pgstat_progress_end_command();
971
972 if (instrument)
973 {
974 TimestampTz endtime = GetCurrentTimestamp();
975
976 if (verbose || params.log_vacuum_min_duration == 0 ||
977 TimestampDifferenceExceeds(starttime, endtime,
979 {
980 long secs_dur;
981 int usecs_dur;
982 WalUsage walusage;
983 BufferUsage bufferusage;
984 StringInfoData buf;
985 char *msgfmt;
986 int32 diff;
987 double read_rate = 0,
988 write_rate = 0;
989 int64 total_blks_hit;
990 int64 total_blks_read;
991 int64 total_blks_dirtied;
992
993 TimestampDifference(starttime, endtime, &secs_dur, &usecs_dur);
994 memset(&walusage, 0, sizeof(WalUsage));
995 WalUsageAccumDiff(&walusage, &pgWalUsage, &startwalusage);
996 memset(&bufferusage, 0, sizeof(BufferUsage));
997 BufferUsageAccumDiff(&bufferusage, &pgBufferUsage, &startbufferusage);
998
999 total_blks_hit = bufferusage.shared_blks_hit +
1000 bufferusage.local_blks_hit;
1001 total_blks_read = bufferusage.shared_blks_read +
1002 bufferusage.local_blks_read;
1003 total_blks_dirtied = bufferusage.shared_blks_dirtied +
1004 bufferusage.local_blks_dirtied;
1005
1006 initStringInfo(&buf);
1007 if (verbose)
1008 {
1009 /*
1010 * Aggressiveness already reported earlier, in dedicated
1011 * VACUUM VERBOSE ereport
1012 */
1013 Assert(!params.is_wraparound);
1014 msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
1015 }
1016 else if (params.is_wraparound)
1017 {
1018 /*
1019 * While it's possible for a VACUUM to be both is_wraparound
1020 * and !aggressive, that's just a corner-case -- is_wraparound
1021 * implies aggressive. Produce distinct output for the corner
1022 * case all the same, just in case.
1023 */
1024 if (vacrel->aggressive)
1025 msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
1026 else
1027 msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
1028 }
1029 else
1030 {
1031 if (vacrel->aggressive)
1032 msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
1033 else
1034 msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
1035 }
1036 appendStringInfo(&buf, msgfmt,
1037 vacrel->dbname,
1038 vacrel->relnamespace,
1039 vacrel->relname,
1040 vacrel->num_index_scans);
1041 appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total), %u eagerly scanned\n"),
1042 vacrel->removed_pages,
1043 new_rel_pages,
1044 vacrel->scanned_pages,
1045 orig_rel_pages == 0 ? 100.0 :
1046 100.0 * vacrel->scanned_pages /
1047 orig_rel_pages,
1048 vacrel->eager_scanned_pages);
1050 _("tuples: %" PRId64 " removed, %" PRId64 " remain, %" PRId64 " are dead but not yet removable\n"),
1051 vacrel->tuples_deleted,
1052 (int64) vacrel->new_rel_tuples,
1053 vacrel->recently_dead_tuples);
1054 if (vacrel->missed_dead_tuples > 0)
1056 _("tuples missed: %" PRId64 " dead from %u pages not removed due to cleanup lock contention\n"),
1057 vacrel->missed_dead_tuples,
1058 vacrel->missed_dead_pages);
1059 diff = (int32) (ReadNextTransactionId() -
1060 vacrel->cutoffs.OldestXmin);
1062 _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
1063 vacrel->cutoffs.OldestXmin, diff);
1064 if (frozenxid_updated)
1065 {
1066 diff = (int32) (vacrel->NewRelfrozenXid -
1067 vacrel->cutoffs.relfrozenxid);
1069 _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
1070 vacrel->NewRelfrozenXid, diff);
1071 }
1072 if (minmulti_updated)
1073 {
1074 diff = (int32) (vacrel->NewRelminMxid -
1075 vacrel->cutoffs.relminmxid);
1077 _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
1078 vacrel->NewRelminMxid, diff);
1079 }
1080 appendStringInfo(&buf, _("frozen: %u pages from table (%.2f%% of total) had %" PRId64 " tuples frozen\n"),
1081 vacrel->new_frozen_tuple_pages,
1082 orig_rel_pages == 0 ? 100.0 :
1083 100.0 * vacrel->new_frozen_tuple_pages /
1084 orig_rel_pages,
1085 vacrel->tuples_frozen);
1086
1087 appendStringInfo(&buf,
1088 _("visibility map: %u pages set all-visible, %u pages set all-frozen (%u were all-visible)\n"),
1089 vacrel->vm_new_visible_pages,
1090 vacrel->vm_new_visible_frozen_pages +
1091 vacrel->vm_new_frozen_pages,
1092 vacrel->vm_new_frozen_pages);
1093 if (vacrel->do_index_vacuuming)
1094 {
1095 if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
1096 appendStringInfoString(&buf, _("index scan not needed: "));
1097 else
1098 appendStringInfoString(&buf, _("index scan needed: "));
1099
1100 msgfmt = _("%u pages from table (%.2f%% of total) had %" PRId64 " dead item identifiers removed\n");
1101 }
1102 else
1103 {
1104 if (!VacuumFailsafeActive)
1105 appendStringInfoString(&buf, _("index scan bypassed: "));
1106 else
1107 appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
1108
1109 msgfmt = _("%u pages from table (%.2f%% of total) have %" PRId64 " dead item identifiers\n");
1110 }
1111 appendStringInfo(&buf, msgfmt,
1112 vacrel->lpdead_item_pages,
1113 orig_rel_pages == 0 ? 100.0 :
1114 100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
1115 vacrel->lpdead_items);
1116 for (int i = 0; i < vacrel->nindexes; i++)
1117 {
1118 IndexBulkDeleteResult *istat = vacrel->indstats[i];
1119
1120 if (!istat)
1121 continue;
1122
1123 appendStringInfo(&buf,
1124 _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
1125 indnames[i],
1126 istat->num_pages,
1127 istat->pages_newly_deleted,
1128 istat->pages_deleted,
1129 istat->pages_free);
1130 }
1131 if (track_cost_delay_timing)
1132 {
1133 /*
1134 * We bypass the changecount mechanism because this value is
1135 * only updated by the calling process. We also rely on the
1136 * above call to pgstat_progress_end_command() to not clear
1137 * the st_progress_param array.
1138 */
1139 appendStringInfo(&buf, _("delay time: %.3f ms\n"),
1140 (double) MyBEEntry->st_progress_param[PROGRESS_VACUUM_DELAY_TIME] / 1000000.0);
1141 }
1142 if (track_io_timing)
1143 {
1144 double read_ms = (double) (pgStatBlockReadTime - startreadtime) / 1000;
1145 double write_ms = (double) (pgStatBlockWriteTime - startwritetime) / 1000;
1146
1147 appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
1148 read_ms, write_ms);
1149 }
1150 if (secs_dur > 0 || usecs_dur > 0)
1151 {
1152 read_rate = (double) BLCKSZ * total_blks_read /
1153 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
1154 write_rate = (double) BLCKSZ * total_blks_dirtied /
1155 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
1156 }
1157 appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
1158 read_rate, write_rate);
1159 appendStringInfo(&buf,
1160 _("buffer usage: %" PRId64 " hits, %" PRId64 " reads, %" PRId64 " dirtied\n"),
1161 total_blks_hit,
1162 total_blks_read,
1163 total_blks_dirtied);
1164 appendStringInfo(&buf,
1165 _("WAL usage: %" PRId64 " records, %" PRId64 " full page images, %" PRIu64 " bytes, %" PRIu64 " full page image bytes, %" PRId64 " buffers full\n"),
1166 walusage.wal_records,
1167 walusage.wal_fpi,
1168 walusage.wal_bytes,
1169 walusage.wal_fpi_bytes,
1170 walusage.wal_buffers_full);
1171 appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
1172
1173 ereport(verbose ? INFO : LOG,
1174 (errmsg_internal("%s", buf.data)));
1175 pfree(buf.data);
1176 }
1177 }
1178
1179 /* Cleanup index statistics and index names */
1180 for (int i = 0; i < vacrel->nindexes; i++)
1181 {
1182 if (vacrel->indstats[i])
1183 pfree(vacrel->indstats[i]);
1184
1185 if (instrument)
1186 pfree(indnames[i]);
1187 }
1188}
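
A minimal, standalone sketch (not part of vacuumlazy.c) of the rate and percentage arithmetic used in the log output above: block counts are converted to MB/s using the elapsed time, with guards against a zero-length interval and an originally empty relation. The concrete numbers and the 8192-byte BLCKSZ are illustrative assumptions.

/* Illustrative sketch of the MB/s and %-of-total arithmetic above. */
#include <stdio.h>
#include <stdint.h>

#define BLCKSZ 8192				/* assumed default block size */

int
main(void)
{
	long		secs_dur = 2;			/* hypothetical elapsed seconds */
	int			usecs_dur = 500000;		/* hypothetical elapsed microseconds */
	int64_t		total_blks_read = 12800;
	int64_t		total_blks_dirtied = 3200;
	uint32_t	orig_rel_pages = 5000;
	uint32_t	scanned_pages = 1250;
	double		read_rate = 0,
				write_rate = 0;

	/* only compute rates for a non-zero interval, as in the listing */
	if (secs_dur > 0 || usecs_dur > 0)
	{
		double		elapsed = secs_dur + usecs_dur / 1000000.0;

		read_rate = (double) BLCKSZ * total_blks_read / (1024 * 1024) / elapsed;
		write_rate = (double) BLCKSZ * total_blks_dirtied / (1024 * 1024) / elapsed;
	}

	printf("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n",
		   read_rate, write_rate);

	/* empty relations report 100%% rather than dividing by zero */
	printf("scanned %.2f%% of total\n",
		   orig_rel_pages == 0 ? 100.0 : 100.0 * scanned_pages / orig_rel_pages);
	return 0;
}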
void TimestampDifference(TimestampTz start_time, TimestampTz stop_time, long *secs, int *microsecs)
Definition: timestamp.c:1721
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1781
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1645
void pgstat_progress_start_command(ProgressCommandType cmdtype, Oid relid)
void pgstat_progress_update_param(int index, int64 val)
void pgstat_progress_end_command(void)
@ PROGRESS_COMMAND_VACUUM
PgBackendStatus * MyBEEntry
bool track_io_timing
Definition: bufmgr.c:147
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:294
#define Max(x, y)
Definition: c.h:1010
int32_t int32
Definition: c.h:548
int64 TimestampTz
Definition: timestamp.h:39
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1170
ErrorContextCallback * error_context_stack
Definition: elog.c:95
#define _(x)
Definition: elog.c:91
#define LOG
Definition: elog.h:31
#define palloc_array(type, count)
Definition: fe_memutils.h:76
#define palloc0_object(type)
Definition: fe_memutils.h:75
Oid MyDatabaseId
Definition: globals.c:94
int verbose
WalUsage pgWalUsage
Definition: instrument.c:22
void WalUsageAccumDiff(WalUsage *dst, const WalUsage *add, const WalUsage *sub)
Definition: instrument.c:288
BufferUsage pgBufferUsage
Definition: instrument.c:20
void BufferUsageAccumDiff(BufferUsage *dst, const BufferUsage *add, const BufferUsage *sub)
Definition: instrument.c:248
#define NoLock
Definition: lockdefs.h:34
#define RowExclusiveLock
Definition: lockdefs.h:38
char * get_database_name(Oid dbid)
Definition: lsyscache.c:1257
char * get_namespace_name(Oid nspid)
Definition: lsyscache.c:3531
char * pstrdup(const char *in)
Definition: mcxt.c:1759
void pfree(void *pointer)
Definition: mcxt.c:1594
void * palloc0(Size size)
Definition: mcxt.c:1395
bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:2842
#define InvalidMultiXactId
Definition: multixact.h:25
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
int64 PgStat_Counter
Definition: pgstat.h:67
PgStat_Counter pgStatBlockReadTime
PgStat_Counter pgStatBlockWriteTime
void pgstat_report_vacuum(Oid tableoid, bool shared, PgStat_Counter livetuples, PgStat_Counter deadtuples, TimestampTz starttime)
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4067
#define PROGRESS_VACUUM_PHASE_FINAL_CLEANUP
Definition: progress.h:41
#define PROGRESS_VACUUM_MODE
Definition: progress.h:32
#define PROGRESS_VACUUM_MODE_NORMAL
Definition: progress.h:44
#define PROGRESS_VACUUM_STARTED_BY_AUTOVACUUM
Definition: progress.h:50
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
#define PROGRESS_VACUUM_DELAY_TIME
Definition: progress.h:31
#define PROGRESS_VACUUM_STARTED_BY_AUTOVACUUM_WRAPAROUND
Definition: progress.h:51
#define PROGRESS_VACUUM_STARTED_BY_MANUAL
Definition: progress.h:49
#define PROGRESS_VACUUM_STARTED_BY
Definition: progress.h:33
#define PROGRESS_VACUUM_MODE_AGGRESSIVE
Definition: progress.h:45
#define RelationGetRelationName(relation)
Definition: rel.h:549
#define RelationGetNamespace(relation)
Definition: rel.h:556
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:145
void appendStringInfoString(StringInfo str, const char *s)
Definition: stringinfo.c:230
void initStringInfo(StringInfo str)
Definition: stringinfo.c:97
int64 shared_blks_dirtied
Definition: instrument.h:28
int64 local_blks_hit
Definition: instrument.h:30
int64 shared_blks_read
Definition: instrument.h:27
int64 local_blks_read
Definition: instrument.h:31
int64 local_blks_dirtied
Definition: instrument.h:32
int64 shared_blks_hit
Definition: instrument.h:26
struct ErrorContextCallback * previous
Definition: elog.h:297
void(* callback)(void *arg)
Definition: elog.h:298
BlockNumber pages_deleted
Definition: genam.h:109
BlockNumber pages_newly_deleted
Definition: genam.h:108
BlockNumber pages_free
Definition: genam.h:110
BlockNumber num_pages
Definition: genam.h:104
BlockNumber vm_new_frozen_pages
Definition: vacuumlazy.c:336
int64 tuples_deleted
Definition: vacuumlazy.c:351
bool do_rel_truncate
Definition: vacuumlazy.c:279
BlockNumber scanned_pages
Definition: vacuumlazy.c:313
BlockNumber new_frozen_tuple_pages
Definition: vacuumlazy.c:322
GlobalVisState * vistest
Definition: vacuumlazy.c:283
BlockNumber removed_pages
Definition: vacuumlazy.c:321
int num_index_scans
Definition: vacuumlazy.c:349
double new_live_tuples
Definition: vacuumlazy.c:344
double new_rel_tuples
Definition: vacuumlazy.c:343
TransactionId NewRelfrozenXid
Definition: vacuumlazy.c:285
bool consider_bypass_optimization
Definition: vacuumlazy.c:274
int64 recently_dead_tuples
Definition: vacuumlazy.c:355
int64 tuples_frozen
Definition: vacuumlazy.c:352
char * dbname
Definition: vacuumlazy.c:290
BlockNumber missed_dead_pages
Definition: vacuumlazy.c:339
char * relnamespace
Definition: vacuumlazy.c:291
int64 live_tuples
Definition: vacuumlazy.c:354
int64 lpdead_items
Definition: vacuumlazy.c:353
BlockNumber lpdead_item_pages
Definition: vacuumlazy.c:338
BlockNumber eager_scanned_pages
Definition: vacuumlazy.c:319
bool do_index_cleanup
Definition: vacuumlazy.c:278
MultiXactId NewRelminMxid
Definition: vacuumlazy.c:286
int64 missed_dead_tuples
Definition: vacuumlazy.c:356
BlockNumber vm_new_visible_pages
Definition: vacuumlazy.c:325
VacErrPhase phase
Definition: vacuumlazy.c:296
char * indname
Definition: vacuumlazy.c:293
BlockNumber vm_new_visible_frozen_pages
Definition: vacuumlazy.c:333
int64 st_progress_param[PGSTAT_NUM_PROGRESS_PARAM]
Form_pg_class rd_rel
Definition: rel.h:111
TransactionId OldestXmin
Definition: vacuum.h:279
MultiXactId OldestMxact
Definition: vacuum.h:280
int nworkers
Definition: vacuum.h:251
VacOptValue truncate
Definition: vacuum.h:236
bits32 options
Definition: vacuum.h:219
int log_vacuum_min_duration
Definition: vacuum.h:227
bool is_wraparound
Definition: vacuum.h:226
VacOptValue index_cleanup
Definition: vacuum.h:235
int64 wal_buffers_full
Definition: instrument.h:57
uint64 wal_bytes
Definition: instrument.h:55
int64 wal_fpi
Definition: instrument.h:54
uint64 wal_fpi_bytes
Definition: instrument.h:56
int64 wal_records
Definition: instrument.h:53
static TransactionId ReadNextTransactionId(void)
Definition: transam.h:377
static bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
Definition: transam.h:282
bool track_cost_delay_timing
Definition: vacuum.c:82
void vac_open_indexes(Relation relation, LOCKMODE lockmode, int *nindexes, Relation **Irel)
Definition: vacuum.c:2362
void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
Definition: vacuum.c:2405
bool VacuumFailsafeActive
Definition: vacuum.c:110
void vac_update_relstats(Relation relation, BlockNumber num_pages, double num_tuples, BlockNumber num_all_visible_pages, BlockNumber num_all_frozen_pages, bool hasindex, TransactionId frozenxid, MultiXactId minmulti, bool *frozenxid_updated, bool *minmulti_updated, bool in_outer_xact)
Definition: vacuum.c:1426
bool vacuum_get_cutoffs(Relation rel, const VacuumParams params, struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:1100
#define VACOPT_VERBOSE
Definition: vacuum.h:182
@ VACOPTVALUE_AUTO
Definition: vacuum.h:203
@ VACOPTVALUE_ENABLED
Definition: vacuum.h:205
@ VACOPTVALUE_UNSPECIFIED
Definition: vacuum.h:202
@ VACOPTVALUE_DISABLED
Definition: vacuum.h:204
#define VACOPT_DISABLE_PAGE_SKIPPING
Definition: vacuum.h:188
static void dead_items_cleanup(LVRelState *vacrel)
Definition: vacuumlazy.c:3641
static void update_relstats_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:3847
static void heap_vacuum_eager_scan_setup(LVRelState *vacrel, const VacuumParams params)
Definition: vacuumlazy.c:500
static void vacuum_error_callback(void *arg)
Definition: vacuumlazy.c:3882
static void lazy_truncate_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:3258
static bool should_attempt_truncation(LVRelState *vacrel)
Definition: vacuumlazy.c:3238
static void lazy_scan_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:1227
static bool lazy_check_wraparound_failsafe(LVRelState *vacrel)
Definition: vacuumlazy.c:3007
static void dead_items_alloc(LVRelState *vacrel, int nworkers)
Definition: vacuumlazy.c:3532
bool IsInParallelMode(void)
Definition: xact.c:1090

References _, LVRelState::aggressive, AmAutoVacuumWorkerProcess, appendStringInfo(), appendStringInfoString(), ErrorContextCallback::arg, Assert(), LVRelState::bstrategy, buf, BufferUsageAccumDiff(), ErrorContextCallback::callback, LVRelState::consider_bypass_optimization, LVRelState::cutoffs, LVRelState::dbname, dead_items_alloc(), dead_items_cleanup(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, LVRelState::eager_scanned_pages, ereport, errmsg(), errmsg_internal(), error_context_stack, VacuumCutoffs::FreezeLimit, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), GlobalVisTestFor(), heap_vacuum_eager_scan_setup(), i, VacuumParams::index_cleanup, LVRelState::indname, LVRelState::indrels, LVRelState::indstats, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, VacuumParams::is_wraparound, IsInParallelMode(), lazy_check_wraparound_failsafe(), lazy_scan_heap(), lazy_truncate_heap(), LVRelState::live_tuples, BufferUsage::local_blks_dirtied, BufferUsage::local_blks_hit, BufferUsage::local_blks_read, LOG, VacuumParams::log_vacuum_min_duration, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, Max, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, VacuumCutoffs::MultiXactCutoff, MultiXactIdPrecedesOrEquals(), MyBEEntry, MyDatabaseId, LVRelState::new_frozen_tuple_pages, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, NoLock, LVRelState::nonempty_pages, LVRelState::num_index_scans, IndexBulkDeleteResult::num_pages, VacuumParams::nworkers, VacuumCutoffs::OldestMxact, VacuumCutoffs::OldestXmin, VacuumParams::options, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, IndexBulkDeleteResult::pages_newly_deleted, palloc0(), palloc0_object, palloc_array, pfree(), pg_rusage_init(), pg_rusage_show(), pgBufferUsage, pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), pgStatBlockReadTime, pgStatBlockWriteTime, pgWalUsage, LVRelState::phase, ErrorContextCallback::previous, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_DELAY_TIME, PROGRESS_VACUUM_MODE, PROGRESS_VACUUM_MODE_AGGRESSIVE, PROGRESS_VACUUM_MODE_NORMAL, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, PROGRESS_VACUUM_STARTED_BY, PROGRESS_VACUUM_STARTED_BY_AUTOVACUUM, PROGRESS_VACUUM_STARTED_BY_AUTOVACUUM_WRAPAROUND, PROGRESS_VACUUM_STARTED_BY_MANUAL, pstrdup(), RelationData::rd_rel, ReadNextTransactionId(), LVRelState::recently_dead_tuples, LVRelState::rel, LVRelState::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, VacuumCutoffs::relfrozenxid, VacuumCutoffs::relminmxid, LVRelState::relname, LVRelState::relnamespace, LVRelState::removed_pages, RowExclusiveLock, LVRelState::scanned_pages, BufferUsage::shared_blks_dirtied, BufferUsage::shared_blks_hit, BufferUsage::shared_blks_read, should_attempt_truncation(), LVRelState::skippedallvis, LVRelState::skipwithvm, PgBackendStatus::st_progress_param, TimestampDifference(), TimestampDifferenceExceeds(), track_cost_delay_timing, track_io_timing, TransactionIdPrecedesOrEquals(), VacuumParams::truncate, LVRelState::tuples_deleted, LVRelState::tuples_frozen, update_relstats_all_indexes(), vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_VERBOSE, VACOPTVALUE_AUTO, VACOPTVALUE_DISABLED, VACOPTVALUE_ENABLED, VACOPTVALUE_UNSPECIFIED, 
VACUUM_ERRCB_PHASE_UNKNOWN, vacuum_error_callback(), vacuum_get_cutoffs(), VacuumFailsafeActive, LVRelState::verbose, verbose, visibilitymap_count(), LVRelState::vistest, LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, LVRelState::vm_new_visible_pages, WalUsage::wal_buffers_full, WalUsage::wal_bytes, WalUsage::wal_fpi, WalUsage::wal_fpi_bytes, WalUsage::wal_records, and WalUsageAccumDiff().
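
The "%d XIDs old" figures above come from subtracting two transaction IDs and casting the result to int32. A standalone sketch of that arithmetic, assuming TransactionId is an unsigned 32-bit counter; the values used are hypothetical:

/* Sketch: XID age via unsigned 32-bit subtraction, as in diff = (int32)(next - cutoff). */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t TransactionId;

static int32_t
xid_age(TransactionId next_xid, TransactionId cutoff_xid)
{
	/* modular subtraction gives the age even across a wraparound */
	return (int32_t) (next_xid - cutoff_xid);
}

int
main(void)
{
	/* hypothetical values: next XID has wrapped past 2^32, cutoff has not */
	TransactionId next_xid = 100;
	TransactionId cutoff_xid = 4294967000u;

	printf("cutoff was %d XIDs old\n", xid_age(next_xid, cutoff_xid));	/* prints 396 */
	return 0;
}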

◆ lazy_check_wraparound_failsafe()

static bool lazy_check_wraparound_failsafe ( LVRelState vacrel)
static

Definition at line 3007 of file vacuumlazy.c.

3008{
3009 /* Don't warn more than once per VACUUM */
3010 if (VacuumFailsafeActive)
3011 return true;
3012
3013 if (unlikely(vacuum_xid_failsafe_check(&vacrel->cutoffs)))
3014 {
3015 const int progress_index[] = {
3016 PROGRESS_VACUUM_INDEXES_TOTAL,
3017 PROGRESS_VACUUM_INDEXES_PROCESSED,
3018 PROGRESS_VACUUM_MODE
3019 };
3020 int64 progress_val[3] = {0, 0, PROGRESS_VACUUM_MODE_FAILSAFE};
3021
3022 VacuumFailsafeActive = true;
3023
3024 /*
3025 * Abandon use of a buffer access strategy to allow use of all of
3026 * shared buffers. We assume the caller who allocated the memory for
3027 * the BufferAccessStrategy will free it.
3028 */
3029 vacrel->bstrategy = NULL;
3030
3031 /* Disable index vacuuming, index cleanup, and heap rel truncation */
3032 vacrel->do_index_vacuuming = false;
3033 vacrel->do_index_cleanup = false;
3034 vacrel->do_rel_truncate = false;
3035
3036 /* Reset the progress counters and set the failsafe mode */
3037 pgstat_progress_update_multi_param(3, progress_index, progress_val);
3038
3039 ereport(WARNING,
3040 (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
3041 vacrel->dbname, vacrel->relnamespace, vacrel->relname,
3042 vacrel->num_index_scans),
3043 errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
3044 errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
3045 "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
3046
3047 /* Stop applying cost limits from this point on */
3048 VacuumCostActive = false;
3049 VacuumCostBalance = 0;
3050
3051 return true;
3052 }
3053
3054 return false;
3055}
#define unlikely(x)
Definition: c.h:418
int errdetail(const char *fmt,...)
Definition: elog.c:1216
int errhint(const char *fmt,...)
Definition: elog.c:1330
bool VacuumCostActive
Definition: globals.c:158
int VacuumCostBalance
Definition: globals.c:157
#define PROGRESS_VACUUM_MODE_FAILSAFE
Definition: progress.h:46
#define PROGRESS_VACUUM_INDEXES_PROCESSED
Definition: progress.h:30
#define PROGRESS_VACUUM_INDEXES_TOTAL
Definition: progress.h:29
bool vacuum_xid_failsafe_check(const struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:1268

References LVRelState::bstrategy, LVRelState::cutoffs, LVRelState::dbname, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errdetail(), errhint(), errmsg(), LVRelState::num_index_scans, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_MODE, PROGRESS_VACUUM_MODE_FAILSAFE, LVRelState::relname, LVRelState::relnamespace, unlikely, vacuum_xid_failsafe_check(), VacuumCostActive, VacuumCostBalance, VacuumFailsafeActive, and WARNING.

Referenced by heap_vacuum_rel(), lazy_scan_heap(), and lazy_vacuum_all_indexes().
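
A minimal sketch of the failsafe's one-shot behavior, using a simplified, hypothetical age threshold in place of vacuum_xid_failsafe_check(): once triggered, the active flag stays set, so later calls return immediately without warning again.

/* Sketch: one-shot failsafe latch under a simplified, hypothetical age check. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool failsafe_active = false;

static bool
check_failsafe(uint32_t relfrozenxid_age, uint32_t failsafe_age)
{
	if (failsafe_active)
		return true;			/* don't warn more than once per VACUUM */

	if (relfrozenxid_age > failsafe_age)
	{
		failsafe_active = true;	/* stays set for the rest of this VACUUM */
		printf("bypassing nonessential maintenance as a failsafe\n");
		return true;
	}
	return false;
}

int
main(void)
{
	printf("%d\n", check_failsafe(100000, 1600000000));		/* 0: not triggered */
	printf("%d\n", check_failsafe(1700000000, 1600000000));	/* 1: triggers and warns */
	printf("%d\n", check_failsafe(100000, 1600000000));		/* 1: stays active, no new warning */
	return 0;
}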

◆ lazy_cleanup_all_indexes()

static void lazy_cleanup_all_indexes ( LVRelState vacrel)
static

Definition at line 3061 of file vacuumlazy.c.

3062{
3063 double reltuples = vacrel->new_rel_tuples;
3064 bool estimated_count = vacrel->scanned_pages < vacrel->rel_pages;
3065 const int progress_start_index[] = {
3068 };
3069 const int progress_end_index[] = {
3072 };
3073 int64 progress_start_val[2];
3074 int64 progress_end_val[2] = {0, 0};
3075
3076 Assert(vacrel->do_index_cleanup);
3077 Assert(vacrel->nindexes > 0);
3078
3079 /*
3080 * Report that we are now cleaning up indexes and the number of indexes to
3081 * cleanup.
3082 */
3083 progress_start_val[0] = PROGRESS_VACUUM_PHASE_INDEX_CLEANUP;
3084 progress_start_val[1] = vacrel->nindexes;
3085 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
3086
3087 if (!ParallelVacuumIsActive(vacrel))
3088 {
3089 for (int idx = 0; idx < vacrel->nindexes; idx++)
3090 {
3091 Relation indrel = vacrel->indrels[idx];
3092 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
3093
3094 vacrel->indstats[idx] =
3095 lazy_cleanup_one_index(indrel, istat, reltuples,
3096 estimated_count, vacrel);
3097
3098 /* Report the number of indexes cleaned up */
3100 idx + 1);
3101 }
3102 }
3103 else
3104 {
3105 /* Outsource everything to parallel variant */
3106 parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
3107 vacrel->num_index_scans,
3108 estimated_count);
3109 }
3110
3111 /* Reset the progress counters */
3112 pgstat_progress_update_multi_param(2, progress_end_index, progress_end_val);
3113}
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:262
#define PROGRESS_VACUUM_PHASE_INDEX_CLEANUP
Definition: progress.h:39
static IndexBulkDeleteResult * lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
Definition: vacuumlazy.c:3178
void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, bool estimated_count)

References Assert(), LVRelState::do_index_cleanup, idx(), LVRelState::indrels, LVRelState::indstats, lazy_cleanup_one_index(), LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::num_index_scans, parallel_vacuum_cleanup_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, LVRelState::pvs, LVRelState::rel_pages, and LVRelState::scanned_pages.

Referenced by lazy_scan_heap().
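
A standalone sketch of the serial path above, in which the printf calls stand in for pgstat progress reporting and the per-index cleanup call: reltuples is only an estimate when the heap was not scanned in full, and the processed count is bumped after each index.

/* Sketch: serial index-cleanup loop with an estimated reltuples figure. */
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	unsigned	rel_pages = 10000;		/* hypothetical table size */
	unsigned	scanned_pages = 2500;	/* hypothetical pages actually scanned */
	double		new_rel_tuples = 1.25e6;
	bool		estimated_count = scanned_pages < rel_pages;
	int			nindexes = 3;

	printf("indexes total: %d\n", nindexes);
	for (int idx = 0; idx < nindexes; idx++)
	{
		/* the index AM's amvacuumcleanup work would run here */
		printf("index %d cleaned up (reltuples=%.0f, estimated=%d)\n",
			   idx + 1, new_rel_tuples, estimated_count);
	}
	return 0;
}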

◆ lazy_cleanup_one_index()

static IndexBulkDeleteResult * lazy_cleanup_one_index ( Relation  indrel,
IndexBulkDeleteResult istat,
double  reltuples,
bool  estimated_count,
LVRelState vacrel 
)
static

Definition at line 3178 of file vacuumlazy.c.

3181{
3182 IndexVacuumInfo ivinfo;
3183 LVSavedErrInfo saved_err_info;
3184
3185 ivinfo.index = indrel;
3186 ivinfo.heaprel = vacrel->rel;
3187 ivinfo.analyze_only = false;
3188 ivinfo.report_progress = false;
3189 ivinfo.estimated_count = estimated_count;
3190 ivinfo.message_level = DEBUG2;
3191
3192 ivinfo.num_heap_tuples = reltuples;
3193 ivinfo.strategy = vacrel->bstrategy;
3194
3195 /*
3196 * Update error traceback information.
3197 *
3198 * The index name is saved during this phase and restored immediately
3199 * after this phase. See vacuum_error_callback.
3200 */
3201 Assert(vacrel->indname == NULL);
3202 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
3203 update_vacuum_error_info(vacrel, &saved_err_info,
3206
3207 istat = vac_cleanup_one_index(&ivinfo, istat);
3208
3209 /* Revert to the previous phase information for error traceback */
3210 restore_vacuum_error_info(vacrel, &saved_err_info);
3211 pfree(vacrel->indname);
3212 vacrel->indname = NULL;
3213
3214 return istat;
3215}
Relation index
Definition: genam.h:73
double num_heap_tuples
Definition: genam.h:79
bool analyze_only
Definition: genam.h:75
BufferAccessStrategy strategy
Definition: genam.h:80
Relation heaprel
Definition: genam.h:74
bool report_progress
Definition: genam.h:76
int message_level
Definition: genam.h:78
bool estimated_count
Definition: genam.h:77
IndexBulkDeleteResult * vac_cleanup_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat)
Definition: vacuum.c:2654
static void restore_vacuum_error_info(LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
Definition: vacuumlazy.c:3965
static void update_vacuum_error_info(LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
Definition: vacuumlazy.c:3946

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_cleanup_one_index(), and VACUUM_ERRCB_PHASE_INDEX_CLEANUP.

Referenced by lazy_cleanup_all_indexes().
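
A minimal standalone sketch of the save/update/restore pattern used above for the error-traceback state; the struct and function names here are hypothetical stand-ins for LVSavedErrInfo, update_vacuum_error_info() and restore_vacuum_error_info().

/* Sketch: stash the current phase, override it for the operation, restore afterwards. */
#include <stdio.h>

typedef enum { PHASE_UNKNOWN, PHASE_SCAN_HEAP, PHASE_INDEX_CLEANUP } Phase;

typedef struct { Phase phase; } ErrState;
typedef struct { Phase saved_phase; } SavedErrState;

static ErrState errstate = {PHASE_SCAN_HEAP};

static void
update_err_info(SavedErrState *saved, Phase new_phase)
{
	saved->saved_phase = errstate.phase;	/* remember the caller's phase */
	errstate.phase = new_phase;
}

static void
restore_err_info(const SavedErrState *saved)
{
	errstate.phase = saved->saved_phase;	/* revert for error traceback */
}

int
main(void)
{
	SavedErrState saved;

	update_err_info(&saved, PHASE_INDEX_CLEANUP);
	printf("during cleanup: phase %d\n", errstate.phase);	/* PHASE_INDEX_CLEANUP */
	restore_err_info(&saved);
	printf("after cleanup: phase %d\n", errstate.phase);	/* back to PHASE_SCAN_HEAP */
	return 0;
}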

◆ lazy_scan_heap()

static void lazy_scan_heap ( LVRelState vacrel)
static

Definition at line 1227 of file vacuumlazy.c.

1228{
1229 ReadStream *stream;
1230 BlockNumber rel_pages = vacrel->rel_pages,
1231 blkno = 0,
1232 next_fsm_block_to_vacuum = 0;
1233 BlockNumber orig_eager_scan_success_limit =
1234 vacrel->eager_scan_remaining_successes; /* for logging */
1235 Buffer vmbuffer = InvalidBuffer;
1236 const int initprog_index[] = {
1240 };
1241 int64 initprog_val[3];
1242
1243 /* Report that we're scanning the heap, advertising total # of blocks */
1244 initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
1245 initprog_val[1] = rel_pages;
1246 initprog_val[2] = vacrel->dead_items_info->max_bytes;
1247 pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
1248
1249 /* Initialize for the first heap_vac_scan_next_block() call */
1252 vacrel->next_unskippable_allvis = false;
1253 vacrel->next_unskippable_eager_scanned = false;
1255
1256 /*
1257 * Set up the read stream for vacuum's first pass through the heap.
1258 *
1259 * This could be made safe for READ_STREAM_USE_BATCHING, but only with
1260 * explicit work in heap_vac_scan_next_block.
1261 */
1262 stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE,
1263 vacrel->bstrategy,
1264 vacrel->rel,
1265 MAIN_FORKNUM,
1266 heap_vac_scan_next_block,
1267 vacrel,
1268 sizeof(uint8));
1269
1270 while (true)
1271 {
1272 Buffer buf;
1273 Page page;
1274 uint8 blk_info = 0;
1275 int ndeleted = 0;
1276 bool has_lpdead_items;
1277 void *per_buffer_data = NULL;
1278 bool vm_page_frozen = false;
1279 bool got_cleanup_lock = false;
1280
1281 vacuum_delay_point(false);
1282
1283 /*
1284 * Regularly check if wraparound failsafe should trigger.
1285 *
1286 * There is a similar check inside lazy_vacuum_all_indexes(), but
1287 * relfrozenxid might start to look dangerously old before we reach
1288 * that point. This check also provides failsafe coverage for the
1289 * one-pass strategy, and the two-pass strategy with the index_cleanup
1290 * param set to 'off'.
1291 */
1292 if (vacrel->scanned_pages > 0 &&
1293 vacrel->scanned_pages % FAILSAFE_EVERY_PAGES == 0)
1294 lazy_check_wraparound_failsafe(vacrel);
1295
1296 /*
1297 * Consider if we definitely have enough space to process TIDs on page
1298 * already. If we are close to overrunning the available space for
1299 * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
1300 * this page. However, let's force at least one page-worth of tuples
1301 * to be stored so as to ensure we do at least some work when the memory
1302 * configured is so low that we run out before storing anything.
1303 */
1304 if (vacrel->dead_items_info->num_items > 0 &&
1306 {
1307 /*
1308 * Before beginning index vacuuming, we release any pin we may
1309 * hold on the visibility map page. This isn't necessary for
1310 * correctness, but we do it anyway to avoid holding the pin
1311 * across a lengthy, unrelated operation.
1312 */
1313 if (BufferIsValid(vmbuffer))
1314 {
1315 ReleaseBuffer(vmbuffer);
1316 vmbuffer = InvalidBuffer;
1317 }
1318
1319 /* Perform a round of index and heap vacuuming */
1320 vacrel->consider_bypass_optimization = false;
1321 lazy_vacuum(vacrel);
1322
1323 /*
1324 * Vacuum the Free Space Map to make newly-freed space visible on
1325 * upper-level FSM pages. Note that blkno is the previously
1326 * processed block.
1327 */
1328 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1329 blkno + 1);
1330 next_fsm_block_to_vacuum = blkno;
1331
1332 /* Report that we are once again scanning the heap */
1335 }
1336
1337 buf = read_stream_next_buffer(stream, &per_buffer_data);
1338
1339 /* The relation is exhausted. */
1340 if (!BufferIsValid(buf))
1341 break;
1342
1343 blk_info = *((uint8 *) per_buffer_data);
1345 page = BufferGetPage(buf);
1346 blkno = BufferGetBlockNumber(buf);
1347
1348 vacrel->scanned_pages++;
1349 if (blk_info & VAC_BLK_WAS_EAGER_SCANNED)
1350 vacrel->eager_scanned_pages++;
1351
1352 /* Report as block scanned, update error traceback information */
1355 blkno, InvalidOffsetNumber);
1356
1357 /*
1358 * Pin the visibility map page in case we need to mark the page
1359 * all-visible. In most cases this will be very cheap, because we'll
1360 * already have the correct page pinned anyway.
1361 */
1362 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
1363
1364 /*
1365 * We need a buffer cleanup lock to prune HOT chains and defragment
1366 * the page in lazy_scan_prune. But when it's not possible to acquire
1367 * a cleanup lock right away, we may be able to settle for reduced
1368 * processing using lazy_scan_noprune.
1369 */
1370 got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
1371
1372 if (!got_cleanup_lock)
1373 LockBuffer(buf, BUFFER_LOCK_SHARE);
1374
1375 /* Check for new or empty pages before lazy_scan_[no]prune call */
1376 if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock,
1377 vmbuffer))
1378 {
1379 /* Processed as new/empty page (lock and pin released) */
1380 continue;
1381 }
1382
1383 /*
1384 * If we didn't get the cleanup lock, we can still collect LP_DEAD
1385 * items in the dead_items area for later vacuuming, count live and
1386 * recently dead tuples for vacuum logging, and determine if this
1387 * block could later be truncated. If we encounter any xid/mxids that
1388 * require advancing the relfrozenxid/relminmxid, we'll have to wait
1389 * for a cleanup lock and call lazy_scan_prune().
1390 */
1391 if (!got_cleanup_lock &&
1392 !lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
1393 {
1394 /*
1395 * lazy_scan_noprune could not do all required processing. Wait
1396 * for a cleanup lock, and call lazy_scan_prune in the usual way.
1397 */
1398 Assert(vacrel->aggressive);
1401 got_cleanup_lock = true;
1402 }
1403
1404 /*
1405 * If we have a cleanup lock, we must now prune, freeze, and count
1406 * tuples. We may have acquired the cleanup lock originally, or we may
1407 * have gone back and acquired it after lazy_scan_noprune() returned
1408 * false. Either way, the page hasn't been processed yet.
1409 *
1410 * Like lazy_scan_noprune(), lazy_scan_prune() will count
1411 * recently_dead_tuples and live tuples for vacuum logging, determine
1412 * if the block can later be truncated, and accumulate the details of
1413 * remaining LP_DEAD line pointers on the page into dead_items. These
1414 * dead items include those pruned by lazy_scan_prune() as well as
1415 * line pointers previously marked LP_DEAD.
1416 */
1417 if (got_cleanup_lock)
1418 ndeleted = lazy_scan_prune(vacrel, buf, blkno, page,
1419 vmbuffer,
1420 blk_info & VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM,
1421 &has_lpdead_items, &vm_page_frozen);
1422
1423 /*
1424 * Count an eagerly scanned page as a failure or a success.
1425 *
1426 * Only lazy_scan_prune() freezes pages, so if we didn't get the
1427 * cleanup lock, we won't have frozen the page. However, we only count
1428 * pages that were too new to require freezing as eager freeze
1429 * failures.
1430 *
1431 * We could gather more information from lazy_scan_noprune() about
1432 * whether or not there were tuples with XIDs or MXIDs older than the
1433 * FreezeLimit or MultiXactCutoff. However, for simplicity, we simply
1434 * exclude pages skipped due to cleanup lock contention from eager
1435 * freeze algorithm caps.
1436 */
1437 if (got_cleanup_lock &&
1438 (blk_info & VAC_BLK_WAS_EAGER_SCANNED))
1439 {
1440 /* Aggressive vacuums do not eager scan. */
1441 Assert(!vacrel->aggressive);
1442
1443 if (vm_page_frozen)
1444 {
1445 if (vacrel->eager_scan_remaining_successes > 0)
1446 vacrel->eager_scan_remaining_successes--;
1447
1448 if (vacrel->eager_scan_remaining_successes == 0)
1449 {
1450 /*
1451 * Report only once that we disabled eager scanning. We
1452 * may eagerly read ahead blocks in excess of the success
1453 * or failure caps before attempting to freeze them, so we
1454 * could reach here even after disabling additional eager
1455 * scanning.
1456 */
1457 if (vacrel->eager_scan_max_fails_per_region > 0)
1458 ereport(vacrel->verbose ? INFO : DEBUG2,
1459 (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of relation \"%s.%s.%s\"",
1460 orig_eager_scan_success_limit,
1461 vacrel->dbname, vacrel->relnamespace,
1462 vacrel->relname)));
1463
1464 /*
1465 * If we hit our success cap, permanently disable eager
1466 * scanning by setting the other eager scan management
1467 * fields to their disabled values.
1468 */
1469 vacrel->eager_scan_remaining_fails = 0;
1472 }
1473 }
1474 else if (vacrel->eager_scan_remaining_fails > 0)
1475 vacrel->eager_scan_remaining_fails--;
1476 }
1477
1478 /*
1479 * Now drop the buffer lock and, potentially, update the FSM.
1480 *
1481 * Our goal is to update the freespace map the last time we touch the
1482 * page. If we'll process a block in the second pass, we may free up
1483 * additional space on the page, so it is better to update the FSM
1484 * after the second pass. If the relation has no indexes, or if index
1485 * vacuuming is disabled, there will be no second heap pass; if this
1486 * particular page has no dead items, the second heap pass will not
1487 * touch this page. So, in those cases, update the FSM now.
1488 *
1489 * Note: In corner cases, it's possible to miss updating the FSM
1490 * entirely. If index vacuuming is currently enabled, we'll skip the
1491 * FSM update now. But if failsafe mode is later activated, or there
1492 * are so few dead tuples that index vacuuming is bypassed, there will
1493 * also be no opportunity to update the FSM later, because we'll never
1494 * revisit this page. Since updating the FSM is desirable but not
1495 * absolutely required, that's OK.
1496 */
1497 if (vacrel->nindexes == 0
1498 || !vacrel->do_index_vacuuming
1499 || !has_lpdead_items)
1500 {
1501 Size freespace = PageGetHeapFreeSpace(page);
1502
1504 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1505
1506 /*
1507 * Periodically perform FSM vacuuming to make newly-freed space
1508 * visible on upper FSM pages. This is done after vacuuming if the
1509 * table has indexes. There will only be newly-freed space if we
1510 * held the cleanup lock and lazy_scan_prune() was called.
1511 */
1512 if (got_cleanup_lock && vacrel->nindexes == 0 && ndeleted > 0 &&
1513 blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1514 {
1515 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1516 blkno);
1517 next_fsm_block_to_vacuum = blkno;
1518 }
1519 }
1520 else
1521 UnlockReleaseBuffer(buf);
1522 }
1523
1524 vacrel->blkno = InvalidBlockNumber;
1525 if (BufferIsValid(vmbuffer))
1526 ReleaseBuffer(vmbuffer);
1527
1528 /*
1529 * Report that everything is now scanned. We never skip scanning the last
1530 * block in the relation, so we can pass rel_pages here.
1531 */
1532 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED,
1533 rel_pages);
1534
1535 /* now we can compute the new value for pg_class.reltuples */
1536 vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, rel_pages,
1537 vacrel->scanned_pages,
1538 vacrel->live_tuples);
1539
1540 /*
1541 * Also compute the total number of surviving heap entries. In the
1542 * (unlikely) scenario that new_live_tuples is -1, take it as zero.
1543 */
1544 vacrel->new_rel_tuples =
1545 Max(vacrel->new_live_tuples, 0) + vacrel->recently_dead_tuples +
1546 vacrel->missed_dead_tuples;
1547
1548 read_stream_end(stream);
1549
1550 /*
1551 * Do index vacuuming (call each index's ambulkdelete routine), then do
1552 * related heap vacuuming
1553 */
1554 if (vacrel->dead_items_info->num_items > 0)
1555 lazy_vacuum(vacrel);
1556
1557 /*
1558 * Vacuum the remainder of the Free Space Map. We must do this whether or
1559 * not there were indexes, and whether or not we bypassed index vacuuming.
1560 * We can pass rel_pages here because we never skip scanning the last
1561 * block of the relation.
1562 */
1563 if (rel_pages > next_fsm_block_to_vacuum)
1564 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, rel_pages);
1565
1566 /* report all blocks vacuumed */
1567 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, rel_pages);
1568
1569 /* Do final index cleanup (call each index's amvacuumcleanup routine) */
1570 if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
1571 lazy_cleanup_all_indexes(vacrel);
1572}
void CheckBufferIsPinnedOnce(Buffer buffer)
Definition: bufmgr.c:5651
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5684
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5857
@ BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:205
Size PageGetHeapFreeSpace(const PageData *page)
Definition: bufpage.c:990
void FreeSpaceMapVacuumRange(Relation rel, BlockNumber start, BlockNumber end)
Definition: freespace.c:377
void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
Definition: freespace.c:194
#define PROGRESS_VACUUM_PHASE_SCAN_HEAP
Definition: progress.h:36
#define PROGRESS_VACUUM_TOTAL_HEAP_BLKS
Definition: progress.h:22
#define PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES
Definition: progress.h:26
#define PROGRESS_VACUUM_HEAP_BLKS_SCANNED
Definition: progress.h:23
#define PROGRESS_VACUUM_HEAP_BLKS_VACUUMED
Definition: progress.h:24
Buffer read_stream_next_buffer(ReadStream *stream, void **per_buffer_data)
Definition: read_stream.c:791
ReadStream * read_stream_begin_relation(int flags, BufferAccessStrategy strategy, Relation rel, ForkNumber forknum, ReadStreamBlockNumberCB callback, void *callback_private_data, size_t per_buffer_data_size)
Definition: read_stream.c:737
void read_stream_end(ReadStream *stream)
Definition: read_stream.c:1089
#define READ_STREAM_MAINTENANCE
Definition: read_stream.h:28
BlockNumber blkno
Definition: vacuumlazy.c:294
void vacuum_delay_point(bool is_analyze)
Definition: vacuum.c:2426
double vac_estimate_reltuples(Relation relation, BlockNumber total_pages, BlockNumber scanned_pages, double scanned_tuples)
Definition: vacuum.c:1330
static int lazy_scan_prune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool all_visible_according_to_vm, bool *has_lpdead_items, bool *vm_page_frozen)
Definition: vacuumlazy.c:1971
static BlockNumber heap_vac_scan_next_block(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition: vacuumlazy.c:1599
static void lazy_vacuum(LVRelState *vacrel)
Definition: vacuumlazy.c:2488
static void lazy_cleanup_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:3061
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
Definition: vacuumlazy.c:2277
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
Definition: vacuumlazy.c:1836
#define FAILSAFE_EVERY_PAGES
Definition: vacuumlazy.c:192
#define VACUUM_FSM_EVERY_PAGES
Definition: vacuumlazy.c:201
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)

References LVRelState::aggressive, Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), CheckBufferIsPinnedOnce(), ConditionalLockBufferForCleanup(), LVRelState::consider_bypass_optimization, LVRelState::current_block, LVRelState::dbname, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::eager_scan_max_fails_per_region, LVRelState::eager_scan_remaining_fails, LVRelState::eager_scan_remaining_successes, LVRelState::eager_scanned_pages, ereport, errmsg(), FAILSAFE_EVERY_PAGES, FreeSpaceMapVacuumRange(), heap_vac_scan_next_block(), INFO, InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_check_wraparound_failsafe(), lazy_cleanup_all_indexes(), lazy_scan_new_or_empty(), lazy_scan_noprune(), lazy_scan_prune(), lazy_vacuum(), LVRelState::live_tuples, LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, Max, VacDeadItemsInfo::max_bytes, LVRelState::missed_dead_tuples, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::next_eager_scan_region_start, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::nindexes, VacDeadItemsInfo::num_items, PageGetHeapFreeSpace(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, read_stream_begin_relation(), read_stream_end(), READ_STREAM_MAINTENANCE, read_stream_next_buffer(), LVRelState::recently_dead_tuples, RecordPageWithFreeSpace(), LVRelState::rel, LVRelState::rel_pages, ReleaseBuffer(), LVRelState::relname, LVRelState::relnamespace, LVRelState::scanned_pages, TidStoreMemoryUsage(), UnlockReleaseBuffer(), update_vacuum_error_info(), VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM, VAC_BLK_WAS_EAGER_SCANNED, vac_estimate_reltuples(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_FSM_EVERY_PAGES, LVRelState::verbose, and visibilitymap_pin().

Referenced by heap_vacuum_rel().
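
A standalone arithmetic sketch of the two cadences used in the main loop above: FAILSAFE_EVERY_PAGES and VACUUM_FSM_EVERY_PAGES are byte budgets (4GB and 8GB) divided by the block size, so assuming the default 8192-byte BLCKSZ the failsafe is rechecked every 524288 scanned pages and, for indexless tables, the FSM is vacuumed every 1048576 pages.

/* Sketch: page-count cadences derived from 4GB/8GB byte budgets, assuming BLCKSZ = 8192. */
#include <stdio.h>

#define BLCKSZ 8192
#define FAILSAFE_EVERY_PAGES	((4ULL * 1024 * 1024 * 1024) / BLCKSZ)
#define VACUUM_FSM_EVERY_PAGES	((8ULL * 1024 * 1024 * 1024) / BLCKSZ)

int
main(void)
{
	unsigned long long scanned_pages = 1048576;	/* hypothetical progress point */

	printf("failsafe check every %llu pages\n", FAILSAFE_EVERY_PAGES);	/* 524288 */
	printf("FSM vacuum every %llu pages\n", VACUUM_FSM_EVERY_PAGES);	/* 1048576 */

	/* same modulo test as the main loop uses to space out failsafe checks */
	if (scanned_pages > 0 && scanned_pages % FAILSAFE_EVERY_PAGES == 0)
		printf("time to recheck the wraparound failsafe\n");

	return 0;
}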

◆ lazy_scan_new_or_empty()

static bool lazy_scan_new_or_empty ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool  sharelock,
Buffer  vmbuffer 
)
static

Definition at line 1836 of file vacuumlazy.c.

1838{
1839 Size freespace;
1840
1841 if (PageIsNew(page))
1842 {
1843 /*
1844 * All-zeroes pages can be left over if either a backend extends the
1845 * relation by a single page, but crashes before the newly initialized
1846 * page has been written out, or when bulk-extending the relation
1847 * (which creates a number of empty pages at the tail end of the
1848 * relation), and then enters them into the FSM.
1849 *
1850 * Note we do not enter the page into the visibilitymap. That has the
1851 * downside that we repeatedly visit this page in subsequent vacuums,
1852 * but otherwise we'll never discover the space on a promoted standby.
1853 * The harm of repeated checking ought to normally not be too bad. The
1854 * space usually should be used at some point, otherwise there
1855 * wouldn't be any regular vacuums.
1856 *
1857 * Make sure these pages are in the FSM, to ensure they can be reused.
1858 * Do that by testing if there's any space recorded for the page. If
1859 * not, enter it. We do so after releasing the lock on the heap page,
1860 * the FSM is approximate, after all.
1861 */
1863
1864 if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
1865 {
1866 freespace = BLCKSZ - SizeOfPageHeaderData;
1867
1868 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1869 }
1870
1871 return true;
1872 }
1873
1874 if (PageIsEmpty(page))
1875 {
1876 /*
1877 * It seems likely that caller will always be able to get a cleanup
1878 * lock on an empty page. But don't take any chances -- escalate to
1879 * an exclusive lock (still don't need a cleanup lock, though).
1880 */
1881 if (sharelock)
1882 {
1885
1886 if (!PageIsEmpty(page))
1887 {
1888 /* page isn't new or empty -- keep lock and pin for now */
1889 return false;
1890 }
1891 }
1892 else
1893 {
1894 /* Already have a full cleanup lock (which is more than enough) */
1895 }
1896
1897 /*
1898 * Unlike new pages, empty pages are always set all-visible and
1899 * all-frozen.
1900 */
1901 if (!PageIsAllVisible(page))
1902 {
1904
1905 /* mark buffer dirty before writing a WAL record */
1907
1908 /*
1909 * It's possible that another backend has extended the heap,
1910 * initialized the page, and then failed to WAL-log the page due
1911 * to an ERROR. Since heap extension is not WAL-logged, recovery
1912 * might try to replay our record setting the page all-visible and
1913 * find that the page isn't initialized, which will cause a PANIC.
1914 * To prevent that, check whether the page has been previously
1915 * WAL-logged, and if not, do that now.
1916 */
1917 if (RelationNeedsWAL(vacrel->rel) &&
1919 log_newpage_buffer(buf, true);
1920
1921 PageSetAllVisible(page);
1922 visibilitymap_set(vacrel->rel, blkno, buf,
1924 vmbuffer, InvalidTransactionId,
1928
1929 /* Count the newly all-frozen pages for logging */
1930 vacrel->vm_new_visible_pages++;
1932 }
1933
1934 freespace = PageGetHeapFreeSpace(page);
1936 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1937 return true;
1938 }
1939
1940 /* page isn't new or empty -- keep lock and pin */
1941 return false;
1942}
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2943
@ BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:207
static bool PageIsAllVisible(const PageData *page)
Definition: bufpage.h:428
#define SizeOfPageHeaderData
Definition: bufpage.h:216
static void PageSetAllVisible(Page page)
Definition: bufpage.h:433
static XLogRecPtr PageGetLSN(const PageData *page)
Definition: bufpage.h:385
Size GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk)
Definition: freespace.c:244
#define START_CRIT_SECTION()
Definition: miscadmin.h:150
#define END_CRIT_SECTION()
Definition: miscadmin.h:152
#define RelationNeedsWAL(relation)
Definition: rel.h:638
uint8 visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
#define XLogRecPtrIsValid(r)
Definition: xlogdefs.h:29
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
Definition: xloginsert.c:1259

References buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, END_CRIT_SECTION, GetRecordedFreeSpace(), InvalidTransactionId, InvalidXLogRecPtr, LockBuffer(), log_newpage_buffer(), MarkBufferDirty(), PageGetHeapFreeSpace(), PageGetLSN(), PageIsAllVisible(), PageIsEmpty(), PageIsNew(), PageSetAllVisible(), RecordPageWithFreeSpace(), LVRelState::rel, RelationNeedsWAL, SizeOfPageHeaderData, START_CRIT_SECTION, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), LVRelState::vm_new_visible_frozen_pages, LVRelState::vm_new_visible_pages, and XLogRecPtrIsValid.

Referenced by lazy_scan_heap().
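
A standalone sketch of the free-space figure recorded for an all-zeroes (new) page above: the usable space is BLCKSZ minus the fixed page header, i.e. 8168 bytes assuming the default 8192-byte block and the usual 24-byte page header (both values are assumptions here, not taken from the listing).

/* Sketch: free space reported to the FSM for an uninitialized page. */
#include <stdio.h>
#include <stddef.h>

#define BLCKSZ 8192
#define SIZE_OF_PAGE_HEADER_DATA 24		/* assumed size of the fixed page header */

int
main(void)
{
	size_t		freespace = BLCKSZ - SIZE_OF_PAGE_HEADER_DATA;

	printf("free space recorded for a new page: %zu bytes\n", freespace);	/* 8168 */
	return 0;
}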

◆ lazy_scan_noprune()

static bool lazy_scan_noprune ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool *  has_lpdead_items 
)
static

Definition at line 2277 of file vacuumlazy.c.

2282{
2283 OffsetNumber offnum,
2284 maxoff;
2285 int lpdead_items,
2286 live_tuples,
2287 recently_dead_tuples,
2288 missed_dead_tuples;
2289 bool hastup;
2290 HeapTupleHeader tupleheader;
2291 TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
2292 MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid;
2294
2295 Assert(BufferGetBlockNumber(buf) == blkno);
2296
2297 hastup = false; /* for now */
2298
2299 lpdead_items = 0;
2300 live_tuples = 0;
2301 recently_dead_tuples = 0;
2302 missed_dead_tuples = 0;
2303
2304 maxoff = PageGetMaxOffsetNumber(page);
2305 for (offnum = FirstOffsetNumber;
2306 offnum <= maxoff;
2307 offnum = OffsetNumberNext(offnum))
2308 {
2309 ItemId itemid;
2310 HeapTupleData tuple;
2311
2312 vacrel->offnum = offnum;
2313 itemid = PageGetItemId(page, offnum);
2314
2315 if (!ItemIdIsUsed(itemid))
2316 continue;
2317
2318 if (ItemIdIsRedirected(itemid))
2319 {
2320 hastup = true;
2321 continue;
2322 }
2323
2324 if (ItemIdIsDead(itemid))
2325 {
2326 /*
2327 * Deliberately don't set hastup=true here. See same point in
2328 * lazy_scan_prune for an explanation.
2329 */
2330 deadoffsets[lpdead_items++] = offnum;
2331 continue;
2332 }
2333
2334 hastup = true; /* page prevents rel truncation */
2335 tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
2336 if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
2337 &NoFreezePageRelfrozenXid,
2338 &NoFreezePageRelminMxid))
2339 {
2340 /* Tuple with XID < FreezeLimit (or MXID < MultiXactCutoff) */
2341 if (vacrel->aggressive)
2342 {
2343 /*
2344 * Aggressive VACUUMs must always be able to advance rel's
2345 * relfrozenxid to a value >= FreezeLimit (and be able to
2346 * advance rel's relminmxid to a value >= MultiXactCutoff).
2347 * The ongoing aggressive VACUUM won't be able to do that
2348 * unless it can freeze an XID (or MXID) from this tuple now.
2349 *
2350 * The only safe option is to have caller perform processing
2351 * of this page using lazy_scan_prune. Caller might have to
2352 * wait a while for a cleanup lock, but it can't be helped.
2353 */
2354 vacrel->offnum = InvalidOffsetNumber;
2355 return false;
2356 }
2357
2358 /*
2359 * Non-aggressive VACUUMs are under no obligation to advance
2360 * relfrozenxid (even by one XID). We can be much laxer here.
2361 *
2362 * Currently we always just accept an older final relfrozenxid
2363 * and/or relminmxid value. We never make caller wait or work a
2364 * little harder, even when it likely makes sense to do so.
2365 */
2366 }
2367
2368 ItemPointerSet(&(tuple.t_self), blkno, offnum);
2369 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2370 tuple.t_len = ItemIdGetLength(itemid);
2371 tuple.t_tableOid = RelationGetRelid(vacrel->rel);
2372
2373 switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
2374 buf))
2375 {
2377 case HEAPTUPLE_LIVE:
2378
2379 /*
2380 * Count both cases as live, just like lazy_scan_prune
2381 */
2382 live_tuples++;
2383
2384 break;
2385 case HEAPTUPLE_DEAD:
2386
2387 /*
2388 * There is some useful work for pruning to do, that won't be
2389 * done due to failure to get a cleanup lock.
2390 */
2391 missed_dead_tuples++;
2392 break;
2394
2395 /*
2396 * Count in recently_dead_tuples, just like lazy_scan_prune
2397 */
2398 recently_dead_tuples++;
2399 break;
2401
2402 /*
2403 * Do not count these rows as live, just like lazy_scan_prune
2404 */
2405 break;
2406 default:
2407 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2408 break;
2409 }
2410 }
2411
2412 vacrel->offnum = InvalidOffsetNumber;
2413
2414 /*
2415 * By here we know for sure that caller can put off freezing and pruning
2416 * this particular page until the next VACUUM. Remember its details now.
2417 * (lazy_scan_prune expects a clean slate, so we have to do this last.)
2418 */
2419 vacrel->NewRelfrozenXid = NoFreezePageRelfrozenXid;
2420 vacrel->NewRelminMxid = NoFreezePageRelminMxid;
2421
2422 /* Save any LP_DEAD items found on the page in dead_items */
2423 if (vacrel->nindexes == 0)
2424 {
2425 /* Using one-pass strategy (since table has no indexes) */
2426 if (lpdead_items > 0)
2427 {
2428 /*
2429 * Perfunctory handling for the corner case where a single pass
2430 * strategy VACUUM cannot get a cleanup lock, and it turns out
2431 * that there is one or more LP_DEAD items: just count the LP_DEAD
2432 * items as missed_dead_tuples instead. (This is a bit dishonest,
2433 * but it beats having to maintain specialized heap vacuuming code
2434 * forever, for vanishingly little benefit.)
2435 */
2436 hastup = true;
2437 missed_dead_tuples += lpdead_items;
2438 }
2439 }
2440 else if (lpdead_items > 0)
2441 {
2442 /*
2443 * Page has LP_DEAD items, and so any references/TIDs that remain in
2444 * indexes will be deleted during index vacuuming (and then marked
2445 * LP_UNUSED in the heap)
2446 */
2447 vacrel->lpdead_item_pages++;
2448
2449 dead_items_add(vacrel, blkno, deadoffsets, lpdead_items);
2450
2451 vacrel->lpdead_items += lpdead_items;
2452 }
2453
2454 /*
2455 * Finally, add relevant page-local counts to whole-VACUUM counts
2456 */
2457 vacrel->live_tuples += live_tuples;
2458 vacrel->recently_dead_tuples += recently_dead_tuples;
2459 vacrel->missed_dead_tuples += missed_dead_tuples;
2460 if (missed_dead_tuples > 0)
2461 vacrel->missed_dead_pages++;
2462
2463 /* Can't truncate this page */
2464 if (hastup)
2465 vacrel->nonempty_pages = blkno + 1;
2466
2467 /* Did we find LP_DEAD items? */
2468 *has_lpdead_items = (lpdead_items > 0);
2469
2470 /* Caller won't need to call lazy_scan_prune with same page */
2471 return true;
2472}
TransactionId MultiXactId
Definition: c.h:681
bool heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid)
Definition: heapam.c:7891
#define MaxHeapTuplesPerPage
Definition: htup_details.h:624
OffsetNumber offnum
Definition: vacuumlazy.c:295
static void dead_items_add(LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
Definition: vacuumlazy.c:3597

References LVRelState::aggressive, Assert(), buf, BufferGetBlockNumber(), LVRelState::cutoffs, dead_items_add(), elog, ERROR, FirstOffsetNumber, heap_tuple_should_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, ItemIdGetLength, ItemIdIsDead, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MaxHeapTuplesPerPage, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::recently_dead_tuples, LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by lazy_scan_heap().
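
A simplified, standalone sketch of the per-page bookkeeping performed above: walk the line pointers, collect LP_DEAD offsets into a bounded local array, and fold the page-local tallies into the whole-VACUUM counters. The item states and the 291-entry bound are illustrative stand-ins for the real ItemId macros and MaxHeapTuplesPerPage.

/* Sketch: classify line pointers, remember dead offsets, roll up per-page counts. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_ITEMS_PER_PAGE 291	/* stand-in for MaxHeapTuplesPerPage on 8K pages */

typedef enum { ITEM_UNUSED, ITEM_NORMAL, ITEM_REDIRECT, ITEM_DEAD } ItemState;

int
main(void)
{
	ItemState	page[] = {ITEM_NORMAL, ITEM_DEAD, ITEM_UNUSED, ITEM_DEAD, ITEM_REDIRECT};
	int			nitems = sizeof(page) / sizeof(page[0]);
	unsigned short deadoffsets[MAX_ITEMS_PER_PAGE];
	int			lpdead_items = 0;
	int			live_tuples = 0;
	bool		hastup = false;			/* page would block truncation */
	long		vacrel_lpdead_items = 0;	/* whole-VACUUM counter */

	for (int offnum = 1; offnum <= nitems; offnum++)
	{
		switch (page[offnum - 1])
		{
			case ITEM_UNUSED:
				break;
			case ITEM_DEAD:
				/* remember the offset for later index/heap vacuuming */
				deadoffsets[lpdead_items++] = (unsigned short) offnum;
				break;
			case ITEM_REDIRECT:
				hastup = true;
				break;
			case ITEM_NORMAL:
				hastup = true;
				live_tuples++;
				break;
		}
	}

	if (lpdead_items > 0)
		vacrel_lpdead_items += lpdead_items;	/* fold into whole-VACUUM tally */

	printf("live: %d, LP_DEAD: %d (first at offset %u), hastup: %d, total dead so far: %ld\n",
		   live_tuples, lpdead_items, deadoffsets[0], hastup, vacrel_lpdead_items);
	return 0;
}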

◆ lazy_scan_prune()

static int lazy_scan_prune ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
Buffer  vmbuffer,
bool  all_visible_according_to_vm,
bool *  has_lpdead_items,
bool *  vm_page_frozen 
)
static

Definition at line 1971 of file vacuumlazy.c.

1979{
1980 Relation rel = vacrel->rel;
1981 PruneFreezeResult presult;
1982 PruneFreezeParams params = {
1983 .relation = rel,
1984 .buffer = buf,
1985 .reason = PRUNE_VACUUM_SCAN,
1986 .options = HEAP_PAGE_PRUNE_FREEZE,
1987 .vistest = vacrel->vistest,
1988 .cutoffs = &vacrel->cutoffs,
1989 };
1990
1991 Assert(BufferGetBlockNumber(buf) == blkno);
1992
1993 /*
1994 * Prune all HOT-update chains and potentially freeze tuples on this page.
1995 *
1996 * If the relation has no indexes, we can immediately mark would-be dead
1997 * items LP_UNUSED.
1998 *
1999 * The number of tuples removed from the page is returned in
2000 * presult.ndeleted. It should not be confused with presult.lpdead_items;
2001 * presult.lpdead_items's final value can be thought of as the number of
2002 * tuples that were deleted from indexes.
2003 *
2004 * We will update the VM after collecting LP_DEAD items and freezing
2005 * tuples. Pruning will have determined whether or not the page is
2006 * all-visible.
2007 */
2008 if (vacrel->nindexes == 0)
2010
2012 &presult,
2013 &vacrel->offnum,
2014 &vacrel->NewRelfrozenXid, &vacrel->NewRelminMxid);
2015
2018
2019 if (presult.nfrozen > 0)
2020 {
2021 /*
2022 * We don't increment the new_frozen_tuple_pages instrumentation
2023 * counter when nfrozen == 0, since it only counts pages with newly
2024 * frozen tuples (don't confuse that with pages newly set all-frozen
2025 * in VM).
2026 */
2027 vacrel->new_frozen_tuple_pages++;
2028 }
2029
2030 /*
2031 * VACUUM will call heap_page_is_all_visible() during the second pass over
2032 * the heap to determine all_visible and all_frozen for the page -- this
2033 * is a specialized version of the logic from this function. Now that
2034 * we've finished pruning and freezing, make sure that we're in total
2035 * agreement with heap_page_is_all_visible() using an assertion.
2036 */
2037#ifdef USE_ASSERT_CHECKING
2038 if (presult.all_visible)
2039 {
2040 TransactionId debug_cutoff;
2041 bool debug_all_frozen;
2042
2043 Assert(presult.lpdead_items == 0);
2044
2045 Assert(heap_page_is_all_visible(vacrel->rel, buf,
2046 vacrel->cutoffs.OldestXmin, &debug_all_frozen,
2047 &debug_cutoff, &vacrel->offnum));
2048
2049 Assert(presult.all_frozen == debug_all_frozen);
2050
2051 Assert(!TransactionIdIsValid(debug_cutoff) ||
2052 debug_cutoff == presult.vm_conflict_horizon);
2053 }
2054#endif
2055
2056 /*
2057 * Now save details of the LP_DEAD items from the page in vacrel
2058 */
2059 if (presult.lpdead_items > 0)
2060 {
2061 vacrel->lpdead_item_pages++;
2062
2063 /*
2064 * deadoffsets are collected incrementally in
2065 * heap_page_prune_and_freeze() as each dead line pointer is recorded,
2066 * with an indeterminate order, but dead_items_add requires them to be
2067 * sorted.
2068 */
2069 qsort(presult.deadoffsets, presult.lpdead_items, sizeof(OffsetNumber),
2071
2072 dead_items_add(vacrel, blkno, presult.deadoffsets, presult.lpdead_items);
2073 }
2074
2075 /* Finally, add page-local counts to whole-VACUUM counts */
2076 vacrel->tuples_deleted += presult.ndeleted;
2077 vacrel->tuples_frozen += presult.nfrozen;
2078 vacrel->lpdead_items += presult.lpdead_items;
2079 vacrel->live_tuples += presult.live_tuples;
2080 vacrel->recently_dead_tuples += presult.recently_dead_tuples;
2081
2082 /* Can't truncate this page */
2083 if (presult.hastup)
2084 vacrel->nonempty_pages = blkno + 1;
2085
2086 /* Did we find LP_DEAD items? */
2087 *has_lpdead_items = (presult.lpdead_items > 0);
2088
2089 Assert(!presult.all_visible || !(*has_lpdead_items));
2090 Assert(!presult.all_frozen || presult.all_visible);
2091
2092 /*
2093 * Handle setting visibility map bit based on information from the VM (as
2094 * of last heap_vac_scan_next_block() call), and from all_visible and
2095 * all_frozen variables
2096 */
2097 if (!all_visible_according_to_vm && presult.all_visible)
2098 {
2099 uint8 old_vmbits;
2101
2102 if (presult.all_frozen)
2103 {
2105 flags |= VISIBILITYMAP_ALL_FROZEN;
2106 }
2107
2108 /*
2109 * It should never be the case that the visibility map page is set
2110 * while the page-level bit is clear, but the reverse is allowed (if
2111 * checksums are not enabled). Regardless, set both bits so that we
2112 * get back in sync.
2113 *
2114 * NB: If the heap page is all-visible but the VM bit is not set, we
2115 * don't need to dirty the heap page. However, if checksums are
2116 * enabled, we do need to make sure that the heap page is dirtied
2117 * before passing it to visibilitymap_set(), because it may be logged.
2118 * Given that this situation should only happen in rare cases after a
2119 * crash, it is not worth optimizing.
2120 */
2121 PageSetAllVisible(page);
2123 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
2125 vmbuffer, presult.vm_conflict_horizon,
2126 flags);
2127
2128 /*
2129 * If the page wasn't already set all-visible and/or all-frozen in the
2130 * VM, count it as newly set for logging.
2131 */
2132 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2133 {
2134 vacrel->vm_new_visible_pages++;
2135 if (presult.all_frozen)
2136 {
2138 *vm_page_frozen = true;
2139 }
2140 }
2141 else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
2142 presult.all_frozen)
2143 {
2144 vacrel->vm_new_frozen_pages++;
2145 *vm_page_frozen = true;
2146 }
2147 }
2148
2149 /*
2150 * As of PostgreSQL 9.2, the visibility map bit should never be set if the
2151 * page-level bit is clear. However, it's possible that the bit got
2152 * cleared after heap_vac_scan_next_block() was called, so we must recheck
2153 * with buffer lock before concluding that the VM is corrupt.
2154 */
2155 else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
2156 visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
2157 {
2160 errmsg("page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
2161 vacrel->relname, blkno)));
2162
2163 visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
2165 }
2166
2167 /*
2168 * It's possible for the value returned by
2169 * GetOldestNonRemovableTransactionId() to move backwards, so it's not
2170 * wrong for us to see tuples that appear to not be visible to everyone
2171 * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value
2172 * never moves backwards, but GetOldestNonRemovableTransactionId() is
2173 * conservative and sometimes returns a value that's unnecessarily small,
2174 * so if we see that contradiction it just means that the tuples that we
2175 * think are not visible to everyone yet actually are, and the
2176 * PD_ALL_VISIBLE flag is correct.
2177 *
2178 * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
2179 * however.
2180 */
2181 else if (presult.lpdead_items > 0 && PageIsAllVisible(page))
2182 {
2185 errmsg("page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
2186 vacrel->relname, blkno)));
2187
2188 PageClearAllVisible(page);
2190 visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
2192 }
2193
2194 /*
2195 * If the all-visible page is all-frozen but not marked as such yet, mark
2196 * it as all-frozen.
2197 */
2198 else if (all_visible_according_to_vm && presult.all_frozen &&
2199 !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
2200 {
2201 uint8 old_vmbits;
2202
2203 /*
2204 * Avoid relying on all_visible_according_to_vm as a proxy for the
2205 * page-level PD_ALL_VISIBLE bit being set, since it might have become
2206 * stale -- even when all_visible is set
2207 */
2208 if (!PageIsAllVisible(page))
2209 {
2210 PageSetAllVisible(page);
2212 }
2213
2214 /*
2215 * Set the page all-frozen (and all-visible) in the VM.
2216 *
2217 * We can pass InvalidTransactionId as our cutoff_xid, since a
2218 * snapshotConflictHorizon sufficient to make everything safe for REDO
2219 * was logged when the page's tuples were frozen.
2220 */
2222 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
2224 vmbuffer, InvalidTransactionId,
2227
2228 /*
2229 * The page was likely already set all-visible in the VM. However,
2230 * there is a small chance that it was modified sometime between
2231 * setting all_visible_according_to_vm and checking the visibility
2232 * during pruning. Check the return value of old_vmbits anyway to
2233 * ensure the visibility map counters used for logging are accurate.
2234 */
2235 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2236 {
2237 vacrel->vm_new_visible_pages++;
2239 *vm_page_frozen = true;
2240 }
2241
2242 /*
2243 * We already checked that the page was not set all-frozen in the VM
2244 * above, so we don't need to test the value of old_vmbits.
2245 */
2246 else
2247 {
2248 vacrel->vm_new_frozen_pages++;
2249 *vm_page_frozen = true;
2250 }
2251 }
2252
2253 return presult.ndeleted;
2254}
static void PageClearAllVisible(Page page)
Definition: bufpage.h:438
int errcode(int sqlerrcode)
Definition: elog.c:863
#define HEAP_PAGE_PRUNE_FREEZE
Definition: heapam.h:44
@ PRUNE_VACUUM_SCAN
Definition: heapam.h:229
#define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW
Definition: heapam.h:43
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:42
#define qsort(a, b, c, d)
Definition: port.h:499
void heap_page_prune_and_freeze(PruneFreezeParams *params, PruneFreezeResult *presult, OffsetNumber *off_loc, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
Definition: pruneheap.c:809
Relation relation
Definition: heapam.h:238
int recently_dead_tuples
Definition: heapam.h:285
TransactionId vm_conflict_horizon
Definition: heapam.h:300
OffsetNumber deadoffsets[MaxHeapTuplesPerPage]
Definition: heapam.h:314
bool all_visible
Definition: heapam.h:298
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static int cmpOffsetNumbers(const void *a, const void *b)
Definition: vacuumlazy.c:1946
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
#define VM_ALL_FROZEN(r, b, v)
Definition: visibilitymap.h:27
#define VISIBILITYMAP_VALID_BITS

References PruneFreezeResult::all_frozen, PruneFreezeResult::all_visible, Assert(), buf, BufferGetBlockNumber(), cmpOffsetNumbers(), LVRelState::cutoffs, dead_items_add(), PruneFreezeResult::deadoffsets, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), PruneFreezeResult::hastup, heap_page_prune_and_freeze(), HEAP_PAGE_PRUNE_FREEZE, HEAP_PAGE_PRUNE_MARK_UNUSED_NOW, InvalidTransactionId, InvalidXLogRecPtr, LVRelState::live_tuples, PruneFreezeResult::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, PruneFreezeResult::lpdead_items, MarkBufferDirty(), MultiXactIdIsValid, PruneFreezeResult::ndeleted, LVRelState::new_frozen_tuple_pages, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, PruneFreezeResult::nfrozen, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::offnum, VacuumCutoffs::OldestXmin, PruneFreezeParams::options, PageClearAllVisible(), PageIsAllVisible(), PageSetAllVisible(), PRUNE_VACUUM_SCAN, qsort, LVRelState::recently_dead_tuples, PruneFreezeResult::recently_dead_tuples, LVRelState::rel, PruneFreezeParams::relation, LVRelState::relname, TransactionIdIsValid, LVRelState::tuples_deleted, LVRelState::tuples_frozen, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, LVRelState::vistest, VM_ALL_FROZEN, PruneFreezeResult::vm_conflict_horizon, LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, LVRelState::vm_new_visible_pages, and WARNING.

Referenced by lazy_scan_heap().
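
The visibility-map update above reduces to a small decision: when pruning found the page all-visible and the VM did not already say so, set VISIBILITYMAP_ALL_VISIBLE, and additionally VISIBILITYMAP_ALL_FROZEN when every remaining tuple is frozen. A minimal sketch of that flag computation (the helper name and standalone form are illustrative, not part of vacuumlazy.c):

/* Illustrative helper: the VM bits lazy_scan_prune() sets for a pruned page */
static uint8
vm_flags_for_pruned_page(bool all_visible, bool all_frozen)
{
    uint8       flags = 0;

    if (all_visible)
    {
        flags |= VISIBILITYMAP_ALL_VISIBLE;
        if (all_frozen)
            flags |= VISIBILITYMAP_ALL_FROZEN;
    }
    return flags;
}

visibilitymap_set() returns the previous VM bits, which is what the vm_new_visible_pages and vm_new_frozen_pages counters are derived from.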

◆ lazy_truncate_heap()

static void lazy_truncate_heap ( LVRelState * vacrel)
static

Definition at line 3258 of file vacuumlazy.c.

3259{
3260 BlockNumber orig_rel_pages = vacrel->rel_pages;
3261 BlockNumber new_rel_pages;
3262 bool lock_waiter_detected;
3263 int lock_retry;
3264
3265 /* Report that we are now truncating */
3268
3269 /* Update error traceback information one last time */
3272
3273 /*
3274 * Loop until no more truncating can be done.
3275 */
3276 do
3277 {
3278 /*
3279 * We need full exclusive lock on the relation in order to do
3280 * truncation. If we can't get it, give up rather than waiting --- we
3281 * don't want to block other backends, and we don't want to deadlock
3282 * (which is quite possible considering we already hold a lower-grade
3283 * lock).
3284 */
3285 lock_waiter_detected = false;
3286 lock_retry = 0;
3287 while (true)
3288 {
3290 break;
3291
3292 /*
3293 * Check for interrupts while trying to (re-)acquire the exclusive
3294 * lock.
3295 */
3297
3298 if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
3300 {
3301 /*
3302 * We failed to establish the lock in the specified number of
3303 * retries. This means we give up truncating.
3304 */
3305 ereport(vacrel->verbose ? INFO : DEBUG2,
3306 (errmsg("\"%s\": stopping truncate due to conflicting lock request",
3307 vacrel->relname)));
3308 return;
3309 }
3310
3311 (void) WaitLatch(MyLatch,
3314 WAIT_EVENT_VACUUM_TRUNCATE);
3316 }
3317
3318 /*
3319 * Now that we have exclusive lock, look to see if the rel has grown
3320 * whilst we were vacuuming with non-exclusive lock. If so, give up;
3321 * the newly added pages presumably contain non-deletable tuples.
3322 */
3323 new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
3324 if (new_rel_pages != orig_rel_pages)
3325 {
3326 /*
3327 * Note: we intentionally don't update vacrel->rel_pages with the
3328 * new rel size here. If we did, it would amount to assuming that
3329 * the new pages are empty, which is unlikely. Leaving the numbers
3330 * alone amounts to assuming that the new pages have the same
3331 * tuple density as existing ones, which is less unlikely.
3332 */
3334 return;
3335 }
3336
3337 /*
3338 * Scan backwards from the end to verify that the end pages actually
3339 * contain no tuples. This is *necessary*, not optional, because
3340 * other backends could have added tuples to these pages whilst we
3341 * were vacuuming.
3342 */
3343 new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
3344 vacrel->blkno = new_rel_pages;
3345
3346 if (new_rel_pages >= orig_rel_pages)
3347 {
3348 /* can't do anything after all */
3350 return;
3351 }
3352
3353 /*
3354 * Okay to truncate.
3355 */
3356 RelationTruncate(vacrel->rel, new_rel_pages);
3357
3358 /*
3359 * We can release the exclusive lock as soon as we have truncated.
3360 * Other backends can't safely access the relation until they have
3361 * processed the smgr invalidation that smgrtruncate sent out ... but
3362 * that should happen as part of standard invalidation processing once
3363 * they acquire lock on the relation.
3364 */
3366
3367 /*
3368 * Update statistics. Here, it *is* correct to adjust rel_pages
3369 * without also touching reltuples, since the tuple count wasn't
3370 * changed by the truncation.
3371 */
3372 vacrel->removed_pages += orig_rel_pages - new_rel_pages;
3373 vacrel->rel_pages = new_rel_pages;
3374
3375 ereport(vacrel->verbose ? INFO : DEBUG2,
3376 (errmsg("table \"%s\": truncated %u to %u pages",
3377 vacrel->relname,
3378 orig_rel_pages, new_rel_pages)));
3379 orig_rel_pages = new_rel_pages;
3380 } while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
3381}
struct Latch * MyLatch
Definition: globals.c:63
void ResetLatch(Latch *latch)
Definition: latch.c:374
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:172
void UnlockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:314
bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:278
#define PROGRESS_VACUUM_PHASE_TRUNCATE
Definition: progress.h:40
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:289
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL
Definition: vacuumlazy.c:179
#define VACUUM_TRUNCATE_LOCK_TIMEOUT
Definition: vacuumlazy.c:180
static BlockNumber count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
Definition: vacuumlazy.c:3389
#define WL_TIMEOUT
Definition: waiteventset.h:37
#define WL_EXIT_ON_PM_DEATH
Definition: waiteventset.h:39
#define WL_LATCH_SET
Definition: waiteventset.h:34

References AccessExclusiveLock, LVRelState::blkno, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), DEBUG2, ereport, errmsg(), INFO, InvalidOffsetNumber, MyLatch, LVRelState::nonempty_pages, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelState::rel, LVRelState::rel_pages, RelationGetNumberOfBlocks, RelationTruncate(), LVRelState::relname, LVRelState::removed_pages, ResetLatch(), UnlockRelation(), update_vacuum_error_info(), VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_TRUNCATE_LOCK_TIMEOUT, VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL, LVRelState::verbose, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

Referenced by heap_vacuum_rel().
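
The retry loop above bounds how long truncation will wait for AccessExclusiveLock: each failed ConditionalLockRelation() attempt is followed by a short sleep, and the whole attempt is abandoned once the retries would exceed VACUUM_TRUNCATE_LOCK_TIMEOUT / VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL iterations (roughly five seconds with the macro values defined earlier in this file). A stripped-down sketch of the same pattern, with progress reporting and logging omitted:

/* Sketch: bounded wait for AccessExclusiveLock before truncating */
int         lock_retry = 0;

while (!ConditionalLockRelation(vacrel->rel, AccessExclusiveLock))
{
    CHECK_FOR_INTERRUPTS();

    if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
                        VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
        return;                 /* give up; another backend wants the table */

    (void) WaitLatch(MyLatch,
                     WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
                     VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL,
                     WAIT_EVENT_VACUUM_TRUNCATE);
    ResetLatch(MyLatch);
}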

◆ lazy_vacuum()

static void lazy_vacuum ( LVRelState * vacrel)
static

Definition at line 2488 of file vacuumlazy.c.

2489{
2490 bool bypass;
2491
2492 /* Should not end up here with no indexes */
2493 Assert(vacrel->nindexes > 0);
2494 Assert(vacrel->lpdead_item_pages > 0);
2495
2496 if (!vacrel->do_index_vacuuming)
2497 {
2498 Assert(!vacrel->do_index_cleanup);
2499 dead_items_reset(vacrel);
2500 return;
2501 }
2502
2503 /*
2504 * Consider bypassing index vacuuming (and heap vacuuming) entirely.
2505 *
2506 * We currently only do this in cases where the number of LP_DEAD items
2507 * for the entire VACUUM operation is close to zero. This avoids sharp
2508 * discontinuities in the duration and overhead of successive VACUUM
2509 * operations that run against the same table with a fixed workload.
2510 * Ideally, successive VACUUM operations will behave as if there are
2511 * exactly zero LP_DEAD items in cases where there are close to zero.
2512 *
2513 * This is likely to be helpful with a table that is continually affected
2514 * by UPDATEs that can mostly apply the HOT optimization, but occasionally
2515 * have small aberrations that lead to just a few heap pages retaining
2516 * only one or two LP_DEAD items. This is pretty common; even when the
2517 * DBA goes out of their way to make UPDATEs use HOT, it is practically
2518 * impossible to predict whether HOT will be applied in 100% of cases.
2519 * It's far easier to ensure that 99%+ of all UPDATEs against a table use
2520 * HOT through careful tuning.
2521 */
2522 bypass = false;
2523 if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
2524 {
2525 BlockNumber threshold;
2526
2527 Assert(vacrel->num_index_scans == 0);
2528 Assert(vacrel->lpdead_items == vacrel->dead_items_info->num_items);
2529 Assert(vacrel->do_index_vacuuming);
2530 Assert(vacrel->do_index_cleanup);
2531
2532 /*
2533 * This crossover point at which we'll start to do index vacuuming is
2534 * expressed as a percentage of the total number of heap pages in the
2535 * table that are known to have at least one LP_DEAD item. This is
2536 * much more important than the total number of LP_DEAD items, since
2537 * it's a proxy for the number of heap pages whose visibility map bits
2538 * cannot be set on account of bypassing index and heap vacuuming.
2539 *
2540 * We apply one further precautionary test: the space currently used
2541 * to store the TIDs (TIDs that now all point to LP_DEAD items) must
2542 * not exceed 32MB. This limits the risk that we will bypass index
2543 * vacuuming again and again until eventually there is a VACUUM whose
2544 * dead_items space is not CPU cache resident.
2545 *
2546 * We don't take any special steps to remember the LP_DEAD items (such
2547 * as counting them in our final update to the stats system) when the
2548 * optimization is applied. Though the accounting used in analyze.c's
2549 * acquire_sample_rows() will recognize the same LP_DEAD items as dead
2550 * rows in its own stats report, that's okay. The discrepancy should
2551 * be negligible. If this optimization is ever expanded to cover more
2552 * cases then this may need to be reconsidered.
2553 */
2554 threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
2555 bypass = (vacrel->lpdead_item_pages < threshold &&
2556 TidStoreMemoryUsage(vacrel->dead_items) < 32 * 1024 * 1024);
2557 }
2558
2559 if (bypass)
2560 {
2561 /*
2562 * There are almost zero TIDs. Behave as if there were precisely
2563 * zero: bypass index vacuuming, but do index cleanup.
2564 *
2565 * We expect that the ongoing VACUUM operation will finish very
2566 * quickly, so there is no point in considering speeding up as a
2567 * failsafe against wraparound failure. (Index cleanup is expected to
2568 * finish very quickly in cases where there were no ambulkdelete()
2569 * calls.)
2570 */
2571 vacrel->do_index_vacuuming = false;
2572 }
2573 else if (lazy_vacuum_all_indexes(vacrel))
2574 {
2575 /*
2576 * We successfully completed a round of index vacuuming. Do related
2577 * heap vacuuming now.
2578 */
2579 lazy_vacuum_heap_rel(vacrel);
2580 }
2581 else
2582 {
2583 /*
2584 * Failsafe case.
2585 *
2586 * We attempted index vacuuming, but didn't finish a full round/full
2587 * index scan. This happens when relfrozenxid or relminmxid is too
2588 * far in the past.
2589 *
2590 * From this point on the VACUUM operation will do no further index
2591 * vacuuming or heap vacuuming. This VACUUM operation won't end up
2592 * back here again.
2593 */
2595 }
2596
2597 /*
2598 * Forget the LP_DEAD items that we just vacuumed (or just decided to not
2599 * vacuum)
2600 */
2601 dead_items_reset(vacrel);
2602}
static void dead_items_reset(LVRelState *vacrel)
Definition: vacuumlazy.c:3619
#define BYPASS_THRESHOLD_PAGES
Definition: vacuumlazy.c:186
static bool lazy_vacuum_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2613
static void lazy_vacuum_heap_rel(LVRelState *vacrel)
Definition: vacuumlazy.c:2758

References Assert(), BYPASS_THRESHOLD_PAGES, LVRelState::consider_bypass_optimization, LVRelState::dead_items, LVRelState::dead_items_info, dead_items_reset(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, lazy_vacuum_all_indexes(), lazy_vacuum_heap_rel(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, LVRelState::rel_pages, TidStoreMemoryUsage(), and VacuumFailsafeActive.

Referenced by lazy_scan_heap().
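
The bypass test itself is compact: index vacuuming is skipped only while the number of heap pages containing LP_DEAD items stays below BYPASS_THRESHOLD_PAGES times rel_pages and the dead-item TID store occupies less than 32MB. A standalone restatement of that check (the helper form is illustrative; in the source the test is inline in lazy_vacuum()):

/* Illustrative restatement of the index-vacuuming bypass test */
static bool
bypass_index_vacuuming(LVRelState *vacrel)
{
    BlockNumber threshold;

    threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;

    return vacrel->lpdead_item_pages < threshold &&
        TidStoreMemoryUsage(vacrel->dead_items) < 32 * 1024 * 1024;
}

For a one-million-page table this means bulk index deletion is bypassed only while fewer than roughly 20,000 pages (about 2% of the table) hold LP_DEAD items.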

◆ lazy_vacuum_all_indexes()

static bool lazy_vacuum_all_indexes ( LVRelState * vacrel)
static

Definition at line 2613 of file vacuumlazy.c.

2614{
2615 bool allindexes = true;
2616 double old_live_tuples = vacrel->rel->rd_rel->reltuples;
2617 const int progress_start_index[] = {
2620 };
2621 const int progress_end_index[] = {
2625 };
2626 int64 progress_start_val[2];
2627 int64 progress_end_val[3];
2628
2629 Assert(vacrel->nindexes > 0);
2630 Assert(vacrel->do_index_vacuuming);
2631 Assert(vacrel->do_index_cleanup);
2632
2633 /* Precheck for XID wraparound emergencies */
2635 {
2636 /* Wraparound emergency -- don't even start an index scan */
2637 return false;
2638 }
2639
2640 /*
2641 * Report that we are now vacuuming indexes and the number of indexes to
2642 * vacuum.
2643 */
2644 progress_start_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_INDEX;
2645 progress_start_val[1] = vacrel->nindexes;
2646 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
2647
2648 if (!ParallelVacuumIsActive(vacrel))
2649 {
2650 for (int idx = 0; idx < vacrel->nindexes; idx++)
2651 {
2652 Relation indrel = vacrel->indrels[idx];
2653 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2654
2655 vacrel->indstats[idx] = lazy_vacuum_one_index(indrel, istat,
2656 old_live_tuples,
2657 vacrel);
2658
2659 /* Report the number of indexes vacuumed */
2661 idx + 1);
2662
2664 {
2665 /* Wraparound emergency -- end current index scan */
2666 allindexes = false;
2667 break;
2668 }
2669 }
2670 }
2671 else
2672 {
2673 /* Outsource everything to parallel variant */
2674 parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples,
2675 vacrel->num_index_scans);
2676
2677 /*
2678 * Do a postcheck to consider applying wraparound failsafe now. Note
2679 * that parallel VACUUM only gets the precheck and this postcheck.
2680 */
2682 allindexes = false;
2683 }
2684
2685 /*
2686 * We delete all LP_DEAD items from the first heap pass in all indexes on
2687 * each call here (except calls where we choose to do the failsafe). This
2688 * makes the next call to lazy_vacuum_heap_rel() safe (except in the event
2689 * of the failsafe triggering, which prevents the next call from taking
2690 * place).
2691 */
2692 Assert(vacrel->num_index_scans > 0 ||
2693 vacrel->dead_items_info->num_items == vacrel->lpdead_items);
2694 Assert(allindexes || VacuumFailsafeActive);
2695
2696 /*
2697 * Increase and report the number of index scans. Also, we reset
2698 * PROGRESS_VACUUM_INDEXES_TOTAL and PROGRESS_VACUUM_INDEXES_PROCESSED.
2699 *
2700 * We deliberately include the case where we started a round of bulk
2701 * deletes that we weren't able to finish due to the failsafe triggering.
2702 */
2703 vacrel->num_index_scans++;
2704 progress_end_val[0] = 0;
2705 progress_end_val[1] = 0;
2706 progress_end_val[2] = vacrel->num_index_scans;
2707 pgstat_progress_update_multi_param(3, progress_end_index, progress_end_val);
2708
2709 return allindexes;
2710}
#define PROGRESS_VACUUM_NUM_INDEX_VACUUMS
Definition: progress.h:25
#define PROGRESS_VACUUM_PHASE_VACUUM_INDEX
Definition: progress.h:37
static IndexBulkDeleteResult * lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
Definition: vacuumlazy.c:3129
void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans)

References Assert(), LVRelState::dead_items_info, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, idx(), LVRelState::indrels, LVRelState::indstats, lazy_check_wraparound_failsafe(), lazy_vacuum_one_index(), LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, parallel_vacuum_bulkdel_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, LVRelState::pvs, RelationData::rd_rel, LVRelState::rel, and VacuumFailsafeActive.

Referenced by lazy_vacuum().
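
The progress updates here use the multi-parameter form so that related counters change atomically in the progress view: parallel arrays of parameter indexes and values are filled and handed to pgstat_progress_update_multi_param() together with their length. A minimal sketch of the pattern used at the start of this function (variable names shortened for illustration):

/* Report the new phase and the index count in one atomic progress update */
const int   start_index[] = {
    PROGRESS_VACUUM_PHASE,
    PROGRESS_VACUUM_INDEXES_TOTAL
};
int64       start_val[2];

start_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_INDEX;
start_val[1] = vacrel->nindexes;
pgstat_progress_update_multi_param(2, start_index, start_val);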

◆ lazy_vacuum_heap_page()

static void lazy_vacuum_heap_page ( LVRelState * vacrel,
BlockNumber  blkno,
Buffer  buffer,
OffsetNumber * deadoffsets,
int  num_offsets,
Buffer  vmbuffer 
)
static

Definition at line 2876 of file vacuumlazy.c.

2879{
2880 Page page = BufferGetPage(buffer);
2882 int nunused = 0;
2883 TransactionId visibility_cutoff_xid;
2884 TransactionId conflict_xid = InvalidTransactionId;
2885 bool all_frozen;
2886 LVSavedErrInfo saved_err_info;
2887 uint8 vmflags = 0;
2888
2889 Assert(vacrel->do_index_vacuuming);
2890
2892
2893 /* Update error traceback information */
2894 update_vacuum_error_info(vacrel, &saved_err_info,
2897
2898 /*
2899 * Before marking dead items unused, check whether the page will become
2900 * all-visible once that change is applied. This lets us reap the tuples
2901 * and mark the page all-visible within the same critical section,
2902 * enabling both changes to be emitted in a single WAL record. Since the
2903 * visibility checks may perform I/O and allocate memory, they must be
2904 * done outside the critical section.
2905 */
2906 if (heap_page_would_be_all_visible(vacrel->rel, buffer,
2907 vacrel->cutoffs.OldestXmin,
2908 deadoffsets, num_offsets,
2909 &all_frozen, &visibility_cutoff_xid,
2910 &vacrel->offnum))
2911 {
2912 vmflags |= VISIBILITYMAP_ALL_VISIBLE;
2913 if (all_frozen)
2914 {
2915 vmflags |= VISIBILITYMAP_ALL_FROZEN;
2916 Assert(!TransactionIdIsValid(visibility_cutoff_xid));
2917 }
2918
2919 /*
2920 * Take the lock on the vmbuffer before entering a critical section.
2921 * The heap page lock must also be held while updating the VM to
2922 * ensure consistency.
2923 */
2925 }
2926
2928
2929 for (int i = 0; i < num_offsets; i++)
2930 {
2931 ItemId itemid;
2932 OffsetNumber toff = deadoffsets[i];
2933
2934 itemid = PageGetItemId(page, toff);
2935
2936 Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
2937 ItemIdSetUnused(itemid);
2938 unused[nunused++] = toff;
2939 }
2940
2941 Assert(nunused > 0);
2942
2943 /* Attempt to truncate line pointer array now */
2945
2946 if ((vmflags & VISIBILITYMAP_VALID_BITS) != 0)
2947 {
2948 /*
2949 * The page is guaranteed to have had dead line pointers, so we always
2950 * set PD_ALL_VISIBLE.
2951 */
2952 PageSetAllVisible(page);
2954 vmbuffer, vmflags,
2955 vacrel->rel->rd_locator);
2956 conflict_xid = visibility_cutoff_xid;
2957 }
2958
2959 /*
2960 * Mark buffer dirty before we write WAL.
2961 */
2962 MarkBufferDirty(buffer);
2963
2964 /* XLOG stuff */
2965 if (RelationNeedsWAL(vacrel->rel))
2966 {
2967 log_heap_prune_and_freeze(vacrel->rel, buffer,
2968 vmflags != 0 ? vmbuffer : InvalidBuffer,
2969 vmflags,
2970 conflict_xid,
2971 false, /* no cleanup lock required */
2973 NULL, 0, /* frozen */
2974 NULL, 0, /* redirected */
2975 NULL, 0, /* dead */
2976 unused, nunused);
2977 }
2978
2980
2981 if ((vmflags & VISIBILITYMAP_ALL_VISIBLE) != 0)
2982 {
2983 /* Count the newly set VM page for logging */
2984 LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
2985 vacrel->vm_new_visible_pages++;
2986 if (all_frozen)
2988 }
2989
2990 /* Revert to the previous phase information for error traceback */
2991 restore_vacuum_error_info(vacrel, &saved_err_info);
2992}
void PageTruncateLinePointerArray(Page page)
Definition: bufpage.c:834
@ PRUNE_VACUUM_CLEANUP
Definition: heapam.h:230
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
void log_heap_prune_and_freeze(Relation relation, Buffer buffer, Buffer vmbuffer, uint8 vmflags, TransactionId conflict_xid, bool cleanup_lock, PruneReason reason, HeapTupleFreeze *frozen, int nfrozen, OffsetNumber *redirected, int nredirected, OffsetNumber *dead, int ndead, OffsetNumber *unused, int nunused)
Definition: pruneheap.c:2157
RelFileLocator rd_locator
Definition: rel.h:57
static bool heap_page_would_be_all_visible(Relation rel, Buffer buf, TransactionId OldestXmin, OffsetNumber *deadoffsets, int ndeadoffsets, bool *all_frozen, TransactionId *visibility_cutoff_xid, OffsetNumber *logging_offnum)
Definition: vacuumlazy.c:3709
uint8 visibilitymap_set_vmbits(BlockNumber heapBlk, Buffer vmBuf, uint8 flags, const RelFileLocator rlocator)

References Assert(), BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage(), LVRelState::cutoffs, LVRelState::do_index_vacuuming, END_CRIT_SECTION, heap_page_would_be_all_visible(), i, InvalidBuffer, InvalidOffsetNumber, InvalidTransactionId, ItemIdHasStorage, ItemIdIsDead, ItemIdSetUnused, LockBuffer(), log_heap_prune_and_freeze(), MarkBufferDirty(), MaxHeapTuplesPerPage, LVRelState::offnum, VacuumCutoffs::OldestXmin, PageGetItemId(), PageSetAllVisible(), PageTruncateLinePointerArray(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PRUNE_VACUUM_CLEANUP, RelationData::rd_locator, LVRelState::rel, RelationNeedsWAL, restore_vacuum_error_info(), START_CRIT_SECTION, TransactionIdIsValid, update_vacuum_error_info(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set_vmbits(), VISIBILITYMAP_VALID_BITS, LVRelState::vm_new_visible_frozen_pages, and LVRelState::vm_new_visible_pages.

Referenced by lazy_vacuum_heap_rel().
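
Stripped of WAL logging and visibility-map handling, the core of the second heap pass is a loop that turns each previously collected LP_DEAD line pointer back into LP_UNUSED and then compacts the line pointer array. A condensed sketch of that step (the helper form is illustrative, and it assumes the caller holds an exclusive lock on the buffer, as lazy_vacuum_heap_rel() arranges):

/* Sketch: reap the LP_DEAD items collected during the first heap pass */
static int
reap_dead_items(Page page, const OffsetNumber *deadoffsets, int num_offsets,
                OffsetNumber *unused)
{
    int         nunused = 0;

    for (int i = 0; i < num_offsets; i++)
    {
        ItemId      itemid = PageGetItemId(page, deadoffsets[i]);

        Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
        ItemIdSetUnused(itemid);
        unused[nunused++] = deadoffsets[i];
    }

    /* Reclaim any line pointers at the end of the array that are now unused */
    PageTruncateLinePointerArray(page);

    return nunused;
}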

◆ lazy_vacuum_heap_rel()

static void lazy_vacuum_heap_rel ( LVRelState * vacrel)
static

Definition at line 2758 of file vacuumlazy.c.

2759{
2760 ReadStream *stream;
2761 BlockNumber vacuumed_pages = 0;
2762 Buffer vmbuffer = InvalidBuffer;
2763 LVSavedErrInfo saved_err_info;
2764 TidStoreIter *iter;
2765
2766 Assert(vacrel->do_index_vacuuming);
2767 Assert(vacrel->do_index_cleanup);
2768 Assert(vacrel->num_index_scans > 0);
2769
2770 /* Report that we are now vacuuming the heap */
2773
2774 /* Update error traceback information */
2775 update_vacuum_error_info(vacrel, &saved_err_info,
2778
2779 iter = TidStoreBeginIterate(vacrel->dead_items);
2780
2781 /*
2782 * Set up the read stream for vacuum's second pass through the heap.
2783 *
2784 * It is safe to use batchmode, as vacuum_reap_lp_read_stream_next() does
2785 * not need to wait for IO and does not perform locking. Once we support
2786 * parallelism it should still be fine, as presumably the holder of locks
2787 * would never be blocked by IO while holding the lock.
2788 */
2791 vacrel->bstrategy,
2792 vacrel->rel,
2795 iter,
2796 sizeof(TidStoreIterResult));
2797
2798 while (true)
2799 {
2800 BlockNumber blkno;
2801 Buffer buf;
2802 Page page;
2803 TidStoreIterResult *iter_result;
2804 Size freespace;
2806 int num_offsets;
2807
2808 vacuum_delay_point(false);
2809
2810 buf = read_stream_next_buffer(stream, (void **) &iter_result);
2811
2812 /* The relation is exhausted */
2813 if (!BufferIsValid(buf))
2814 break;
2815
2816 vacrel->blkno = blkno = BufferGetBlockNumber(buf);
2817
2818 Assert(iter_result);
2819 num_offsets = TidStoreGetBlockOffsets(iter_result, offsets, lengthof(offsets));
2820 Assert(num_offsets <= lengthof(offsets));
2821
2822 /*
2823 * Pin the visibility map page in case we need to mark the page
2824 * all-visible. In most cases this will be very cheap, because we'll
2825 * already have the correct page pinned anyway.
2826 */
2827 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
2828
2829 /* We need a non-cleanup exclusive lock to mark dead_items unused */
2831 lazy_vacuum_heap_page(vacrel, blkno, buf, offsets,
2832 num_offsets, vmbuffer);
2833
2834 /* Now that we've vacuumed the page, record its available space */
2835 page = BufferGetPage(buf);
2836 freespace = PageGetHeapFreeSpace(page);
2837
2839 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
2840 vacuumed_pages++;
2841 }
2842
2843 read_stream_end(stream);
2844 TidStoreEndIterate(iter);
2845
2846 vacrel->blkno = InvalidBlockNumber;
2847 if (BufferIsValid(vmbuffer))
2848 ReleaseBuffer(vmbuffer);
2849
2850 /*
2851 * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
2852 * the second heap pass. No more, no less.
2853 */
2854 Assert(vacrel->num_index_scans > 1 ||
2855 (vacrel->dead_items_info->num_items == vacrel->lpdead_items &&
2856 vacuumed_pages == vacrel->lpdead_item_pages));
2857
2859 (errmsg("table \"%s\": removed %" PRId64 " dead item identifiers in %u pages",
2860 vacrel->relname, vacrel->dead_items_info->num_items,
2861 vacuumed_pages)));
2862
2863 /* Revert to the previous phase information for error traceback */
2864 restore_vacuum_error_info(vacrel, &saved_err_info);
2865}
#define lengthof(array)
Definition: c.h:801
#define MaxOffsetNumber
Definition: off.h:28
#define PROGRESS_VACUUM_PHASE_VACUUM_HEAP
Definition: progress.h:38
#define READ_STREAM_USE_BATCHING
Definition: read_stream.h:64
TidStoreIter * TidStoreBeginIterate(TidStore *ts)
Definition: tidstore.c:471
void TidStoreEndIterate(TidStoreIter *iter)
Definition: tidstore.c:518
int TidStoreGetBlockOffsets(TidStoreIterResult *result, OffsetNumber *offsets, int max_offsets)
Definition: tidstore.c:566
static BlockNumber vacuum_reap_lp_read_stream_next(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition: vacuumlazy.c:2720
static void lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
Definition: vacuumlazy.c:2876

References Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_EXCLUSIVE, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, ereport, errmsg(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_vacuum_heap_page(), lengthof, LockBuffer(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAIN_FORKNUM, MaxOffsetNumber, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, PageGetHeapFreeSpace(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, read_stream_begin_relation(), read_stream_end(), READ_STREAM_MAINTENANCE, read_stream_next_buffer(), READ_STREAM_USE_BATCHING, RecordPageWithFreeSpace(), LVRelState::rel, ReleaseBuffer(), LVRelState::relname, restore_vacuum_error_info(), TidStoreBeginIterate(), TidStoreEndIterate(), TidStoreGetBlockOffsets(), UnlockReleaseBuffer(), update_vacuum_error_info(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, vacuum_reap_lp_read_stream_next(), and visibilitymap_pin().

Referenced by lazy_vacuum().
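
The read stream set up above carries per-buffer data: the callback copies a TidStoreIterResult into the slot sized by the final argument of read_stream_begin_relation(), and read_stream_next_buffer() hands that copy back alongside the pinned buffer. A condensed sketch of the consumer side (process_block() is a hypothetical stand-in for the per-page work done in this function):

/* Sketch: consuming buffers and their per-buffer data from a read stream */
for (;;)
{
    TidStoreIterResult *iter_result;
    Buffer      buf;

    buf = read_stream_next_buffer(stream, (void **) &iter_result);
    if (!BufferIsValid(buf))
        break;                  /* the callback returned InvalidBlockNumber */

    /* iter_result is the copy stored by vacuum_reap_lp_read_stream_next() */
    process_block(BufferGetBlockNumber(buf), iter_result);
    ReleaseBuffer(buf);
}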

◆ lazy_vacuum_one_index()

static IndexBulkDeleteResult * lazy_vacuum_one_index ( Relation  indrel,
IndexBulkDeleteResult * istat,
double  reltuples,
LVRelState * vacrel 
)
static

Definition at line 3129 of file vacuumlazy.c.

3131{
3132 IndexVacuumInfo ivinfo;
3133 LVSavedErrInfo saved_err_info;
3134
3135 ivinfo.index = indrel;
3136 ivinfo.heaprel = vacrel->rel;
3137 ivinfo.analyze_only = false;
3138 ivinfo.report_progress = false;
3139 ivinfo.estimated_count = true;
3140 ivinfo.message_level = DEBUG2;
3141 ivinfo.num_heap_tuples = reltuples;
3142 ivinfo.strategy = vacrel->bstrategy;
3143
3144 /*
3145 * Update error traceback information.
3146 *
3147 * The index name is saved during this phase and restored immediately
3148 * after this phase. See vacuum_error_callback.
3149 */
3150 Assert(vacrel->indname == NULL);
3151 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
3152 update_vacuum_error_info(vacrel, &saved_err_info,
3155
3156 /* Do bulk deletion */
3157 istat = vac_bulkdel_one_index(&ivinfo, istat, vacrel->dead_items,
3158 vacrel->dead_items_info);
3159
3160 /* Revert to the previous phase information for error traceback */
3161 restore_vacuum_error_info(vacrel, &saved_err_info);
3162 pfree(vacrel->indname);
3163 vacrel->indname = NULL;
3164
3165 return istat;
3166}
IndexBulkDeleteResult * vac_bulkdel_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat, TidStore *dead_items, VacDeadItemsInfo *dead_items_info)
Definition: vacuum.c:2633

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_bulkdel_one_index(), and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by lazy_vacuum_all_indexes().

◆ restore_vacuum_error_info()

static void restore_vacuum_error_info ( LVRelState * vacrel,
const LVSavedErrInfo * saved_vacrel 
)
static

Definition at line 3965 of file vacuumlazy.c.

3967{
3968 vacrel->blkno = saved_vacrel->blkno;
3969 vacrel->offnum = saved_vacrel->offnum;
3970 vacrel->phase = saved_vacrel->phase;
3971}
BlockNumber blkno
Definition: vacuumlazy.c:416
VacErrPhase phase
Definition: vacuumlazy.c:418
OffsetNumber offnum
Definition: vacuumlazy.c:417

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().

◆ should_attempt_truncation()

static bool should_attempt_truncation ( LVRelState * vacrel)
static

Definition at line 3238 of file vacuumlazy.c.

3239{
3240 BlockNumber possibly_freeable;
3241
3242 if (!vacrel->do_rel_truncate || VacuumFailsafeActive)
3243 return false;
3244
3245 possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
3246 if (possibly_freeable > 0 &&
3247 (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
3248 possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
3249 return true;
3250
3251 return false;
3252}
#define REL_TRUNCATE_MINIMUM
Definition: vacuumlazy.c:168
#define REL_TRUNCATE_FRACTION
Definition: vacuumlazy.c:169

References LVRelState::do_rel_truncate, LVRelState::nonempty_pages, LVRelState::rel_pages, REL_TRUNCATE_FRACTION, REL_TRUNCATE_MINIMUM, and VacuumFailsafeActive.

Referenced by heap_vacuum_rel().
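
With the macro values defined earlier in this file (REL_TRUNCATE_MINIMUM of 1000 pages and REL_TRUNCATE_FRACTION of 16), the effective threshold is min(1000, rel_pages / 16) freeable trailing pages: a 4,800-page table qualifies once its empty tail reaches 300 pages, while any table larger than 16,000 pages needs at least 1,000. Truncation is skipped entirely when do_rel_truncate is false or the wraparound failsafe is active.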

◆ update_relstats_all_indexes()

static void update_relstats_all_indexes ( LVRelState * vacrel)
static

Definition at line 3847 of file vacuumlazy.c.

3848{
3849 Relation *indrels = vacrel->indrels;
3850 int nindexes = vacrel->nindexes;
3851 IndexBulkDeleteResult **indstats = vacrel->indstats;
3852
3853 Assert(vacrel->do_index_cleanup);
3854
3855 for (int idx = 0; idx < nindexes; idx++)
3856 {
3857 Relation indrel = indrels[idx];
3858 IndexBulkDeleteResult *istat = indstats[idx];
3859
3860 if (istat == NULL || istat->estimated_count)
3861 continue;
3862
3863 /* Update index statistics */
3864 vac_update_relstats(indrel,
3865 istat->num_pages,
3866 istat->num_index_tuples,
3867 0, 0,
3868 false,
3871 NULL, NULL, false);
3872 }
3873}
double num_index_tuples
Definition: genam.h:106

References Assert(), LVRelState::do_index_cleanup, IndexBulkDeleteResult::estimated_count, idx(), LVRelState::indrels, LVRelState::indstats, InvalidMultiXactId, InvalidTransactionId, LVRelState::nindexes, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, and vac_update_relstats().

Referenced by heap_vacuum_rel().

◆ update_vacuum_error_info()

static void update_vacuum_error_info ( LVRelState * vacrel,
LVSavedErrInfo * saved_vacrel,
int  phase,
BlockNumber  blkno,
OffsetNumber  offnum 
)
static

Definition at line 3946 of file vacuumlazy.c.

3948{
3949 if (saved_vacrel)
3950 {
3951 saved_vacrel->offnum = vacrel->offnum;
3952 saved_vacrel->blkno = vacrel->blkno;
3953 saved_vacrel->phase = vacrel->phase;
3954 }
3955
3956 vacrel->blkno = blkno;
3957 vacrel->offnum = offnum;
3958 vacrel->phase = phase;
3959}

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_scan_heap(), lazy_truncate_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().
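
update_vacuum_error_info() and restore_vacuum_error_info() are used as a save/restore pair bracketing each phase, so that vacuum_error_callback() reports the correct phase, block and offset if an error is raised mid-phase. A condensed sketch of the usage pattern (the phase and block shown are placeholders):

/* Typical usage: switch error-context state for one phase, then restore it */
LVSavedErrInfo saved_err_info;

update_vacuum_error_info(vacrel, &saved_err_info,
                         VACUUM_ERRCB_PHASE_VACUUM_HEAP,
                         blkno, InvalidOffsetNumber);

/* ... perform the phase's work; errors now carry this context ... */

restore_vacuum_error_info(vacrel, &saved_err_info);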

◆ vacuum_error_callback()

static void vacuum_error_callback ( void *  arg)
static

Definition at line 3882 of file vacuumlazy.c.

3883{
3884 LVRelState *errinfo = arg;
3885
3886 switch (errinfo->phase)
3887 {
3889 if (BlockNumberIsValid(errinfo->blkno))
3890 {
3891 if (OffsetNumberIsValid(errinfo->offnum))
3892 errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
3893 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3894 else
3895 errcontext("while scanning block %u of relation \"%s.%s\"",
3896 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3897 }
3898 else
3899 errcontext("while scanning relation \"%s.%s\"",
3900 errinfo->relnamespace, errinfo->relname);
3901 break;
3902
3904 if (BlockNumberIsValid(errinfo->blkno))
3905 {
3906 if (OffsetNumberIsValid(errinfo->offnum))
3907 errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
3908 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3909 else
3910 errcontext("while vacuuming block %u of relation \"%s.%s\"",
3911 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3912 }
3913 else
3914 errcontext("while vacuuming relation \"%s.%s\"",
3915 errinfo->relnamespace, errinfo->relname);
3916 break;
3917
3919 errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
3920 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3921 break;
3922
3924 errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
3925 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3926 break;
3927
3929 if (BlockNumberIsValid(errinfo->blkno))
3930 errcontext("while truncating relation \"%s.%s\" to %u blocks",
3931 errinfo->relnamespace, errinfo->relname, errinfo->blkno);
3932 break;
3933
3935 default:
3936 return; /* do nothing; the errinfo may not be
3937 * initialized */
3938 }
3939}
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
#define errcontext
Definition: elog.h:198
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
void * arg

References arg, LVRelState::blkno, BlockNumberIsValid(), errcontext, LVRelState::indname, LVRelState::offnum, OffsetNumberIsValid, LVRelState::phase, LVRelState::relname, LVRelState::relnamespace, VACUUM_ERRCB_PHASE_INDEX_CLEANUP, VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_ERRCB_PHASE_UNKNOWN, VACUUM_ERRCB_PHASE_VACUUM_HEAP, and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by heap_vacuum_rel().
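
This callback only adds context lines once it has been pushed onto error_context_stack with the LVRelState as its argument, which heap_vacuum_rel() does before scanning begins. A minimal sketch of the standard registration pattern (shown for illustration; the exact placement lives in heap_vacuum_rel()):

/* Push the vacuum error-context callback, do the work, then pop it */
ErrorContextCallback errcallback;

errcallback.callback = vacuum_error_callback;
errcallback.arg = vacrel;
errcallback.previous = error_context_stack;
error_context_stack = &errcallback;

/* ... heap scan, index vacuuming, truncation ... */

error_context_stack = errcallback.previous;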

◆ vacuum_reap_lp_read_stream_next()

static BlockNumber vacuum_reap_lp_read_stream_next ( ReadStream * stream,
void *  callback_private_data,
void *  per_buffer_data 
)
static

Definition at line 2720 of file vacuumlazy.c.

2723{
2724 TidStoreIter *iter = callback_private_data;
2725 TidStoreIterResult *iter_result;
2726
2727 iter_result = TidStoreIterateNext(iter);
2728 if (iter_result == NULL)
2729 return InvalidBlockNumber;
2730
2731 /*
2732 * Save the TidStoreIterResult for later, so we can extract the offsets.
2733 * It is safe to copy the result, according to TidStoreIterateNext().
2734 */
2735 memcpy(per_buffer_data, iter_result, sizeof(*iter_result));
2736
2737 return iter_result->blkno;
2738}
BlockNumber blkno
Definition: tidstore.h:29
TidStoreIterResult * TidStoreIterateNext(TidStoreIter *iter)
Definition: tidstore.c:493

References TidStoreIterResult::blkno, InvalidBlockNumber, and TidStoreIterateNext().

Referenced by lazy_vacuum_heap_rel().
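
The contract for a read-stream callback is simple: return the next block number to read on each call, stash anything needed later in per_buffer_data, and return InvalidBlockNumber when the stream is exhausted. A minimal callback over a dense block range, purely for illustration (the struct and function names are hypothetical):

/* Hypothetical callback: stream blocks 0 .. nblocks-1 in order */
typedef struct SeqStreamState
{
    BlockNumber next;
    BlockNumber nblocks;
} SeqStreamState;

static BlockNumber
seq_read_stream_next(ReadStream *stream,
                     void *callback_private_data,
                     void *per_buffer_data)
{
    SeqStreamState *state = callback_private_data;

    if (state->next >= state->nblocks)
        return InvalidBlockNumber;

    return state->next++;
}

vacuum_reap_lp_read_stream_next() follows the same contract, except that its block numbers come from the dead-items TID store iterator rather than a counter.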