PostgreSQL Source Code git master
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/genam.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/tidstore.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "common/int.h"
#include "common/pg_prng.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/read_stream.h"
#include "utils/lsyscache.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"

Go to the source code of this file.

Data Structures

struct  LVRelState
 
struct  LVSavedErrInfo
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */
 
#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define ParallelVacuumIsActive(vacrel)   ((vacrel)->pvs != NULL)
 
#define MAX_EAGER_FREEZE_SUCCESS_RATE   0.2
 
#define EAGER_SCAN_REGION_SIZE   4096
 
#define VAC_BLK_WAS_EAGER_SCANNED   (1 << 0)
 
#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM   (1 << 1)
 

Typedefs

typedef struct LVRelState LVRelState
 
typedef struct LVSavedErrInfo LVSavedErrInfo
 

Enumerations

enum  VacErrPhase {
  VACUUM_ERRCB_PHASE_UNKNOWN , VACUUM_ERRCB_PHASE_SCAN_HEAP , VACUUM_ERRCB_PHASE_VACUUM_INDEX , VACUUM_ERRCB_PHASE_VACUUM_HEAP ,
  VACUUM_ERRCB_PHASE_INDEX_CLEANUP , VACUUM_ERRCB_PHASE_TRUNCATE
}
 

Functions

static void lazy_scan_heap (LVRelState *vacrel)
 
static void heap_vacuum_eager_scan_setup (LVRelState *vacrel, VacuumParams *params)
 
static BlockNumber heap_vac_scan_next_block (ReadStream *stream, void *callback_private_data, void *per_buffer_data)
 
static void find_next_unskippable_block (LVRelState *vacrel, bool *skipsallvis)
 
static bool lazy_scan_new_or_empty (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
 
static void lazy_scan_prune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool all_visible_according_to_vm, bool *has_lpdead_items, bool *vm_page_frozen)
 
static bool lazy_scan_noprune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
 
static void lazy_vacuum (LVRelState *vacrel)
 
static bool lazy_vacuum_all_indexes (LVRelState *vacrel)
 
static void lazy_vacuum_heap_rel (LVRelState *vacrel)
 
static void lazy_vacuum_heap_page (LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
 
static bool lazy_check_wraparound_failsafe (LVRelState *vacrel)
 
static void lazy_cleanup_all_indexes (LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_vacuum_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_cleanup_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
 
static bool should_attempt_truncation (LVRelState *vacrel)
 
static void lazy_truncate_heap (LVRelState *vacrel)
 
static BlockNumber count_nondeletable_pages (LVRelState *vacrel, bool *lock_waiter_detected)
 
static void dead_items_alloc (LVRelState *vacrel, int nworkers)
 
static void dead_items_add (LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
 
static void dead_items_reset (LVRelState *vacrel)
 
static void dead_items_cleanup (LVRelState *vacrel)
 
static bool heap_page_is_all_visible (LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
 
static void update_relstats_all_indexes (LVRelState *vacrel)
 
static void vacuum_error_callback (void *arg)
 
static void update_vacuum_error_info (LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
 
static void restore_vacuum_error_info (LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
 
void heap_vacuum_rel (Relation rel, VacuumParams *params, BufferAccessStrategy bstrategy)
 
static int cmpOffsetNumbers (const void *a, const void *b)
 
static BlockNumber vacuum_reap_lp_read_stream_next (ReadStream *stream, void *callback_private_data, void *per_buffer_data)
 

Macro Definition Documentation

◆ BYPASS_THRESHOLD_PAGES

#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */

Definition at line 187 of file vacuumlazy.c.
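
A minimal standalone sketch of the threshold arithmetic only (the real bypass decision in lazy_vacuum() also depends on the failsafe, the index_cleanup setting, and dead-item memory usage); bypass_threshold_met() is an illustrative helper, not a PostgreSQL function:

/* Illustrative only: would a table of rel_pages blocks with LP_DEAD items on
 * lpdead_item_pages blocks fall under the 2% bypass threshold? */
#include <stdbool.h>
#include <stdio.h>

static bool
bypass_threshold_met(unsigned rel_pages, unsigned lpdead_item_pages)
{
    return lpdead_item_pages < rel_pages * 0.02;    /* BYPASS_THRESHOLD_PAGES */
}

int
main(void)
{
    printf("%d\n", bypass_threshold_met(100000, 1500));    /* 1: under the 2000-page cap */
    return 0;
}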

◆ EAGER_SCAN_REGION_SIZE

#define EAGER_SCAN_REGION_SIZE   4096

Definition at line 250 of file vacuumlazy.c.

◆ FAILSAFE_EVERY_PAGES

#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 193 of file vacuumlazy.c.
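
Both FAILSAFE_EVERY_PAGES and VACUUM_FSM_EVERY_PAGES turn a byte interval into a block count. A worked example, assuming the default 8 kB block size (MY_BLCKSZ stands in for BLCKSZ):

/* Illustrative only: the 4 GB failsafe interval and 8 GB FSM interval
 * expressed in blocks for an 8 kB BLCKSZ. */
#include <stdint.h>
#include <stdio.h>

#define MY_BLCKSZ 8192

int
main(void)
{
    uint32_t failsafe_every = (uint64_t) 4 * 1024 * 1024 * 1024 / MY_BLCKSZ;
    uint32_t fsm_every = (uint64_t) 8 * 1024 * 1024 * 1024 / MY_BLCKSZ;

    printf("failsafe recheck every %u blocks\n", failsafe_every);   /* 524288 */
    printf("FSM vacuumed every %u blocks\n", fsm_every);            /* 1048576 */
    return 0;
}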

◆ MAX_EAGER_FREEZE_SUCCESS_RATE

#define MAX_EAGER_FREEZE_SUCCESS_RATE   0.2

Definition at line 241 of file vacuumlazy.c.

◆ ParallelVacuumIsActive

#define ParallelVacuumIsActive (   vacrel)    ((vacrel)->pvs != NULL)

Definition at line 221 of file vacuumlazy.c.

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 215 of file vacuumlazy.c.
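
PREFETCH_SIZE must be a power of two so that count_nondeletable_pages() can round a block number down to the start of its prefetch window with a simple mask. A standalone illustration (MY_PREFETCH_SIZE stands in for the real macro):

/* Illustrative only: blkno & ~(PREFETCH_SIZE - 1) rounds down to the start
 * of the 32-block window containing blkno. */
#include <stdint.h>
#include <stdio.h>

#define MY_PREFETCH_SIZE ((uint32_t) 32)

int
main(void)
{
    uint32_t blkno = 1000;
    uint32_t prefetch_start = blkno & ~(MY_PREFETCH_SIZE - 1);

    printf("window for block %u starts at block %u\n", blkno, prefetch_start);  /* 992 */
    return 0;
}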

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 170 of file vacuumlazy.c.

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 169 of file vacuumlazy.c.

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 209 of file vacuumlazy.c.

◆ VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM

#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM   (1 << 1)

Definition at line 257 of file vacuumlazy.c.

◆ VAC_BLK_WAS_EAGER_SCANNED

#define VAC_BLK_WAS_EAGER_SCANNED   (1 << 0)

Definition at line 256 of file vacuumlazy.c.

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 202 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 179 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 181 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 180 of file vacuumlazy.c.

Typedef Documentation

◆ LVRelState

typedef struct LVRelState LVRelState

◆ LVSavedErrInfo

typedef struct LVSavedErrInfo LVSavedErrInfo

Enumeration Type Documentation

◆ VacErrPhase

Enumerator
VACUUM_ERRCB_PHASE_UNKNOWN 
VACUUM_ERRCB_PHASE_SCAN_HEAP 
VACUUM_ERRCB_PHASE_VACUUM_INDEX 
VACUUM_ERRCB_PHASE_VACUUM_HEAP 
VACUUM_ERRCB_PHASE_INDEX_CLEANUP 
VACUUM_ERRCB_PHASE_TRUNCATE 

Definition at line 224 of file vacuumlazy.c.

224typedef enum
225{
226 VACUUM_ERRCB_PHASE_UNKNOWN,
227 VACUUM_ERRCB_PHASE_SCAN_HEAP,
228 VACUUM_ERRCB_PHASE_VACUUM_INDEX,
229 VACUUM_ERRCB_PHASE_VACUUM_HEAP,
230 VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
231 VACUUM_ERRCB_PHASE_TRUNCATE,
232} VacErrPhase;
VacErrPhase
Definition: vacuumlazy.c:225
@ VACUUM_ERRCB_PHASE_SCAN_HEAP
Definition: vacuumlazy.c:227
@ VACUUM_ERRCB_PHASE_VACUUM_INDEX
Definition: vacuumlazy.c:228
@ VACUUM_ERRCB_PHASE_TRUNCATE
Definition: vacuumlazy.c:231
@ VACUUM_ERRCB_PHASE_INDEX_CLEANUP
Definition: vacuumlazy.c:230
@ VACUUM_ERRCB_PHASE_VACUUM_HEAP
Definition: vacuumlazy.c:229
@ VACUUM_ERRCB_PHASE_UNKNOWN
Definition: vacuumlazy.c:226

Function Documentation

◆ cmpOffsetNumbers()

static int cmpOffsetNumbers ( const void *  a,
const void *  b 
)
static

Definition at line 1917 of file vacuumlazy.c.

1918{
1919 return pg_cmp_u16(*(const OffsetNumber *) a, *(const OffsetNumber *) b);
1920}
static int pg_cmp_u16(uint16 a, uint16 b)
Definition: int.h:640
int b
Definition: isn.c:71
int a
Definition: isn.c:70
uint16 OffsetNumber
Definition: off.h:24

References a, b, and pg_cmp_u16().

Referenced by lazy_scan_prune().
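
cmpOffsetNumbers() is a qsort()-style comparator; lazy_scan_prune() uses it to sort the dead item offsets collected from a page. A standalone analogue, with OffsetNumber modelled as uint16_t and my_cmp_offsets() mirroring the pg_cmp_u16() ordering (both names here are illustrative stand-ins):

/* Illustrative only: sorting a page's dead item offsets before recording them. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint16_t MyOffsetNumber;

static int
my_cmp_offsets(const void *a, const void *b)
{
    MyOffsetNumber va = *(const MyOffsetNumber *) a;
    MyOffsetNumber vb = *(const MyOffsetNumber *) b;

    return (va > vb) - (va < vb);   /* same ordering as pg_cmp_u16() */
}

int
main(void)
{
    MyOffsetNumber deadoffsets[] = {17, 3, 42, 9};

    qsort(deadoffsets, 4, sizeof(MyOffsetNumber), my_cmp_offsets);
    for (int i = 0; i < 4; i++)
        printf("%u ", deadoffsets[i]);      /* 3 9 17 42 */
    printf("\n");
    return 0;
}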

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages ( LVRelState vacrel,
bool *  lock_waiter_detected 
)
static

Definition at line 3326 of file vacuumlazy.c.

3327{
3328 BlockNumber blkno;
3329 BlockNumber prefetchedUntil;
3330 instr_time starttime;
3331
3332 /* Initialize the starttime if we check for conflicting lock requests */
3333 INSTR_TIME_SET_CURRENT(starttime);
3334
3335 /*
3336 * Start checking blocks at what we believe relation end to be and move
3337 * backwards. (Strange coding of loop control is needed because blkno is
3338 * unsigned.) To make the scan faster, we prefetch a few blocks at a time
3339 * in forward direction, so that OS-level readahead can kick in.
3340 */
3341 blkno = vacrel->rel_pages;
3343 "prefetch size must be power of 2");
3344 prefetchedUntil = InvalidBlockNumber;
3345 while (blkno > vacrel->nonempty_pages)
3346 {
3347 Buffer buf;
3348 Page page;
3349 OffsetNumber offnum,
3350 maxoff;
3351 bool hastup;
3352
3353 /*
3354 * Check if another process requests a lock on our relation. We are
3355 * holding an AccessExclusiveLock here, so they will be waiting. We
3356 * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
3357 * only check if that interval has elapsed once every 32 blocks to
3358 * keep the number of system calls and actual shared lock table
3359 * lookups to a minimum.
3360 */
3361 if ((blkno % 32) == 0)
3362 {
3363 instr_time currenttime;
3364 instr_time elapsed;
3365
3366 INSTR_TIME_SET_CURRENT(currenttime);
3367 elapsed = currenttime;
3368 INSTR_TIME_SUBTRACT(elapsed, starttime);
3369 if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
3370 >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
3371 {
3372 if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
3373 {
3374 ereport(vacrel->verbose ? INFO : DEBUG2,
3375 (errmsg("table \"%s\": suspending truncate due to conflicting lock request",
3376 vacrel->relname)));
3377
3378 *lock_waiter_detected = true;
3379 return blkno;
3380 }
3381 starttime = currenttime;
3382 }
3383 }
3384
3385 /*
3386 * We don't insert a vacuum delay point here, because we have an
3387 * exclusive lock on the table which we want to hold for as short a
3388 * time as possible. We still need to check for interrupts however.
3389 */
3390 CHECK_FOR_INTERRUPTS();
3391
3392 blkno--;
3393
3394 /* If we haven't prefetched this lot yet, do so now. */
3395 if (prefetchedUntil > blkno)
3396 {
3397 BlockNumber prefetchStart;
3398 BlockNumber pblkno;
3399
3400 prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
3401 for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
3402 {
3403 PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
3404 CHECK_FOR_INTERRUPTS();
3405 }
3406 prefetchedUntil = prefetchStart;
3407 }
3408
3409 buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
3410 vacrel->bstrategy);
3411
3412 /* In this phase we only need shared access to the buffer */
3413 LockBuffer(buf, BUFFER_LOCK_SHARE);
3414
3415 page = BufferGetPage(buf);
3416
3417 if (PageIsNew(page) || PageIsEmpty(page))
3418 {
3419 UnlockReleaseBuffer(buf);
3420 continue;
3421 }
3422
3423 hastup = false;
3424 maxoff = PageGetMaxOffsetNumber(page);
3425 for (offnum = FirstOffsetNumber;
3426 offnum <= maxoff;
3427 offnum = OffsetNumberNext(offnum))
3428 {
3429 ItemId itemid;
3430
3431 itemid = PageGetItemId(page, offnum);
3432
3433 /*
3434 * Note: any non-unused item should be taken as a reason to keep
3435 * this page. Even an LP_DEAD item makes truncation unsafe, since
3436 * we must not have cleaned out its index entries.
3437 */
3438 if (ItemIdIsUsed(itemid))
3439 {
3440 hastup = true;
3441 break; /* can stop scanning */
3442 }
3443 } /* scan along page */
3444
3445 UnlockReleaseBuffer(buf);
3446
3447 /* Done scanning if we found a tuple here */
3448 if (hastup)
3449 return blkno + 1;
3450 }
3451
3452 /*
3453 * If we fall out of the loop, all the previously-thought-to-be-empty
3454 * pages still are; we need not bother to look at the last known-nonempty
3455 * page.
3456 */
3457 return vacrel->nonempty_pages;
3458}
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
int Buffer
Definition: buf.h:23
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:644
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4934
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:5151
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:798
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:191
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:401
@ RBM_NORMAL
Definition: bufmgr.h:45
static bool PageIsEmpty(const PageData *page)
Definition: bufpage.h:224
static bool PageIsNew(const PageData *page)
Definition: bufpage.h:234
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:244
PageData * Page
Definition: bufpage.h:82
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition: bufpage.h:372
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:909
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define DEBUG2
Definition: elog.h:29
#define INFO
Definition: elog.h:34
#define ereport(elevel,...)
Definition: elog.h:149
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:122
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:181
#define INSTR_TIME_GET_MICROSEC(t)
Definition: instr_time.h:194
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:367
#define AccessExclusiveLock
Definition: lockdefs.h:43
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
#define FirstOffsetNumber
Definition: off.h:27
static char * buf
Definition: pg_test_fsync.c:72
@ MAIN_FORKNUM
Definition: relpath.h:58
bool verbose
Definition: vacuumlazy.c:298
BlockNumber nonempty_pages
Definition: vacuumlazy.c:341
Relation rel
Definition: vacuumlazy.c:262
BlockNumber rel_pages
Definition: vacuumlazy.c:313
BufferAccessStrategy bstrategy
Definition: vacuumlazy.c:267
char * relname
Definition: vacuumlazy.c:293
#define PREFETCH_SIZE
Definition: vacuumlazy.c:215
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL
Definition: vacuumlazy.c:179

References AccessExclusiveLock, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BufferGetPage(), CHECK_FOR_INTERRUPTS, DEBUG2, ereport, errmsg(), FirstOffsetNumber, INFO, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelState::nonempty_pages, OffsetNumberNext, PageGetItemId(), PageGetMaxOffsetNumber(), PageIsEmpty(), PageIsNew(), PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelState::rel, LVRelState::rel_pages, LVRelState::relname, StaticAssertStmt, UnlockReleaseBuffer(), VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and LVRelState::verbose.

Referenced by lazy_truncate_heap().
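
A condensed, standalone sketch of the suspension test inside the loop above: the clock is consulted only once per 32 blocks, and the backward scan is abandoned only when at least VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL has elapsed and someone is waiting on the lock. should_suspend_truncate() and its parameters are illustrative stand-ins for the LVRelState fields and the LockHasWaitersRelation() call:

/* Illustrative only: when does the truncation pre-scan yield to a waiter? */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHECK_INTERVAL_MS 20    /* VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL */

static bool
should_suspend_truncate(uint32_t blkno, int64_t elapsed_ms, bool lock_waiter_seen)
{
    if (blkno % 32 != 0)                    /* keep clock lookups rare */
        return false;
    if (elapsed_ms < CHECK_INTERVAL_MS)     /* interval not yet elapsed */
        return false;
    return lock_waiter_seen;                /* someone is queued behind our lock */
}

int
main(void)
{
    printf("%d\n", should_suspend_truncate(992, 25, true));    /* 1 */
    printf("%d\n", should_suspend_truncate(993, 25, true));    /* 0 */
    return 0;
}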

◆ dead_items_add()

static void dead_items_add ( LVRelState vacrel,
BlockNumber  blkno,
OffsetNumber offsets,
int  num_offsets 
)
static

Definition at line 3533 of file vacuumlazy.c.

3535{
3536 const int prog_index[2] = {
3537 PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS,
3538 PROGRESS_VACUUM_DEAD_TUPLE_BYTES
3539 };
3540 int64 prog_val[2];
3541
3542 TidStoreSetBlockOffsets(vacrel->dead_items, blkno, offsets, num_offsets);
3543 vacrel->dead_items_info->num_items += num_offsets;
3544
3545 /* update the progress information */
3546 prog_val[0] = vacrel->dead_items_info->num_items;
3547 prog_val[1] = TidStoreMemoryUsage(vacrel->dead_items);
3548 pgstat_progress_update_multi_param(2, prog_index, prog_val);
3549}
void pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val)
int64_t int64
Definition: c.h:499
#define PROGRESS_VACUUM_DEAD_TUPLE_BYTES
Definition: progress.h:27
#define PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS
Definition: progress.h:28
VacDeadItemsInfo * dead_items_info
Definition: vacuumlazy.c:311
TidStore * dead_items
Definition: vacuumlazy.c:310
int64 num_items
Definition: vacuum.h:295
void TidStoreSetBlockOffsets(TidStore *ts, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
Definition: tidstore.c:345
size_t TidStoreMemoryUsage(TidStore *ts)
Definition: tidstore.c:532

References LVRelState::dead_items, LVRelState::dead_items_info, VacDeadItemsInfo::num_items, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS, TidStoreMemoryUsage(), and TidStoreSetBlockOffsets().

Referenced by lazy_scan_noprune(), and lazy_scan_prune().
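
The callers accumulate a page's LP_DEAD offsets in a local array while processing it and then hand the whole batch to dead_items_add() once per block. A standalone sketch of that calling pattern, with my_dead_items_add() as a printing stub rather than the real TidStore insertion and progress update:

/* Illustrative only: one batched call per heap block. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t MyBlockNumber;
typedef uint16_t MyOffsetNumber;

static void
my_dead_items_add(MyBlockNumber blkno, const MyOffsetNumber *offsets, int n)
{
    printf("block %u: %d dead item offsets recorded\n", blkno, n);
}

int
main(void)
{
    MyOffsetNumber deadoffsets[] = {3, 9, 17};  /* collected while pruning one page */

    my_dead_items_add(42, deadoffsets, 3);
    return 0;
}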

◆ dead_items_alloc()

static void dead_items_alloc ( LVRelState vacrel,
int  nworkers 
)
static

Definition at line 3468 of file vacuumlazy.c.

3469{
3470 VacDeadItemsInfo *dead_items_info;
3471 int vac_work_mem = AmAutoVacuumWorkerProcess() &&
3472 autovacuum_work_mem != -1 ?
3473 autovacuum_work_mem : maintenance_work_mem;
3474
3475 /*
3476 * Initialize state for a parallel vacuum. As of now, only one worker can
3477 * be used for an index, so we invoke parallelism only if there are at
3478 * least two indexes on a table.
3479 */
3480 if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)
3481 {
3482 /*
3483 * Since parallel workers cannot access data in temporary tables, we
3484 * can't perform parallel vacuum on them.
3485 */
3486 if (RelationUsesLocalBuffers(vacrel->rel))
3487 {
3488 /*
3489 * Give warning only if the user explicitly tries to perform a
3490 * parallel vacuum on the temporary table.
3491 */
3492 if (nworkers > 0)
3493 ereport(WARNING,
3494 (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
3495 vacrel->relname)));
3496 }
3497 else
3498 vacrel->pvs = parallel_vacuum_init(vacrel->rel, vacrel->indrels,
3499 vacrel->nindexes, nworkers,
3500 vac_work_mem,
3501 vacrel->verbose ? INFO : DEBUG2,
3502 vacrel->bstrategy);
3503
3504 /*
3505 * If parallel mode started, dead_items and dead_items_info spaces are
3506 * allocated in DSM.
3507 */
3508 if (ParallelVacuumIsActive(vacrel))
3509 {
3510 vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs,
3511 &vacrel->dead_items_info);
3512 return;
3513 }
3514 }
3515
3516 /*
3517 * Serial VACUUM case. Allocate both dead_items and dead_items_info
3518 * locally.
3519 */
3520
3521 dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));
3522 dead_items_info->max_bytes = vac_work_mem * (Size) 1024;
3523 dead_items_info->num_items = 0;
3524 vacrel->dead_items_info = dead_items_info;
3525
3526 vacrel->dead_items = TidStoreCreateLocal(dead_items_info->max_bytes, true);
3527}
int autovacuum_work_mem
Definition: autovacuum.c:121
size_t Size
Definition: c.h:576
#define WARNING
Definition: elog.h:36
int maintenance_work_mem
Definition: globals.c:132
void * palloc(Size size)
Definition: mcxt.c:1317
#define AmAutoVacuumWorkerProcess()
Definition: miscadmin.h:382
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:645
ParallelVacuumState * pvs
Definition: vacuumlazy.c:268
int nindexes
Definition: vacuumlazy.c:264
Relation * indrels
Definition: vacuumlazy.c:263
bool do_index_vacuuming
Definition: vacuumlazy.c:278
size_t max_bytes
Definition: vacuum.h:294
TidStore * TidStoreCreateLocal(size_t max_bytes, bool insert_only)
Definition: tidstore.c:162
#define ParallelVacuumIsActive(vacrel)
Definition: vacuumlazy.c:221
TidStore * parallel_vacuum_get_dead_items(ParallelVacuumState *pvs, VacDeadItemsInfo **dead_items_info_p)
ParallelVacuumState * parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, int nrequested_workers, int vac_work_mem, int elevel, BufferAccessStrategy bstrategy)

References AmAutoVacuumWorkerProcess, autovacuum_work_mem, LVRelState::bstrategy, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_vacuuming, ereport, errmsg(), LVRelState::indrels, INFO, maintenance_work_mem, VacDeadItemsInfo::max_bytes, LVRelState::nindexes, VacDeadItemsInfo::num_items, palloc(), parallel_vacuum_get_dead_items(), parallel_vacuum_init(), ParallelVacuumIsActive, LVRelState::pvs, LVRelState::rel, RelationUsesLocalBuffers, LVRelState::relname, TidStoreCreateLocal(), LVRelState::verbose, and WARNING.

Referenced by heap_vacuum_rel().
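
A worked sketch of the memory-budget arithmetic at the top of dead_items_alloc(): autovacuum workers use autovacuum_work_mem when it is set (not -1), other backends use maintenance_work_mem, and the kilobyte value becomes the byte cap stored in dead_items_info->max_bytes. dead_items_max_bytes() is an illustrative helper, not a PostgreSQL function:

/* Illustrative only: choosing and converting the dead-items memory budget. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t
dead_items_max_bytes(bool am_autovacuum_worker,
                     int autovacuum_work_mem_kb,    /* -1 means "not set" */
                     int maintenance_work_mem_kb)
{
    int vac_work_mem = (am_autovacuum_worker && autovacuum_work_mem_kb != -1)
        ? autovacuum_work_mem_kb
        : maintenance_work_mem_kb;

    return (size_t) vac_work_mem * 1024;
}

int
main(void)
{
    /* autovacuum worker, autovacuum_work_mem unset, maintenance_work_mem = 65536 kB */
    printf("%zu bytes\n", dead_items_max_bytes(true, -1, 65536));   /* 67108864 */
    return 0;
}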

◆ dead_items_cleanup()

static void dead_items_cleanup ( LVRelState vacrel)
static

Definition at line 3575 of file vacuumlazy.c.

3576{
3577 if (!ParallelVacuumIsActive(vacrel))
3578 {
3579 /* Don't bother with pfree here */
3580 return;
3581 }
3582
3583 /* End parallel mode */
3584 parallel_vacuum_end(vacrel->pvs, vacrel->indstats);
3585 vacrel->pvs = NULL;
3586}
IndexBulkDeleteResult ** indstats
Definition: vacuumlazy.c:347
void parallel_vacuum_end(ParallelVacuumState *pvs, IndexBulkDeleteResult **istats)

References LVRelState::indstats, parallel_vacuum_end(), ParallelVacuumIsActive, and LVRelState::pvs.

Referenced by heap_vacuum_rel().

◆ dead_items_reset()

static void dead_items_reset ( LVRelState vacrel)
static

Definition at line 3555 of file vacuumlazy.c.

3556{
3557 if (ParallelVacuumIsActive(vacrel))
3558 {
3559 parallel_vacuum_reset_dead_items(vacrel->pvs);
3560 return;
3561 }
3562
3563 /* Recreate the tidstore with the same max_bytes limitation */
3564 TidStoreDestroy(vacrel->dead_items);
3565 vacrel->dead_items = TidStoreCreateLocal(vacrel->dead_items_info->max_bytes, true);
3566
3567 /* Reset the counter */
3568 vacrel->dead_items_info->num_items = 0;
3569}
void TidStoreDestroy(TidStore *ts)
Definition: tidstore.c:317
void parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs)

References LVRelState::dead_items, LVRelState::dead_items_info, VacDeadItemsInfo::max_bytes, VacDeadItemsInfo::num_items, parallel_vacuum_reset_dead_items(), ParallelVacuumIsActive, LVRelState::pvs, TidStoreCreateLocal(), and TidStoreDestroy().

Referenced by lazy_vacuum().

◆ find_next_unskippable_block()

static void find_next_unskippable_block ( LVRelState vacrel,
bool *  skipsallvis 
)
static

Definition at line 1665 of file vacuumlazy.c.

1666{
1667 BlockNumber rel_pages = vacrel->rel_pages;
1668 BlockNumber next_unskippable_block = vacrel->next_unskippable_block + 1;
1669 Buffer next_unskippable_vmbuffer = vacrel->next_unskippable_vmbuffer;
1670 bool next_unskippable_eager_scanned = false;
1671 bool next_unskippable_allvis;
1672
1673 *skipsallvis = false;
1674
1675 for (;; next_unskippable_block++)
1676 {
1677 uint8 mapbits = visibilitymap_get_status(vacrel->rel,
1678 next_unskippable_block,
1679 &next_unskippable_vmbuffer);
1680
1681 next_unskippable_allvis = (mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0;
1682
1683 /*
1684 * At the start of each eager scan region, normal vacuums with eager
1685 * scanning enabled reset the failure counter, allowing vacuum to
1686 * resume eager scanning if it had been suspended in the previous
1687 * region.
1688 */
1689 if (next_unskippable_block >= vacrel->next_eager_scan_region_start)
1690 {
1691 vacrel->eager_scan_remaining_fails =
1692 vacrel->eager_scan_max_fails_per_region;
1693 vacrel->next_eager_scan_region_start += EAGER_SCAN_REGION_SIZE;
1694 }
1695
1696 /*
1697 * A block is unskippable if it is not all visible according to the
1698 * visibility map.
1699 */
1700 if (!next_unskippable_allvis)
1701 {
1702 Assert((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0);
1703 break;
1704 }
1705
1706 /*
1707 * Caller must scan the last page to determine whether it has tuples
1708 * (caller must have the opportunity to set vacrel->nonempty_pages).
1709 * This rule avoids having lazy_truncate_heap() take access-exclusive
1710 * lock on rel to attempt a truncation that fails anyway, just because
1711 * there are tuples on the last page (it is likely that there will be
1712 * tuples on other nearby pages as well, but those can be skipped).
1713 *
1714 * Implement this by always treating the last block as unsafe to skip.
1715 */
1716 if (next_unskippable_block == rel_pages - 1)
1717 break;
1718
1719 /* DISABLE_PAGE_SKIPPING makes all skipping unsafe */
1720 if (!vacrel->skipwithvm)
1721 break;
1722
1723 /*
1724 * All-frozen pages cannot contain XIDs < OldestXmin (XIDs that aren't
1725 * already frozen by now), so this page can be skipped.
1726 */
1727 if ((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0)
1728 continue;
1729
1730 /*
1731 * Aggressive vacuums cannot skip any all-visible pages that are not
1732 * also all-frozen.
1733 */
1734 if (vacrel->aggressive)
1735 break;
1736
1737 /*
1738 * Normal vacuums with eager scanning enabled only skip all-visible
1739 * but not all-frozen pages if they have hit the failure limit for the
1740 * current eager scan region.
1741 */
1742 if (vacrel->eager_scan_remaining_fails > 0)
1743 {
1744 next_unskippable_eager_scanned = true;
1745 break;
1746 }
1747
1748 /*
1749 * All-visible blocks are safe to skip in a normal vacuum. But
1750 * remember that the final range contains such a block for later.
1751 */
1752 *skipsallvis = true;
1753 }
1754
1755 /* write the local variables back to vacrel */
1756 vacrel->next_unskippable_block = next_unskippable_block;
1757 vacrel->next_unskippable_allvis = next_unskippable_allvis;
1758 vacrel->next_unskippable_eager_scanned = next_unskippable_eager_scanned;
1759 vacrel->next_unskippable_vmbuffer = next_unskippable_vmbuffer;
1760}
uint8_t uint8
Definition: c.h:500
Assert(PointerIsAligned(start, uint64))
BlockNumber next_eager_scan_region_start
Definition: vacuumlazy.c:378
bool next_unskippable_eager_scanned
Definition: vacuumlazy.c:363
Buffer next_unskippable_vmbuffer
Definition: vacuumlazy.c:364
BlockNumber eager_scan_remaining_fails
Definition: vacuumlazy.c:410
bool aggressive
Definition: vacuumlazy.c:271
BlockNumber next_unskippable_block
Definition: vacuumlazy.c:361
bool skipwithvm
Definition: vacuumlazy.c:273
bool next_unskippable_allvis
Definition: vacuumlazy.c:362
BlockNumber eager_scan_max_fails_per_region
Definition: vacuumlazy.c:400
#define EAGER_SCAN_REGION_SIZE
Definition: vacuumlazy.c:250
uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE

References LVRelState::aggressive, Assert(), LVRelState::eager_scan_max_fails_per_region, EAGER_SCAN_REGION_SIZE, LVRelState::eager_scan_remaining_fails, LVRelState::next_eager_scan_region_start, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::rel, LVRelState::rel_pages, LVRelState::skipwithvm, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_get_status().

Referenced by heap_vac_scan_next_block().
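
The loop above applies its checks in a fixed order. A hedged standalone condensation of that per-block decision, with block_is_unskippable() as an illustrative helper taking plain flags instead of reading the visibility map and LVRelState:

/* Illustrative only: is this block one the caller must scan? */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
block_is_unskippable(bool all_visible, bool all_frozen, bool is_last_block,
                     bool skipwithvm, bool aggressive,
                     uint32_t eager_scan_remaining_fails)
{
    if (!all_visible)
        return true;                    /* VM says not all-visible: must scan */
    if (is_last_block)
        return true;                    /* last page is always scanned */
    if (!skipwithvm)
        return true;                    /* DISABLE_PAGE_SKIPPING */
    if (all_frozen)
        return false;                   /* all-frozen pages are safe to skip */
    if (aggressive)
        return true;                    /* aggressive vacuums scan all-visible pages */
    return eager_scan_remaining_fails > 0;  /* eager-scan budget left in this region? */
}

int
main(void)
{
    /* all-visible, not all-frozen block in a normal vacuum with budget left */
    printf("%d\n", block_is_unskippable(true, false, false, true, false, 10));  /* 1 */
    return 0;
}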

◆ heap_page_is_all_visible()

static bool heap_page_is_all_visible ( LVRelState vacrel,
Buffer  buf,
TransactionId visibility_cutoff_xid,
bool *  all_frozen 
)
static

Definition at line 3600 of file vacuumlazy.c.

3603{
3604 Page page = BufferGetPage(buf);
3605 BlockNumber blockno = BufferGetBlockNumber(buf);
3606 OffsetNumber offnum,
3607 maxoff;
3608 bool all_visible = true;
3609
3610 *visibility_cutoff_xid = InvalidTransactionId;
3611 *all_frozen = true;
3612
3613 maxoff = PageGetMaxOffsetNumber(page);
3614 for (offnum = FirstOffsetNumber;
3615 offnum <= maxoff && all_visible;
3616 offnum = OffsetNumberNext(offnum))
3617 {
3618 ItemId itemid;
3619 HeapTupleData tuple;
3620
3621 /*
3622 * Set the offset number so that we can display it along with any
3623 * error that occurred while processing this tuple.
3624 */
3625 vacrel->offnum = offnum;
3626 itemid = PageGetItemId(page, offnum);
3627
3628 /* Unused or redirect line pointers are of no interest */
3629 if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
3630 continue;
3631
3632 ItemPointerSet(&(tuple.t_self), blockno, offnum);
3633
3634 /*
3635 * Dead line pointers can have index pointers pointing to them. So
3636 * they can't be treated as visible
3637 */
3638 if (ItemIdIsDead(itemid))
3639 {
3640 all_visible = false;
3641 *all_frozen = false;
3642 break;
3643 }
3644
3645 Assert(ItemIdIsNormal(itemid));
3646
3647 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
3648 tuple.t_len = ItemIdGetLength(itemid);
3649 tuple.t_tableOid = RelationGetRelid(vacrel->rel);
3650
3651 switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
3652 buf))
3653 {
3654 case HEAPTUPLE_LIVE:
3655 {
3656 TransactionId xmin;
3657
3658 /* Check comments in lazy_scan_prune. */
3659 if (!HeapTupleHeaderXminCommitted(tuple.t_data))
3660 {
3661 all_visible = false;
3662 *all_frozen = false;
3663 break;
3664 }
3665
3666 /*
3667 * The inserter definitely committed. But is it old enough
3668 * that everyone sees it as committed?
3669 */
3670 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
3671 if (!TransactionIdPrecedes(xmin,
3672 vacrel->cutoffs.OldestXmin))
3673 {
3674 all_visible = false;
3675 *all_frozen = false;
3676 break;
3677 }
3678
3679 /* Track newest xmin on page. */
3680 if (TransactionIdFollows(xmin, *visibility_cutoff_xid) &&
3681 TransactionIdIsNormal(xmin))
3682 *visibility_cutoff_xid = xmin;
3683
3684 /* Check whether this tuple is already frozen or not */
3685 if (all_visible && *all_frozen &&
3686 heap_tuple_needs_eventual_freeze(tuple.t_data))
3687 *all_frozen = false;
3688 }
3689 break;
3690
3691 case HEAPTUPLE_DEAD:
3692 case HEAPTUPLE_RECENTLY_DEAD:
3693 case HEAPTUPLE_INSERT_IN_PROGRESS:
3694 case HEAPTUPLE_DELETE_IN_PROGRESS:
3695 {
3696 all_visible = false;
3697 *all_frozen = false;
3698 break;
3699 }
3700 default:
3701 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
3702 break;
3703 }
3704 } /* scan along page */
3705
3706 /* Clear the offset information once we have processed the given page. */
3707 vacrel->offnum = InvalidOffsetNumber;
3708
3709 return all_visible;
3710}
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:3795
static Item PageGetItem(const PageData *page, const ItemIdData *itemId)
Definition: bufpage.h:354
uint32 TransactionId
Definition: c.h:623
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
Definition: heapam.c:7799
@ HEAPTUPLE_RECENTLY_DEAD
Definition: heapam.h:136
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition: heapam.h:137
@ HEAPTUPLE_LIVE
Definition: heapam.h:135
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition: heapam.h:138
@ HEAPTUPLE_DEAD
Definition: heapam.h:134
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static TransactionId HeapTupleHeaderGetXmin(const HeapTupleHeaderData *tup)
Definition: htup_details.h:324
static bool HeapTupleHeaderXminCommitted(const HeapTupleHeaderData *tup)
Definition: htup_details.h:337
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition: itemptr.h:135
#define InvalidOffsetNumber
Definition: off.h:26
#define RelationGetRelid(relation)
Definition: rel.h:513
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
OffsetNumber offnum
Definition: vacuumlazy.c:296
struct VacuumCutoffs cutoffs
Definition: vacuumlazy.c:283
TransactionId OldestXmin
Definition: vacuum.h:274
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:280
bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition: transam.c:314
#define InvalidTransactionId
Definition: transam.h:31
#define TransactionIdIsNormal(xid)
Definition: transam.h:42

References Assert(), buf, BufferGetBlockNumber(), BufferGetPage(), LVRelState::cutoffs, elog, ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin(), HeapTupleHeaderXminCommitted(), HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdIsNormal, and TransactionIdPrecedes().

Referenced by lazy_scan_prune(), and lazy_vacuum_heap_page().

◆ heap_vac_scan_next_block()

static BlockNumber heap_vac_scan_next_block ( ReadStream stream,
void *  callback_private_data,
void *  per_buffer_data 
)
static

Definition at line 1560 of file vacuumlazy.c.

1563{
1564 BlockNumber next_block;
1565 LVRelState *vacrel = callback_private_data;
1566 uint8 blk_info = 0;
1567
1568 /* relies on InvalidBlockNumber + 1 overflowing to 0 on first call */
1569 next_block = vacrel->current_block + 1;
1570
1571 /* Have we reached the end of the relation? */
1572 if (next_block >= vacrel->rel_pages)
1573 {
1574 if (BufferIsValid(vacrel->next_unskippable_vmbuffer))
1575 {
1576 ReleaseBuffer(vacrel->next_unskippable_vmbuffer);
1577 vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1578 }
1579 return InvalidBlockNumber;
1580 }
1581
1582 /*
1583 * We must be in one of the three following states:
1584 */
1585 if (next_block > vacrel->next_unskippable_block ||
1586 !BufferIsValid(vacrel->next_unskippable_vmbuffer))
1587 {
1588 /*
1589 * 1. We have just processed an unskippable block (or we're at the
1590 * beginning of the scan). Find the next unskippable block using the
1591 * visibility map.
1592 */
1593 bool skipsallvis;
1594
1595 find_next_unskippable_block(vacrel, &skipsallvis);
1596
1597 /*
1598 * We now know the next block that we must process. It can be the
1599 * next block after the one we just processed, or something further
1600 * ahead. If it's further ahead, we can jump to it, but we choose to
1601 * do so only if we can skip at least SKIP_PAGES_THRESHOLD consecutive
1602 * pages. Since we're reading sequentially, the OS should be doing
1603 * readahead for us, so there's no gain in skipping a page now and
1604 * then. Skipping such a range might even discourage sequential
1605 * detection.
1606 *
1607 * This test also enables more frequent relfrozenxid advancement
1608 * during non-aggressive VACUUMs. If the range has any all-visible
1609 * pages then skipping makes updating relfrozenxid unsafe, which is a
1610 * real downside.
1611 */
1612 if (vacrel->next_unskippable_block - next_block >= SKIP_PAGES_THRESHOLD)
1613 {
1614 next_block = vacrel->next_unskippable_block;
1615 if (skipsallvis)
1616 vacrel->skippedallvis = true;
1617 }
1618 }
1619
1620 /* Now we must be in one of the two remaining states: */
1621 if (next_block < vacrel->next_unskippable_block)
1622 {
1623 /*
1624 * 2. We are processing a range of blocks that we could have skipped
1625 * but chose not to. We know that they are all-visible in the VM,
1626 * otherwise they would've been unskippable.
1627 */
1628 vacrel->current_block = next_block;
1629 blk_info |= VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM;
1630 *((uint8 *) per_buffer_data) = blk_info;
1631 return vacrel->current_block;
1632 }
1633 else
1634 {
1635 /*
1636 * 3. We reached the next unskippable block. Process it. On next
1637 * iteration, we will be back in state 1.
1638 */
1639 Assert(next_block == vacrel->next_unskippable_block);
1640
1641 vacrel->current_block = next_block;
1642 if (vacrel->next_unskippable_allvis)
1643 blk_info |= VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM;
1644 if (vacrel->next_unskippable_eager_scanned)
1645 blk_info |= VAC_BLK_WAS_EAGER_SCANNED;
1646 *((uint8 *) per_buffer_data) = blk_info;
1647 return vacrel->current_block;
1648 }
1649}
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4917
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:352
BlockNumber current_block
Definition: vacuumlazy.c:360
bool skippedallvis
Definition: vacuumlazy.c:288
#define VAC_BLK_WAS_EAGER_SCANNED
Definition: vacuumlazy.c:256
static void find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis)
Definition: vacuumlazy.c:1665
#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM
Definition: vacuumlazy.c:257
#define SKIP_PAGES_THRESHOLD
Definition: vacuumlazy.c:209

References Assert(), BufferIsValid(), LVRelState::current_block, find_next_unskippable_block(), InvalidBlockNumber, InvalidBuffer, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::rel_pages, ReleaseBuffer(), SKIP_PAGES_THRESHOLD, LVRelState::skippedallvis, VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM, and VAC_BLK_WAS_EAGER_SCANNED.

Referenced by lazy_scan_heap().
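
The SKIP_PAGES_THRESHOLD test above means a run of skippable blocks is only jumped over when it is at least 32 blocks long; shorter runs are read anyway so OS readahead stays sequential. A tiny standalone illustration (MY_SKIP_PAGES_THRESHOLD stands in for the real macro):

/* Illustrative only: jump to the next unskippable block or keep reading? */
#include <stdint.h>
#include <stdio.h>

#define MY_SKIP_PAGES_THRESHOLD ((uint32_t) 32)

int
main(void)
{
    uint32_t next_block = 100;
    uint32_t next_unskippable_block = 150;

    if (next_unskippable_block - next_block >= MY_SKIP_PAGES_THRESHOLD)
        next_block = next_unskippable_block;    /* 50-block run: skip it */

    printf("next block to read: %u\n", next_block);     /* 150 */
    return 0;
}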

◆ heap_vacuum_eager_scan_setup()

static void heap_vacuum_eager_scan_setup ( LVRelState vacrel,
VacuumParams params 
)
static

Definition at line 488 of file vacuumlazy.c.

489{
490 uint32 randseed;
491 BlockNumber allvisible;
492 BlockNumber allfrozen;
493 float first_region_ratio;
494 bool oldest_unfrozen_before_cutoff = false;
495
496 /*
497 * Initialize eager scan management fields to their disabled values.
498 * Aggressive vacuums, normal vacuums of small tables, and normal vacuums
499 * of tables without sufficiently old tuples disable eager scanning.
500 */
503 vacrel->eager_scan_remaining_fails = 0;
505
506 /* If eager scanning is explicitly disabled, just return. */
507 if (params->max_eager_freeze_failure_rate == 0)
508 return;
509
510 /*
511 * The caller will have determined whether or not an aggressive vacuum is
512 * required by either the vacuum parameters or the relative age of the
513 * oldest unfrozen transaction IDs. An aggressive vacuum must scan every
514 * all-visible page to safely advance the relfrozenxid and/or relminmxid,
515 * so scans of all-visible pages are not considered eager.
516 */
517 if (vacrel->aggressive)
518 return;
519
520 /*
521 * Aggressively vacuuming a small relation shouldn't take long, so it
522 * isn't worth amortizing. We use two times the region size as the size
523 * cutoff because the eager scan start block is a random spot somewhere in
524 * the first region, making the second region the first to be eager
525 * scanned normally.
526 */
527 if (vacrel->rel_pages < 2 * EAGER_SCAN_REGION_SIZE)
528 return;
529
530 /*
531 * We only want to enable eager scanning if we are likely to be able to
532 * freeze some of the pages in the relation.
533 *
534 * Tuples with XIDs older than OldestXmin or MXIDs older than OldestMxact
535 * are technically freezable, but we won't freeze them unless the criteria
536 * for opportunistic freezing is met. Only tuples with XIDs/MXIDs older
537 * than the FreezeLimit/MultiXactCutoff are frozen in the common case.
538 *
539 * So, as a heuristic, we wait until the FreezeLimit has advanced past the
540 * relfrozenxid or the MultiXactCutoff has advanced past the relminmxid to
541 * enable eager scanning.
542 */
545 vacrel->cutoffs.FreezeLimit))
546 oldest_unfrozen_before_cutoff = true;
547
548 if (!oldest_unfrozen_before_cutoff &&
551 vacrel->cutoffs.MultiXactCutoff))
552 oldest_unfrozen_before_cutoff = true;
553
554 if (!oldest_unfrozen_before_cutoff)
555 return;
556
557 /* We have met the criteria to eagerly scan some pages. */
558
559 /*
560 * Our success cap is MAX_EAGER_FREEZE_SUCCESS_RATE of the number of
561 * all-visible but not all-frozen blocks in the relation.
562 */
563 visibilitymap_count(vacrel->rel, &allvisible, &allfrozen);
564
567 (allvisible - allfrozen));
568
569 /* If every all-visible page is frozen, eager scanning is disabled. */
570 if (vacrel->eager_scan_remaining_successes == 0)
571 return;
572
573 /*
574 * Now calculate the bounds of the first eager scan region. Its end block
575 * will be a random spot somewhere in the first EAGER_SCAN_REGION_SIZE
576 * blocks. This affects the bounds of all subsequent regions and avoids
577 * eager scanning and failing to freeze the same blocks each vacuum of the
578 * relation.
579 */
581
583
585 params->max_eager_freeze_failure_rate <= 1);
586
590
591 /*
592 * The first region will be smaller than subsequent regions. As such,
593 * adjust the eager freeze failures tolerated for this region.
594 */
595 first_region_ratio = 1 - (float) vacrel->next_eager_scan_region_start /
597
600 first_region_ratio;
601}
uint32_t uint32
Definition: c.h:502
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3317
#define MultiXactIdIsValid(multi)
Definition: multixact.h:28
uint32 pg_prng_uint32(pg_prng_state *state)
Definition: pg_prng.c:227
pg_prng_state pg_global_prng_state
Definition: pg_prng.c:34
BlockNumber eager_scan_remaining_successes
Definition: vacuumlazy.c:389
TransactionId FreezeLimit
Definition: vacuum.h:284
TransactionId relfrozenxid
Definition: vacuum.h:258
MultiXactId relminmxid
Definition: vacuum.h:259
MultiXactId MultiXactCutoff
Definition: vacuum.h:285
double max_eager_freeze_failure_rate
Definition: vacuum.h:239
#define MAX_EAGER_FREEZE_SUCCESS_RATE
Definition: vacuumlazy.c:241
void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)

References LVRelState::aggressive, Assert(), LVRelState::cutoffs, LVRelState::eager_scan_max_fails_per_region, EAGER_SCAN_REGION_SIZE, LVRelState::eager_scan_remaining_fails, LVRelState::eager_scan_remaining_successes, VacuumCutoffs::FreezeLimit, InvalidBlockNumber, VacuumParams::max_eager_freeze_failure_rate, MAX_EAGER_FREEZE_SUCCESS_RATE, VacuumCutoffs::MultiXactCutoff, MultiXactIdIsValid, MultiXactIdPrecedes(), LVRelState::next_eager_scan_region_start, pg_global_prng_state, pg_prng_uint32(), LVRelState::rel, LVRelState::rel_pages, VacuumCutoffs::relfrozenxid, VacuumCutoffs::relminmxid, TransactionIdIsNormal, TransactionIdPrecedes(), and visibilitymap_count().

Referenced by heap_vacuum_rel().
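
A worked example of the budgets set up here, under assumed inputs: a 100000-page table with 40000 all-visible pages of which 10000 are also all-frozen, vacuumed with a max_eager_freeze_failure_rate of 0.03. The success cap follows MAX_EAGER_FREEZE_SUCCESS_RATE exactly; the per-region failure budget is shown only approximately (about max_eager_freeze_failure_rate of each EAGER_SCAN_REGION_SIZE-block region):

/* Illustrative only: rough eager-scan budgets for an assumed table. */
#include <stdint.h>
#include <stdio.h>

#define REGION_SIZE 4096        /* EAGER_SCAN_REGION_SIZE */
#define SUCCESS_RATE 0.2        /* MAX_EAGER_FREEZE_SUCCESS_RATE */

int
main(void)
{
    uint32_t rel_pages = 100000;
    uint32_t allvisible = 40000;
    uint32_t allfrozen = 10000;
    double   failure_rate = 0.03;   /* assumed max_eager_freeze_failure_rate */

    uint32_t success_cap = (uint32_t) (SUCCESS_RATE * (allvisible - allfrozen));
    uint32_t regions = rel_pages / REGION_SIZE;
    double   fails_per_region = failure_rate * REGION_SIZE;

    printf("eager freeze success cap: %u pages\n", success_cap);        /* 6000 */
    printf("full eager-scan regions: %u\n", regions);                   /* 24 */
    printf("~failures tolerated per region: %.0f\n", fails_per_region); /* ~123 */
    return 0;
}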

◆ heap_vacuum_rel()

void heap_vacuum_rel ( Relation  rel,
VacuumParams params,
BufferAccessStrategy  bstrategy 
)

Definition at line 615 of file vacuumlazy.c.

617{
618 LVRelState *vacrel;
619 bool verbose,
620 instrument,
621 skipwithvm,
622 frozenxid_updated,
623 minmulti_updated;
624 BlockNumber orig_rel_pages,
625 new_rel_pages,
626 new_rel_allvisible,
627 new_rel_allfrozen;
628 PGRUsage ru0;
629 TimestampTz starttime = 0;
630 PgStat_Counter startreadtime = 0,
631 startwritetime = 0;
632 WalUsage startwalusage = pgWalUsage;
633 BufferUsage startbufferusage = pgBufferUsage;
634 ErrorContextCallback errcallback;
635 char **indnames = NULL;
636
637 verbose = (params->options & VACOPT_VERBOSE) != 0;
638 instrument = (verbose || (AmAutoVacuumWorkerProcess() &&
639 params->log_min_duration >= 0));
640 if (instrument)
641 {
642 pg_rusage_init(&ru0);
643 if (track_io_timing)
644 {
645 startreadtime = pgStatBlockReadTime;
646 startwritetime = pgStatBlockWriteTime;
647 }
648 }
649
650 /* Used for instrumentation and stats report */
651 starttime = GetCurrentTimestamp();
652
654 RelationGetRelid(rel));
655
656 /*
657 * Setup error traceback support for ereport() first. The idea is to set
658 * up an error context callback to display additional information on any
659 * error during a vacuum. During different phases of vacuum, we update
660 * the state so that the error context callback always display current
661 * information.
662 *
663 * Copy the names of heap rel into local memory for error reporting
664 * purposes, too. It isn't always safe to assume that we can get the name
665 * of each rel. It's convenient for code in lazy_scan_heap to always use
666 * these temp copies.
667 */
668 vacrel = (LVRelState *) palloc0(sizeof(LVRelState));
672 vacrel->indname = NULL;
674 vacrel->verbose = verbose;
675 errcallback.callback = vacuum_error_callback;
676 errcallback.arg = vacrel;
677 errcallback.previous = error_context_stack;
678 error_context_stack = &errcallback;
679
680 /* Set up high level stuff about rel and its indexes */
681 vacrel->rel = rel;
683 &vacrel->indrels);
684 vacrel->bstrategy = bstrategy;
685 if (instrument && vacrel->nindexes > 0)
686 {
687 /* Copy index names used by instrumentation (not error reporting) */
688 indnames = palloc(sizeof(char *) * vacrel->nindexes);
689 for (int i = 0; i < vacrel->nindexes; i++)
690 indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
691 }
692
693 /*
694 * The index_cleanup param either disables index vacuuming and cleanup or
695 * forces it to go ahead when we would otherwise apply the index bypass
696 * optimization. The default is 'auto', which leaves the final decision
697 * up to lazy_vacuum().
698 *
699 * The truncate param allows user to avoid attempting relation truncation,
700 * though it can't force truncation to happen.
701 */
704 params->truncate != VACOPTVALUE_AUTO);
705
706 /*
707 * While VacuumFailSafeActive is reset to false before calling this, we
708 * still need to reset it here due to recursive calls.
709 */
710 VacuumFailsafeActive = false;
711 vacrel->consider_bypass_optimization = true;
712 vacrel->do_index_vacuuming = true;
713 vacrel->do_index_cleanup = true;
714 vacrel->do_rel_truncate = (params->truncate != VACOPTVALUE_DISABLED);
715 if (params->index_cleanup == VACOPTVALUE_DISABLED)
716 {
717 /* Force disable index vacuuming up-front */
718 vacrel->do_index_vacuuming = false;
719 vacrel->do_index_cleanup = false;
720 }
721 else if (params->index_cleanup == VACOPTVALUE_ENABLED)
722 {
723 /* Force index vacuuming. Note that failsafe can still bypass. */
724 vacrel->consider_bypass_optimization = false;
725 }
726 else
727 {
728 /* Default/auto, make all decisions dynamically */
730 }
731
732 /* Initialize page counters explicitly (be tidy) */
733 vacrel->scanned_pages = 0;
734 vacrel->eager_scanned_pages = 0;
735 vacrel->removed_pages = 0;
736 vacrel->new_frozen_tuple_pages = 0;
737 vacrel->lpdead_item_pages = 0;
738 vacrel->missed_dead_pages = 0;
739 vacrel->nonempty_pages = 0;
740 /* dead_items_alloc allocates vacrel->dead_items later on */
741
742 /* Allocate/initialize output statistics state */
743 vacrel->new_rel_tuples = 0;
744 vacrel->new_live_tuples = 0;
745 vacrel->indstats = (IndexBulkDeleteResult **)
746 palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
747
748 /* Initialize remaining counters (be tidy) */
749 vacrel->num_index_scans = 0;
750 vacrel->tuples_deleted = 0;
751 vacrel->tuples_frozen = 0;
752 vacrel->lpdead_items = 0;
753 vacrel->live_tuples = 0;
754 vacrel->recently_dead_tuples = 0;
755 vacrel->missed_dead_tuples = 0;
756
757 vacrel->vm_new_visible_pages = 0;
758 vacrel->vm_new_visible_frozen_pages = 0;
759 vacrel->vm_new_frozen_pages = 0;
760 vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
761
762 /*
763 * Get cutoffs that determine which deleted tuples are considered DEAD,
764 * not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze. Then determine
765 * the extent of the blocks that we'll scan in lazy_scan_heap. It has to
766 * happen in this order to ensure that the OldestXmin cutoff field works
767 * as an upper bound on the XIDs stored in the pages we'll actually scan
768 * (NewRelfrozenXid tracking must never be allowed to miss unfrozen XIDs).
769 *
770 * Next acquire vistest, a related cutoff that's used in pruning. We use
771 * vistest in combination with OldestXmin to ensure that
772 * heap_page_prune_and_freeze() always removes any deleted tuple whose
773 * xmax is < OldestXmin. lazy_scan_prune must never become confused about
774 * whether a tuple should be frozen or removed. (In the future we might
775 * want to teach lazy_scan_prune to recompute vistest from time to time,
776 * to increase the number of dead tuples it can prune away.)
777 */
778 vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
779 vacrel->vistest = GlobalVisTestFor(rel);
780 /* Initialize state used to track oldest extant XID/MXID */
781 vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin;
782 vacrel->NewRelminMxid = vacrel->cutoffs.OldestMxact;
783
784 /*
785 * Initialize state related to tracking all-visible page skipping. This is
786 * very important to determine whether or not it is safe to advance the
787 * relfrozenxid/relminmxid.
788 */
789 vacrel->skippedallvis = false;
790 skipwithvm = true;
792 {
793 /*
794 * Force aggressive mode, and disable skipping blocks using the
795 * visibility map (even those set all-frozen)
796 */
797 vacrel->aggressive = true;
798 skipwithvm = false;
799 }
800
801 vacrel->skipwithvm = skipwithvm;
802
803 /*
804 * Set up eager scan tracking state. This must happen after determining
805 * whether or not the vacuum must be aggressive, because only normal
806 * vacuums use the eager scan algorithm.
807 */
808 heap_vacuum_eager_scan_setup(vacrel, params);
809
810 if (verbose)
811 {
812 if (vacrel->aggressive)
814 (errmsg("aggressively vacuuming \"%s.%s.%s\"",
815 vacrel->dbname, vacrel->relnamespace,
816 vacrel->relname)));
817 else
819 (errmsg("vacuuming \"%s.%s.%s\"",
820 vacrel->dbname, vacrel->relnamespace,
821 vacrel->relname)));
822 }
823
824 /*
825 * Allocate dead_items memory using dead_items_alloc. This handles
826 * parallel VACUUM initialization as part of allocating shared memory
827 * space used for dead_items. (But do a failsafe precheck first, to
828 * ensure that parallel VACUUM won't be attempted at all when relfrozenxid
829 * is already dangerously old.)
830 */
832 dead_items_alloc(vacrel, params->nworkers);
833
834 /*
835 * Call lazy_scan_heap to perform all required heap pruning, index
836 * vacuuming, and heap vacuuming (plus related processing)
837 */
838 lazy_scan_heap(vacrel);
839
840 /*
841 * Free resources managed by dead_items_alloc. This ends parallel mode in
842 * passing when necessary.
843 */
844 dead_items_cleanup(vacrel);
846
847 /*
848 * Update pg_class entries for each of rel's indexes where appropriate.
849 *
850 * Unlike the later update to rel's pg_class entry, this is not critical.
851 * Maintains relpages/reltuples statistics used by the planner only.
852 */
853 if (vacrel->do_index_cleanup)
855
856 /* Done with rel's indexes */
857 vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
858
859 /* Optionally truncate rel */
860 if (should_attempt_truncation(vacrel))
861 lazy_truncate_heap(vacrel);
862
863 /* Pop the error context stack */
864 error_context_stack = errcallback.previous;
865
866 /* Report that we are now doing final cleanup */
869
870 /*
871 * Prepare to update rel's pg_class entry.
872 *
873 * Aggressive VACUUMs must always be able to advance relfrozenxid to a
874 * value >= FreezeLimit, and relminmxid to a value >= MultiXactCutoff.
875 * Non-aggressive VACUUMs may advance them by any amount, or not at all.
876 */
877 Assert(vacrel->NewRelfrozenXid == vacrel->cutoffs.OldestXmin ||
879 vacrel->cutoffs.relfrozenxid,
880 vacrel->NewRelfrozenXid));
881 Assert(vacrel->NewRelminMxid == vacrel->cutoffs.OldestMxact ||
883 vacrel->cutoffs.relminmxid,
884 vacrel->NewRelminMxid));
885 if (vacrel->skippedallvis)
886 {
887 /*
888 * Must keep original relfrozenxid in a non-aggressive VACUUM that
889 * chose to skip an all-visible page range. The state that tracks new
890 * values will have missed unfrozen XIDs from the pages we skipped.
891 */
892 Assert(!vacrel->aggressive);
895 }
896
897 /*
898 * For safety, clamp relallvisible to be not more than what we're setting
899 * pg_class.relpages to
900 */
901 new_rel_pages = vacrel->rel_pages; /* After possible rel truncation */
902 visibilitymap_count(rel, &new_rel_allvisible, &new_rel_allfrozen);
903 if (new_rel_allvisible > new_rel_pages)
904 new_rel_allvisible = new_rel_pages;
905
906 /*
907 * An all-frozen block _must_ be all-visible. As such, clamp the count of
908 * all-frozen blocks to the count of all-visible blocks. This matches the
909 * clamping of relallvisible above.
910 */
911 if (new_rel_allfrozen > new_rel_allvisible)
912 new_rel_allfrozen = new_rel_allvisible;
913
914 /*
915 * Now actually update rel's pg_class entry.
916 *
917 * In principle new_live_tuples could be -1 indicating that we (still)
918 * don't know the tuple count. In practice that can't happen, since we
919 * scan every page that isn't skipped using the visibility map.
920 */
921 vac_update_relstats(rel, new_rel_pages, vacrel->new_live_tuples,
922 new_rel_allvisible, new_rel_allfrozen,
923 vacrel->nindexes > 0,
924 vacrel->NewRelfrozenXid, vacrel->NewRelminMxid,
925 &frozenxid_updated, &minmulti_updated, false);
926
927 /*
928 * Report results to the cumulative stats system, too.
929 *
930 * Deliberately avoid telling the stats system about LP_DEAD items that
931 * remain in the table due to VACUUM bypassing index and heap vacuuming.
932 * ANALYZE will consider the remaining LP_DEAD items to be dead "tuples".
933 * It seems like a good idea to err on the side of not vacuuming again too
934 * soon in cases where the failsafe prevented significant amounts of heap
935 * vacuuming.
936 */
938 rel->rd_rel->relisshared,
939 Max(vacrel->new_live_tuples, 0),
940 vacrel->recently_dead_tuples +
941 vacrel->missed_dead_tuples,
942 starttime);
944
945 if (instrument)
946 {
948
949 if (verbose || params->log_min_duration == 0 ||
950 TimestampDifferenceExceeds(starttime, endtime,
951 params->log_min_duration))
952 {
953 long secs_dur;
954 int usecs_dur;
955 WalUsage walusage;
956 BufferUsage bufferusage;
958 char *msgfmt;
959 int32 diff;
960 double read_rate = 0,
961 write_rate = 0;
962 int64 total_blks_hit;
963 int64 total_blks_read;
964 int64 total_blks_dirtied;
965
966 TimestampDifference(starttime, endtime, &secs_dur, &usecs_dur);
967 memset(&walusage, 0, sizeof(WalUsage));
968 WalUsageAccumDiff(&walusage, &pgWalUsage, &startwalusage);
969 memset(&bufferusage, 0, sizeof(BufferUsage));
970 BufferUsageAccumDiff(&bufferusage, &pgBufferUsage, &startbufferusage);
971
972 total_blks_hit = bufferusage.shared_blks_hit +
973 bufferusage.local_blks_hit;
974 total_blks_read = bufferusage.shared_blks_read +
975 bufferusage.local_blks_read;
976 total_blks_dirtied = bufferusage.shared_blks_dirtied +
977 bufferusage.local_blks_dirtied;
978
980 if (verbose)
981 {
982 /*
983 * Aggressiveness already reported earlier, in dedicated
984 * VACUUM VERBOSE ereport
985 */
986 Assert(!params->is_wraparound);
987 msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
988 }
989 else if (params->is_wraparound)
990 {
991 /*
992 * While it's possible for a VACUUM to be both is_wraparound
993 * and !aggressive, that's just a corner-case -- is_wraparound
994 * implies aggressive. Produce distinct output for the corner
995 * case all the same, just in case.
996 */
997 if (vacrel->aggressive)
998 msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
999 else
1000 msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
1001 }
1002 else
1003 {
1004 if (vacrel->aggressive)
1005 msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
1006 else
1007 msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
1008 }
1009 appendStringInfo(&buf, msgfmt,
1010 vacrel->dbname,
1011 vacrel->relnamespace,
1012 vacrel->relname,
1013 vacrel->num_index_scans);
1014 appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total), %u eagerly scanned\n"),
1015 vacrel->removed_pages,
1016 new_rel_pages,
1017 vacrel->scanned_pages,
1018 orig_rel_pages == 0 ? 100.0 :
1019 100.0 * vacrel->scanned_pages /
1020 orig_rel_pages,
1021 vacrel->eager_scanned_pages);
1023 _("tuples: %lld removed, %lld remain, %lld are dead but not yet removable\n"),
1024 (long long) vacrel->tuples_deleted,
1025 (long long) vacrel->new_rel_tuples,
1026 (long long) vacrel->recently_dead_tuples);
1027 if (vacrel->missed_dead_tuples > 0)
1029 _("tuples missed: %lld dead from %u pages not removed due to cleanup lock contention\n"),
1030 (long long) vacrel->missed_dead_tuples,
1031 vacrel->missed_dead_pages);
1032 diff = (int32) (ReadNextTransactionId() -
1033 vacrel->cutoffs.OldestXmin);
1035 _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
1036 vacrel->cutoffs.OldestXmin, diff);
1037 if (frozenxid_updated)
1038 {
1039 diff = (int32) (vacrel->NewRelfrozenXid -
1040 vacrel->cutoffs.relfrozenxid);
1042 _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
1043 vacrel->NewRelfrozenXid, diff);
1044 }
1045 if (minmulti_updated)
1046 {
1047 diff = (int32) (vacrel->NewRelminMxid -
1048 vacrel->cutoffs.relminmxid);
1050 _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
1051 vacrel->NewRelminMxid, diff);
1052 }
1053 appendStringInfo(&buf, _("frozen: %u pages from table (%.2f%% of total) had %lld tuples frozen\n"),
1054 vacrel->new_frozen_tuple_pages,
1055 orig_rel_pages == 0 ? 100.0 :
1056 100.0 * vacrel->new_frozen_tuple_pages /
1057 orig_rel_pages,
1058 (long long) vacrel->tuples_frozen);
1059
1061 _("visibility map: %u pages set all-visible, %u pages set all-frozen (%u were all-visible)\n"),
1062 vacrel->vm_new_visible_pages,
1064 vacrel->vm_new_frozen_pages,
1065 vacrel->vm_new_frozen_pages);
1066 if (vacrel->do_index_vacuuming)
1067 {
1068 if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
1069 appendStringInfoString(&buf, _("index scan not needed: "));
1070 else
1071 appendStringInfoString(&buf, _("index scan needed: "));
1072
1073 msgfmt = _("%u pages from table (%.2f%% of total) had %lld dead item identifiers removed\n");
1074 }
1075 else
1076 {
1078 appendStringInfoString(&buf, _("index scan bypassed: "));
1079 else
1080 appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
1081
1082 msgfmt = _("%u pages from table (%.2f%% of total) have %lld dead item identifiers\n");
1083 }
1084 appendStringInfo(&buf, msgfmt,
1085 vacrel->lpdead_item_pages,
1086 orig_rel_pages == 0 ? 100.0 :
1087 100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
1088 (long long) vacrel->lpdead_items);
1089 for (int i = 0; i < vacrel->nindexes; i++)
1090 {
1091 IndexBulkDeleteResult *istat = vacrel->indstats[i];
1092
1093 if (!istat)
1094 continue;
1095
1096 appendStringInfo(&buf,
1097 _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
1098 indnames[i],
1099 istat->num_pages,
1100 istat->pages_newly_deleted,
1101 istat->pages_deleted,
1102 istat->pages_free);
1103 }
1104 if (track_cost_delay_timing)
1105 {
1106 /*
1107 * We bypass the changecount mechanism because this value is
1108 * only updated by the calling process. We also rely on the
1109 * above call to pgstat_progress_end_command() to not clear
1110 * the st_progress_param array.
1111 */
1112 appendStringInfo(&buf, _("delay time: %.3f ms\n"),
1113 (double) MyBEEntry->st_progress_param[PROGRESS_VACUUM_DELAY_TIME] / 1000000.0);
1114 }
1115 if (track_io_timing)
1116 {
1117 double read_ms = (double) (pgStatBlockReadTime - startreadtime) / 1000;
1118 double write_ms = (double) (pgStatBlockWriteTime - startwritetime) / 1000;
1119
1120 appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
1121 read_ms, write_ms);
1122 }
1123 if (secs_dur > 0 || usecs_dur > 0)
1124 {
1125 read_rate = (double) BLCKSZ * total_blks_read /
1126 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
1127 write_rate = (double) BLCKSZ * total_blks_dirtied /
1128 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
1129 }
1130 appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
1131 read_rate, write_rate);
1132 appendStringInfo(&buf,
1133 _("buffer usage: %lld hits, %lld reads, %lld dirtied\n"),
1134 (long long) total_blks_hit,
1135 (long long) total_blks_read,
1136 (long long) total_blks_dirtied);
1137 appendStringInfo(&buf,
1138 _("WAL usage: %lld records, %lld full page images, %llu bytes, %lld buffers full\n"),
1139 (long long) walusage.wal_records,
1140 (long long) walusage.wal_fpi,
1141 (unsigned long long) walusage.wal_bytes,
1142 (long long) walusage.wal_buffers_full);
1143 appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
1144
1145 ereport(verbose ? INFO : LOG,
1146 (errmsg_internal("%s", buf.data)));
1147 pfree(buf.data);
1148 }
1149 }
1150
1151 /* Cleanup index statistics and index names */
1152 for (int i = 0; i < vacrel->nindexes; i++)
1153 {
1154 if (vacrel->indstats[i])
1155 pfree(vacrel->indstats[i]);
1156
1157 if (instrument)
1158 pfree(indnames[i]);
1159 }
1160}
void TimestampDifference(TimestampTz start_time, TimestampTz stop_time, long *secs, int *microsecs)
Definition: timestamp.c:1720
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1780
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1644
void pgstat_progress_start_command(ProgressCommandType cmdtype, Oid relid)
void pgstat_progress_update_param(int index, int64 val)
void pgstat_progress_end_command(void)
@ PROGRESS_COMMAND_VACUUM
PgBackendStatus * MyBEEntry
bool track_io_timing
Definition: bufmgr.c:143
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:274
#define Max(x, y)
Definition: c.h:969
int32_t int32
Definition: c.h:498
int64 TimestampTz
Definition: timestamp.h:39
char * get_database_name(Oid dbid)
Definition: dbcommands.c:3188
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1157
ErrorContextCallback * error_context_stack
Definition: elog.c:94
#define _(x)
Definition: elog.c:90
#define LOG
Definition: elog.h:31
Oid MyDatabaseId
Definition: globals.c:93
int verbose
WalUsage pgWalUsage
Definition: instrument.c:22
void WalUsageAccumDiff(WalUsage *dst, const WalUsage *add, const WalUsage *sub)
Definition: instrument.c:287
BufferUsage pgBufferUsage
Definition: instrument.c:20
void BufferUsageAccumDiff(BufferUsage *dst, const BufferUsage *add, const BufferUsage *sub)
Definition: instrument.c:248
int i
Definition: isn.c:74
#define NoLock
Definition: lockdefs.h:34
#define RowExclusiveLock
Definition: lockdefs.h:38
char * get_namespace_name(Oid nspid)
Definition: lsyscache.c:3449
char * pstrdup(const char *in)
Definition: mcxt.c:1699
void pfree(void *pointer)
Definition: mcxt.c:1524
void * palloc0(Size size)
Definition: mcxt.c:1347
bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3331
#define InvalidMultiXactId
Definition: multixact.h:24
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
int64 PgStat_Counter
Definition: pgstat.h:65
PgStat_Counter pgStatBlockReadTime
PgStat_Counter pgStatBlockWriteTime
void pgstat_report_vacuum(Oid tableoid, bool shared, PgStat_Counter livetuples, PgStat_Counter deadtuples, TimestampTz starttime)
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4107
#define PROGRESS_VACUUM_PHASE_FINAL_CLEANUP
Definition: progress.h:39
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
#define PROGRESS_VACUUM_DELAY_TIME
Definition: progress.h:31
#define RelationGetRelationName(relation)
Definition: rel.h:547
#define RelationGetNamespace(relation)
Definition: rel.h:554
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:145
void appendStringInfoString(StringInfo str, const char *s)
Definition: stringinfo.c:230
void initStringInfo(StringInfo str)
Definition: stringinfo.c:97
int64 shared_blks_dirtied
Definition: instrument.h:28
int64 local_blks_hit
Definition: instrument.h:30
int64 shared_blks_read
Definition: instrument.h:27
int64 local_blks_read
Definition: instrument.h:31
int64 local_blks_dirtied
Definition: instrument.h:32
int64 shared_blks_hit
Definition: instrument.h:26
struct ErrorContextCallback * previous
Definition: elog.h:296
void(* callback)(void *arg)
Definition: elog.h:297
BlockNumber pages_deleted
Definition: genam.h:105
BlockNumber pages_newly_deleted
Definition: genam.h:104
BlockNumber pages_free
Definition: genam.h:106
BlockNumber num_pages
Definition: genam.h:100
BlockNumber vm_new_frozen_pages
Definition: vacuumlazy.c:337
int64 tuples_deleted
Definition: vacuumlazy.c:352
bool do_rel_truncate
Definition: vacuumlazy.c:280
BlockNumber scanned_pages
Definition: vacuumlazy.c:314
BlockNumber new_frozen_tuple_pages
Definition: vacuumlazy.c:323
GlobalVisState * vistest
Definition: vacuumlazy.c:284
BlockNumber removed_pages
Definition: vacuumlazy.c:322
int num_index_scans
Definition: vacuumlazy.c:350
double new_live_tuples
Definition: vacuumlazy.c:345
double new_rel_tuples
Definition: vacuumlazy.c:344
TransactionId NewRelfrozenXid
Definition: vacuumlazy.c:286
bool consider_bypass_optimization
Definition: vacuumlazy.c:275
int64 recently_dead_tuples
Definition: vacuumlazy.c:356
int64 tuples_frozen
Definition: vacuumlazy.c:353
char * dbname
Definition: vacuumlazy.c:291
BlockNumber missed_dead_pages
Definition: vacuumlazy.c:340
char * relnamespace
Definition: vacuumlazy.c:292
int64 live_tuples
Definition: vacuumlazy.c:355
int64 lpdead_items
Definition: vacuumlazy.c:354
BlockNumber lpdead_item_pages
Definition: vacuumlazy.c:339
BlockNumber eager_scanned_pages
Definition: vacuumlazy.c:320
bool do_index_cleanup
Definition: vacuumlazy.c:279
MultiXactId NewRelminMxid
Definition: vacuumlazy.c:287
int64 missed_dead_tuples
Definition: vacuumlazy.c:357
BlockNumber vm_new_visible_pages
Definition: vacuumlazy.c:326
VacErrPhase phase
Definition: vacuumlazy.c:297
char * indname
Definition: vacuumlazy.c:294
BlockNumber vm_new_visible_frozen_pages
Definition: vacuumlazy.c:334
int64 st_progress_param[PGSTAT_NUM_PROGRESS_PARAM]
Form_pg_class rd_rel
Definition: rel.h:111
MultiXactId OldestMxact
Definition: vacuum.h:275
int nworkers
Definition: vacuum.h:246
VacOptValue truncate
Definition: vacuum.h:231
bits32 options
Definition: vacuum.h:219
bool is_wraparound
Definition: vacuum.h:226
int log_min_duration
Definition: vacuum.h:227
VacOptValue index_cleanup
Definition: vacuum.h:230
int64 wal_buffers_full
Definition: instrument.h:56
uint64 wal_bytes
Definition: instrument.h:55
int64 wal_fpi
Definition: instrument.h:54
int64 wal_records
Definition: instrument.h:53
bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
Definition: transam.c:299
static TransactionId ReadNextTransactionId(void)
Definition: transam.h:315
bool track_cost_delay_timing
Definition: vacuum.c:80
void vac_open_indexes(Relation relation, LOCKMODE lockmode, int *nindexes, Relation **Irel)
Definition: vacuum.c:2338
void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
Definition: vacuum.c:2381
bool vacuum_get_cutoffs(Relation rel, const VacuumParams *params, struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:1102
bool VacuumFailsafeActive
Definition: vacuum.c:108
void vac_update_relstats(Relation relation, BlockNumber num_pages, double num_tuples, BlockNumber num_all_visible_pages, BlockNumber num_all_frozen_pages, bool hasindex, TransactionId frozenxid, MultiXactId minmulti, bool *frozenxid_updated, bool *minmulti_updated, bool in_outer_xact)
Definition: vacuum.c:1428
#define VACOPT_VERBOSE
Definition: vacuum.h:182
@ VACOPTVALUE_AUTO
Definition: vacuum.h:203
@ VACOPTVALUE_ENABLED
Definition: vacuum.h:205
@ VACOPTVALUE_UNSPECIFIED
Definition: vacuum.h:202
@ VACOPTVALUE_DISABLED
Definition: vacuum.h:204
#define VACOPT_DISABLE_PAGE_SKIPPING
Definition: vacuum.h:188
static void dead_items_cleanup(LVRelState *vacrel)
Definition: vacuumlazy.c:3575
static void update_relstats_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:3716
static void vacuum_error_callback(void *arg)
Definition: vacuumlazy.c:3751
static void lazy_truncate_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:3195
static bool should_attempt_truncation(LVRelState *vacrel)
Definition: vacuumlazy.c:3175
static void lazy_scan_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:1199
static bool lazy_check_wraparound_failsafe(LVRelState *vacrel)
Definition: vacuumlazy.c:2945
static void heap_vacuum_eager_scan_setup(LVRelState *vacrel, VacuumParams *params)
Definition: vacuumlazy.c:488
static void dead_items_alloc(LVRelState *vacrel, int nworkers)
Definition: vacuumlazy.c:3468
bool IsInParallelMode(void)
Definition: xact.c:1089

References _, LVRelState::aggressive, AmAutoVacuumWorkerProcess, appendStringInfo(), appendStringInfoString(), ErrorContextCallback::arg, Assert(), LVRelState::bstrategy, buf, BufferUsageAccumDiff(), ErrorContextCallback::callback, LVRelState::consider_bypass_optimization, LVRelState::cutoffs, LVRelState::dbname, dead_items_alloc(), dead_items_cleanup(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, LVRelState::eager_scanned_pages, ereport, errmsg(), errmsg_internal(), error_context_stack, VacuumCutoffs::FreezeLimit, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), GlobalVisTestFor(), heap_vacuum_eager_scan_setup(), i, VacuumParams::index_cleanup, LVRelState::indname, LVRelState::indrels, LVRelState::indstats, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, VacuumParams::is_wraparound, IsInParallelMode(), lazy_check_wraparound_failsafe(), lazy_scan_heap(), lazy_truncate_heap(), LVRelState::live_tuples, BufferUsage::local_blks_dirtied, BufferUsage::local_blks_hit, BufferUsage::local_blks_read, LOG, VacuumParams::log_min_duration, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, Max, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, VacuumCutoffs::MultiXactCutoff, MultiXactIdPrecedesOrEquals(), MyBEEntry, MyDatabaseId, LVRelState::new_frozen_tuple_pages, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, NoLock, LVRelState::nonempty_pages, LVRelState::num_index_scans, IndexBulkDeleteResult::num_pages, VacuumParams::nworkers, VacuumCutoffs::OldestMxact, VacuumCutoffs::OldestXmin, VacuumParams::options, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, IndexBulkDeleteResult::pages_newly_deleted, palloc(), palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgBufferUsage, pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), pgStatBlockReadTime, pgStatBlockWriteTime, pgWalUsage, LVRelState::phase, ErrorContextCallback::previous, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_DELAY_TIME, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, pstrdup(), RelationData::rd_rel, ReadNextTransactionId(), LVRelState::recently_dead_tuples, LVRelState::rel, LVRelState::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, VacuumCutoffs::relfrozenxid, VacuumCutoffs::relminmxid, LVRelState::relname, LVRelState::relnamespace, LVRelState::removed_pages, RowExclusiveLock, LVRelState::scanned_pages, BufferUsage::shared_blks_dirtied, BufferUsage::shared_blks_hit, BufferUsage::shared_blks_read, should_attempt_truncation(), LVRelState::skippedallvis, LVRelState::skipwithvm, PgBackendStatus::st_progress_param, TimestampDifference(), TimestampDifferenceExceeds(), track_cost_delay_timing, track_io_timing, TransactionIdPrecedesOrEquals(), VacuumParams::truncate, LVRelState::tuples_deleted, LVRelState::tuples_frozen, update_relstats_all_indexes(), vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_VERBOSE, VACOPTVALUE_AUTO, VACOPTVALUE_DISABLED, VACOPTVALUE_ENABLED, VACOPTVALUE_UNSPECIFIED, VACUUM_ERRCB_PHASE_UNKNOWN, vacuum_error_callback(), vacuum_get_cutoffs(), VacuumFailsafeActive, LVRelState::verbose, verbose, visibilitymap_count(), LVRelState::vistest, LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, 
LVRelState::vm_new_visible_pages, WalUsage::wal_buffers_full, WalUsage::wal_bytes, WalUsage::wal_fpi, WalUsage::wal_records, and WalUsageAccumDiff().
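
The avg read and write rates in the log output above are plain arithmetic over the block counters and the elapsed time. A minimal, standalone sketch of the same calculation, assuming the common 8 kB BLCKSZ and made-up counter values (the real block size comes from the build configuration):

#include <stdio.h>
#include <stdint.h>

#define BLCKSZ 8192     /* assumption: the default block size */

int
main(void)
{
    /* hypothetical counter deltas and elapsed time */
    int64_t total_blks_read = 12800;
    int64_t total_blks_dirtied = 3200;
    long    secs_dur = 2;
    int     usecs_dur = 500000;     /* i.e. 2.5 s elapsed */

    /* same expression as the log message above: bytes / MiB / seconds */
    double  read_rate = (double) BLCKSZ * total_blks_read /
        (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
    double  write_rate = (double) BLCKSZ * total_blks_dirtied /
        (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);

    printf("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n",
           read_rate, write_rate);
    return 0;
}

With these inputs the sketch prints 40.000 MB/s read and 10.000 MB/s written, matching the expressions at lines 1125-1128 above.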

◆ lazy_check_wraparound_failsafe()

static bool lazy_check_wraparound_failsafe ( LVRelState * vacrel)
static

Definition at line 2945 of file vacuumlazy.c.

2946{
2947 /* Don't warn more than once per VACUUM */
2948 if (VacuumFailsafeActive)
2949 return true;
2950
2951 if (unlikely(vacuum_xid_failsafe_check(&vacrel->cutoffs)))
2952 {
2953 const int progress_index[] = {
2954 PROGRESS_VACUUM_INDEXES_TOTAL,
2955 PROGRESS_VACUUM_INDEXES_PROCESSED
2956 };
2957 int64 progress_val[2] = {0, 0};
2958
2959 VacuumFailsafeActive = true;
2960
2961 /*
2962 * Abandon use of a buffer access strategy to allow use of all of
2963 * shared buffers. We assume the caller who allocated the memory for
2964 * the BufferAccessStrategy will free it.
2965 */
2966 vacrel->bstrategy = NULL;
2967
2968 /* Disable index vacuuming, index cleanup, and heap rel truncation */
2969 vacrel->do_index_vacuuming = false;
2970 vacrel->do_index_cleanup = false;
2971 vacrel->do_rel_truncate = false;
2972
2973 /* Reset the progress counters */
2974 pgstat_progress_update_multi_param(2, progress_index, progress_val);
2975
2976 ereport(WARNING,
2977 (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
2978 vacrel->dbname, vacrel->relnamespace, vacrel->relname,
2979 vacrel->num_index_scans),
2980 errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
2981 errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
2982 "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
2983
2984 /* Stop applying cost limits from this point on */
2985 VacuumCostActive = false;
2986 VacuumCostBalance = 0;
2987
2988 return true;
2989 }
2990
2991 return false;
2992}
#define unlikely(x)
Definition: c.h:347
int errdetail(const char *fmt,...)
Definition: elog.c:1203
int errhint(const char *fmt,...)
Definition: elog.c:1317
bool VacuumCostActive
Definition: globals.c:157
int VacuumCostBalance
Definition: globals.c:156
#define PROGRESS_VACUUM_INDEXES_PROCESSED
Definition: progress.h:30
#define PROGRESS_VACUUM_INDEXES_TOTAL
Definition: progress.h:29
bool vacuum_xid_failsafe_check(const struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:1270

References LVRelState::bstrategy, LVRelState::cutoffs, LVRelState::dbname, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errdetail(), errhint(), errmsg(), LVRelState::num_index_scans, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, LVRelState::relname, LVRelState::relnamespace, unlikely, vacuum_xid_failsafe_check(), VacuumCostActive, VacuumCostBalance, VacuumFailsafeActive, and WARNING.

Referenced by heap_vacuum_rel(), lazy_scan_heap(), and lazy_vacuum_all_indexes().
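
The callers do not invoke this check just once. lazy_scan_heap(), for example, re-checks every FAILSAFE_EVERY_PAGES scanned pages (see lines 1258-1260 below). A standalone sketch of that cadence, assuming the default 8 kB BLCKSZ, under which FAILSAFE_EVERY_PAGES works out to 4 GB / 8 kB = 524288 blocks:

#include <stdio.h>
#include <stdint.h>

#define BLCKSZ 8192     /* assumption: the default block size */
#define FAILSAFE_EVERY_PAGES (((uint64_t) 4 * 1024 * 1024 * 1024) / BLCKSZ)

int
main(void)
{
    /* walk a hypothetical 2-million-page heap and show where the periodic
     * failsafe re-check (scanned_pages % FAILSAFE_EVERY_PAGES == 0) fires */
    for (uint64_t scanned_pages = 1; scanned_pages <= 2000000; scanned_pages++)
    {
        if (scanned_pages % FAILSAFE_EVERY_PAGES == 0)
            printf("re-check failsafe after %llu scanned pages\n",
                   (unsigned long long) scanned_pages);
    }
    return 0;
}

So a full heap scan performs one extra failsafe check per roughly 4 GB of heap, on top of the similar check made from lazy_vacuum_all_indexes().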

◆ lazy_cleanup_all_indexes()

static void lazy_cleanup_all_indexes ( LVRelState * vacrel)
static

Definition at line 2998 of file vacuumlazy.c.

2999{
3000 double reltuples = vacrel->new_rel_tuples;
3001 bool estimated_count = vacrel->scanned_pages < vacrel->rel_pages;
3002 const int progress_start_index[] = {
3003 PROGRESS_VACUUM_PHASE,
3004 PROGRESS_VACUUM_INDEXES_TOTAL
3005 };
3006 const int progress_end_index[] = {
3007 PROGRESS_VACUUM_INDEXES_TOTAL,
3008 PROGRESS_VACUUM_INDEXES_PROCESSED
3009 };
3010 int64 progress_start_val[2];
3011 int64 progress_end_val[2] = {0, 0};
3012
3013 Assert(vacrel->do_index_cleanup);
3014 Assert(vacrel->nindexes > 0);
3015
3016 /*
3017 * Report that we are now cleaning up indexes and the number of indexes to
3018 * cleanup.
3019 */
3020 progress_start_val[0] = PROGRESS_VACUUM_PHASE_INDEX_CLEANUP;
3021 progress_start_val[1] = vacrel->nindexes;
3022 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
3023
3024 if (!ParallelVacuumIsActive(vacrel))
3025 {
3026 for (int idx = 0; idx < vacrel->nindexes; idx++)
3027 {
3028 Relation indrel = vacrel->indrels[idx];
3029 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
3030
3031 vacrel->indstats[idx] =
3032 lazy_cleanup_one_index(indrel, istat, reltuples,
3033 estimated_count, vacrel);
3034
3035 /* Report the number of indexes cleaned up */
3036 pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
3037 idx + 1);
3038 }
3039 }
3040 else
3041 {
3042 /* Outsource everything to parallel variant */
3043 parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
3044 vacrel->num_index_scans,
3045 estimated_count);
3046 }
3047
3048 /* Reset the progress counters */
3049 pgstat_progress_update_multi_param(2, progress_end_index, progress_end_val);
3050}
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
#define PROGRESS_VACUUM_PHASE_INDEX_CLEANUP
Definition: progress.h:37
static IndexBulkDeleteResult * lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
Definition: vacuumlazy.c:3115
void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, bool estimated_count)

References Assert(), LVRelState::do_index_cleanup, idx(), LVRelState::indrels, LVRelState::indstats, lazy_cleanup_one_index(), LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::num_index_scans, parallel_vacuum_cleanup_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, LVRelState::pvs, LVRelState::rel_pages, and LVRelState::scanned_pages.

Referenced by lazy_scan_heap().
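
The reltuples figure handed to each index's cleanup callback is only an estimate when the heap scan skipped pages, which is what the estimated_count flag at line 3001 records. A trivial standalone illustration (the page counts are hypothetical):

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    /* hypothetical stand-ins for vacrel->scanned_pages and vacrel->rel_pages */
    unsigned scanned_pages = 900;
    unsigned rel_pages = 1000;

    /* same test as line 3001: the tuple count passed to index cleanup is
     * only an estimate when some heap pages were skipped */
    bool estimated_count = scanned_pages < rel_pages;

    printf("estimated_count = %s\n", estimated_count ? "true" : "false");
    return 0;
}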

◆ lazy_cleanup_one_index()

static IndexBulkDeleteResult * lazy_cleanup_one_index ( Relation  indrel,
IndexBulkDeleteResult * istat,
double  reltuples,
bool  estimated_count,
LVRelState * vacrel 
)
static

Definition at line 3115 of file vacuumlazy.c.

3118{
3119 IndexVacuumInfo ivinfo;
3120 LVSavedErrInfo saved_err_info;
3121
3122 ivinfo.index = indrel;
3123 ivinfo.heaprel = vacrel->rel;
3124 ivinfo.analyze_only = false;
3125 ivinfo.report_progress = false;
3126 ivinfo.estimated_count = estimated_count;
3127 ivinfo.message_level = DEBUG2;
3128
3129 ivinfo.num_heap_tuples = reltuples;
3130 ivinfo.strategy = vacrel->bstrategy;
3131
3132 /*
3133 * Update error traceback information.
3134 *
3135 * The index name is saved during this phase and restored immediately
3136 * after this phase. See vacuum_error_callback.
3137 */
3138 Assert(vacrel->indname == NULL);
3139 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
3140 update_vacuum_error_info(vacrel, &saved_err_info,
3141 VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
3142 InvalidBlockNumber, InvalidOffsetNumber);
3143
3144 istat = vac_cleanup_one_index(&ivinfo, istat);
3145
3146 /* Revert to the previous phase information for error traceback */
3147 restore_vacuum_error_info(vacrel, &saved_err_info);
3148 pfree(vacrel->indname);
3149 vacrel->indname = NULL;
3150
3151 return istat;
3152}
Relation index
Definition: genam.h:69
double num_heap_tuples
Definition: genam.h:75
bool analyze_only
Definition: genam.h:71
BufferAccessStrategy strategy
Definition: genam.h:76
Relation heaprel
Definition: genam.h:70
bool report_progress
Definition: genam.h:72
int message_level
Definition: genam.h:74
bool estimated_count
Definition: genam.h:73
IndexBulkDeleteResult * vac_cleanup_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat)
Definition: vacuum.c:2630
static void restore_vacuum_error_info(LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
Definition: vacuumlazy.c:3834
static void update_vacuum_error_info(LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
Definition: vacuumlazy.c:3815

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_cleanup_one_index(), and VACUUM_ERRCB_PHASE_INDEX_CLEANUP.

Referenced by lazy_cleanup_all_indexes().

◆ lazy_scan_heap()

static void lazy_scan_heap ( LVRelState * vacrel)
static

Definition at line 1199 of file vacuumlazy.c.

1200{
1201 ReadStream *stream;
1202 BlockNumber rel_pages = vacrel->rel_pages,
1203 blkno = 0,
1204 next_fsm_block_to_vacuum = 0;
1205 BlockNumber orig_eager_scan_success_limit =
1206 vacrel->eager_scan_remaining_successes; /* for logging */
1207 Buffer vmbuffer = InvalidBuffer;
1208 const int initprog_index[] = {
1209 PROGRESS_VACUUM_PHASE,
1210 PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
1211 PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES
1212 };
1213 int64 initprog_val[3];
1214
1215 /* Report that we're scanning the heap, advertising total # of blocks */
1216 initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
1217 initprog_val[1] = rel_pages;
1218 initprog_val[2] = vacrel->dead_items_info->max_bytes;
1219 pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
1220
1221 /* Initialize for the first heap_vac_scan_next_block() call */
1222 vacrel->current_block = InvalidBlockNumber;
1223 vacrel->next_unskippable_block = InvalidBlockNumber;
1224 vacrel->next_unskippable_allvis = false;
1225 vacrel->next_unskippable_eager_scanned = false;
1226 vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1227
1228 /* Set up the read stream for vacuum's first pass through the heap */
1229 stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE,
1230 vacrel->bstrategy,
1231 vacrel->rel,
1232 MAIN_FORKNUM,
1233 heap_vac_scan_next_block,
1234 vacrel,
1235 sizeof(uint8));
1236
1237 while (true)
1238 {
1239 Buffer buf;
1240 Page page;
1241 uint8 blk_info = 0;
1242 bool has_lpdead_items;
1243 void *per_buffer_data = NULL;
1244 bool vm_page_frozen = false;
1245 bool got_cleanup_lock = false;
1246
1247 vacuum_delay_point(false);
1248
1249 /*
1250 * Regularly check if wraparound failsafe should trigger.
1251 *
1252 * There is a similar check inside lazy_vacuum_all_indexes(), but
1253 * relfrozenxid might start to look dangerously old before we reach
1254 * that point. This check also provides failsafe coverage for the
1255 * one-pass strategy, and the two-pass strategy with the index_cleanup
1256 * param set to 'off'.
1257 */
1258 if (vacrel->scanned_pages > 0 &&
1259 vacrel->scanned_pages % FAILSAFE_EVERY_PAGES == 0)
1260 lazy_check_wraparound_failsafe(vacrel);
1261
1262 /*
1263 * Consider if we definitely have enough space to process TIDs on page
1264 * already. If we are close to overrunning the available space for
1265 * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
1266 * this page. However, let's force at least one page-worth of tuples
1267 * to be stored as to ensure we do at least some work when the memory
1268 * configured is so low that we run out before storing anything.
1269 */
1270 if (vacrel->dead_items_info->num_items > 0 &&
1271 TidStoreMemoryUsage(vacrel->dead_items) > vacrel->dead_items_info->max_bytes)
1272 {
1273 /*
1274 * Before beginning index vacuuming, we release any pin we may
1275 * hold on the visibility map page. This isn't necessary for
1276 * correctness, but we do it anyway to avoid holding the pin
1277 * across a lengthy, unrelated operation.
1278 */
1279 if (BufferIsValid(vmbuffer))
1280 {
1281 ReleaseBuffer(vmbuffer);
1282 vmbuffer = InvalidBuffer;
1283 }
1284
1285 /* Perform a round of index and heap vacuuming */
1286 vacrel->consider_bypass_optimization = false;
1287 lazy_vacuum(vacrel);
1288
1289 /*
1290 * Vacuum the Free Space Map to make newly-freed space visible on
1291 * upper-level FSM pages. Note that blkno is the previously
1292 * processed block.
1293 */
1294 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1295 blkno + 1);
1296 next_fsm_block_to_vacuum = blkno;
1297
1298 /* Report that we are once again scanning the heap */
1299 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1300 PROGRESS_VACUUM_PHASE_SCAN_HEAP);
1301 }
1302
1303 buf = read_stream_next_buffer(stream, &per_buffer_data);
1304
1305 /* The relation is exhausted. */
1306 if (!BufferIsValid(buf))
1307 break;
1308
1309 blk_info = *((uint8 *) per_buffer_data);
1310 CheckBufferIsPinnedOnce(buf);
1311 page = BufferGetPage(buf);
1312 blkno = BufferGetBlockNumber(buf);
1313
1314 vacrel->scanned_pages++;
1315 if (blk_info & VAC_BLK_WAS_EAGER_SCANNED)
1316 vacrel->eager_scanned_pages++;
1317
1318 /* Report as block scanned, update error traceback information */
1319 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1320 update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_SCAN_HEAP,
1321 blkno, InvalidOffsetNumber);
1322
1323 /*
1324 * Pin the visibility map page in case we need to mark the page
1325 * all-visible. In most cases this will be very cheap, because we'll
1326 * already have the correct page pinned anyway.
1327 */
1328 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
1329
1330 /*
1331 * We need a buffer cleanup lock to prune HOT chains and defragment
1332 * the page in lazy_scan_prune. But when it's not possible to acquire
1333 * a cleanup lock right away, we may be able to settle for reduced
1334 * processing using lazy_scan_noprune.
1335 */
1336 got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
1337
1338 if (!got_cleanup_lock)
1339 LockBuffer(buf, BUFFER_LOCK_SHARE);
1340
1341 /* Check for new or empty pages before lazy_scan_[no]prune call */
1342 if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock,
1343 vmbuffer))
1344 {
1345 /* Processed as new/empty page (lock and pin released) */
1346 continue;
1347 }
1348
1349 /*
1350 * If we didn't get the cleanup lock, we can still collect LP_DEAD
1351 * items in the dead_items area for later vacuuming, count live and
1352 * recently dead tuples for vacuum logging, and determine if this
1353 * block could later be truncated. If we encounter any xid/mxids that
1354 * require advancing the relfrozenxid/relminxid, we'll have to wait
1355 * for a cleanup lock and call lazy_scan_prune().
1356 */
1357 if (!got_cleanup_lock &&
1358 !lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
1359 {
1360 /*
1361 * lazy_scan_noprune could not do all required processing. Wait
1362 * for a cleanup lock, and call lazy_scan_prune in the usual way.
1363 */
1364 Assert(vacrel->aggressive);
1365 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1366 LockBufferForCleanup(buf);
1367 got_cleanup_lock = true;
1368 }
1369
1370 /*
1371 * If we have a cleanup lock, we must now prune, freeze, and count
1372 * tuples. We may have acquired the cleanup lock originally, or we may
1373 * have gone back and acquired it after lazy_scan_noprune() returned
1374 * false. Either way, the page hasn't been processed yet.
1375 *
1376 * Like lazy_scan_noprune(), lazy_scan_prune() will count
1377 * recently_dead_tuples and live tuples for vacuum logging, determine
1378 * if the block can later be truncated, and accumulate the details of
1379 * remaining LP_DEAD line pointers on the page into dead_items. These
1380 * dead items include those pruned by lazy_scan_prune() as well as
1381 * line pointers previously marked LP_DEAD.
1382 */
1383 if (got_cleanup_lock)
1384 lazy_scan_prune(vacrel, buf, blkno, page,
1385 vmbuffer,
1386 blk_info & VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM,
1387 &has_lpdead_items, &vm_page_frozen);
1388
1389 /*
1390 * Count an eagerly scanned page as a failure or a success.
1391 *
1392 * Only lazy_scan_prune() freezes pages, so if we didn't get the
1393 * cleanup lock, we won't have frozen the page. However, we only count
1394 * pages that were too new to require freezing as eager freeze
1395 * failures.
1396 *
1397 * We could gather more information from lazy_scan_noprune() about
1398 * whether or not there were tuples with XIDs or MXIDs older than the
1399 * FreezeLimit or MultiXactCutoff. However, for simplicity, we simply
1400 * exclude pages skipped due to cleanup lock contention from eager
1401 * freeze algorithm caps.
1402 */
1403 if (got_cleanup_lock &&
1404 (blk_info & VAC_BLK_WAS_EAGER_SCANNED))
1405 {
1406 /* Aggressive vacuums do not eager scan. */
1407 Assert(!vacrel->aggressive);
1408
1409 if (vm_page_frozen)
1410 {
1411 Assert(vacrel->eager_scan_remaining_successes > 0);
1412 vacrel->eager_scan_remaining_successes--;
1413
1414 if (vacrel->eager_scan_remaining_successes == 0)
1415 {
1416 /*
1417 * If we hit our success cap, permanently disable eager
1418 * scanning by setting the other eager scan management
1419 * fields to their disabled values.
1420 */
1421 vacrel->eager_scan_remaining_fails = 0;
1422 vacrel->next_eager_scan_region_start = InvalidBlockNumber;
1423 vacrel->eager_scan_max_fails_per_region = 0;
1424
1425 ereport(vacrel->verbose ? INFO : DEBUG2,
1426 (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of \"%s.%s.%s\"",
1427 orig_eager_scan_success_limit,
1428 vacrel->dbname, vacrel->relnamespace,
1429 vacrel->relname)));
1430 }
1431 }
1432 else
1433 {
1434 Assert(vacrel->eager_scan_remaining_fails > 0);
1435 vacrel->eager_scan_remaining_fails--;
1436 }
1437 }
1438
1439 /*
1440 * Now drop the buffer lock and, potentially, update the FSM.
1441 *
1442 * Our goal is to update the freespace map the last time we touch the
1443 * page. If we'll process a block in the second pass, we may free up
1444 * additional space on the page, so it is better to update the FSM
1445 * after the second pass. If the relation has no indexes, or if index
1446 * vacuuming is disabled, there will be no second heap pass; if this
1447 * particular page has no dead items, the second heap pass will not
1448 * touch this page. So, in those cases, update the FSM now.
1449 *
1450 * Note: In corner cases, it's possible to miss updating the FSM
1451 * entirely. If index vacuuming is currently enabled, we'll skip the
1452 * FSM update now. But if failsafe mode is later activated, or there
1453 * are so few dead tuples that index vacuuming is bypassed, there will
1454 * also be no opportunity to update the FSM later, because we'll never
1455 * revisit this page. Since updating the FSM is desirable but not
1456 * absolutely required, that's OK.
1457 */
1458 if (vacrel->nindexes == 0
1459 || !vacrel->do_index_vacuuming
1460 || !has_lpdead_items)
1461 {
1462 Size freespace = PageGetHeapFreeSpace(page);
1463
1464 UnlockReleaseBuffer(buf);
1465 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1466
1467 /*
1468 * Periodically perform FSM vacuuming to make newly-freed space
1469 * visible on upper FSM pages. This is done after vacuuming if the
1470 * table has indexes. There will only be newly-freed space if we
1471 * held the cleanup lock and lazy_scan_prune() was called.
1472 */
1473 if (got_cleanup_lock && vacrel->nindexes == 0 && has_lpdead_items &&
1474 blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1475 {
1476 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1477 blkno);
1478 next_fsm_block_to_vacuum = blkno;
1479 }
1480 }
1481 else
1482 UnlockReleaseBuffer(buf);
1483 }
1484
1485 vacrel->blkno = InvalidBlockNumber;
1486 if (BufferIsValid(vmbuffer))
1487 ReleaseBuffer(vmbuffer);
1488
1489 /*
1490 * Report that everything is now scanned. We never skip scanning the last
1491 * block in the relation, so we can pass rel_pages here.
1492 */
1493 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED,
1494 rel_pages);
1495
1496 /* now we can compute the new value for pg_class.reltuples */
1497 vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, rel_pages,
1498 vacrel->scanned_pages,
1499 vacrel->live_tuples);
1500
1501 /*
1502 * Also compute the total number of surviving heap entries. In the
1503 * (unlikely) scenario that new_live_tuples is -1, take it as zero.
1504 */
1505 vacrel->new_rel_tuples =
1506 Max(vacrel->new_live_tuples, 0) + vacrel->recently_dead_tuples +
1507 vacrel->missed_dead_tuples;
1508
1509 read_stream_end(stream);
1510
1511 /*
1512 * Do index vacuuming (call each index's ambulkdelete routine), then do
1513 * related heap vacuuming
1514 */
1515 if (vacrel->dead_items_info->num_items > 0)
1516 lazy_vacuum(vacrel);
1517
1518 /*
1519 * Vacuum the remainder of the Free Space Map. We must do this whether or
1520 * not there were indexes, and whether or not we bypassed index vacuuming.
1521 * We can pass rel_pages here because we never skip scanning the last
1522 * block of the relation.
1523 */
1524 if (rel_pages > next_fsm_block_to_vacuum)
1525 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, rel_pages);
1526
1527 /* report all blocks vacuumed */
1528 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1529
1530 /* Do final index cleanup (call each index's amvacuumcleanup routine) */
1531 if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
1532 lazy_cleanup_all_indexes(vacrel);
1533}
void CheckBufferIsPinnedOnce(Buffer buffer)
Definition: bufmgr.c:5198
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5231
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5392
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:190
Size PageGetHeapFreeSpace(const PageData *page)
Definition: bufpage.c:980
void FreeSpaceMapVacuumRange(Relation rel, BlockNumber start, BlockNumber end)
Definition: freespace.c:377
void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
Definition: freespace.c:194
#define PROGRESS_VACUUM_PHASE_SCAN_HEAP
Definition: progress.h:34
#define PROGRESS_VACUUM_TOTAL_HEAP_BLKS
Definition: progress.h:22
#define PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES
Definition: progress.h:26
#define PROGRESS_VACUUM_HEAP_BLKS_SCANNED
Definition: progress.h:23
#define PROGRESS_VACUUM_HEAP_BLKS_VACUUMED
Definition: progress.h:24
Buffer read_stream_next_buffer(ReadStream *stream, void **per_buffer_data)
Definition: read_stream.c:742
ReadStream * read_stream_begin_relation(int flags, BufferAccessStrategy strategy, Relation rel, ForkNumber forknum, ReadStreamBlockNumberCB callback, void *callback_private_data, size_t per_buffer_data_size)
Definition: read_stream.c:688
void read_stream_end(ReadStream *stream)
Definition: read_stream.c:1023
#define READ_STREAM_MAINTENANCE
Definition: read_stream.h:28
BlockNumber blkno
Definition: vacuumlazy.c:295
void vacuum_delay_point(bool is_analyze)
Definition: vacuum.c:2402
double vac_estimate_reltuples(Relation relation, BlockNumber total_pages, BlockNumber scanned_pages, double scanned_tuples)
Definition: vacuum.c:1332
static void lazy_scan_prune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool all_visible_according_to_vm, bool *has_lpdead_items, bool *vm_page_frozen)
Definition: vacuumlazy.c:1940
static BlockNumber heap_vac_scan_next_block(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition: vacuumlazy.c:1560
static void lazy_vacuum(LVRelState *vacrel)
Definition: vacuumlazy.c:2444
static void lazy_cleanup_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2998
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
Definition: vacuumlazy.c:2233
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
Definition: vacuumlazy.c:1797
#define FAILSAFE_EVERY_PAGES
Definition: vacuumlazy.c:193
#define VACUUM_FSM_EVERY_PAGES
Definition: vacuumlazy.c:202
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)

References LVRelState::aggressive, Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), CheckBufferIsPinnedOnce(), ConditionalLockBufferForCleanup(), LVRelState::consider_bypass_optimization, LVRelState::current_block, LVRelState::dbname, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::eager_scan_max_fails_per_region, LVRelState::eager_scan_remaining_fails, LVRelState::eager_scan_remaining_successes, LVRelState::eager_scanned_pages, ereport, errmsg(), FAILSAFE_EVERY_PAGES, FreeSpaceMapVacuumRange(), heap_vac_scan_next_block(), INFO, InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_check_wraparound_failsafe(), lazy_cleanup_all_indexes(), lazy_scan_new_or_empty(), lazy_scan_noprune(), lazy_scan_prune(), lazy_vacuum(), LVRelState::live_tuples, LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, Max, VacDeadItemsInfo::max_bytes, LVRelState::missed_dead_tuples, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::next_eager_scan_region_start, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::nindexes, VacDeadItemsInfo::num_items, PageGetHeapFreeSpace(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, read_stream_begin_relation(), read_stream_end(), READ_STREAM_MAINTENANCE, read_stream_next_buffer(), LVRelState::recently_dead_tuples, RecordPageWithFreeSpace(), LVRelState::rel, LVRelState::rel_pages, ReleaseBuffer(), LVRelState::relname, LVRelState::relnamespace, LVRelState::scanned_pages, TidStoreMemoryUsage(), UnlockReleaseBuffer(), update_vacuum_error_info(), VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM, VAC_BLK_WAS_EAGER_SCANNED, vac_estimate_reltuples(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_FSM_EVERY_PAGES, LVRelState::verbose, and visibilitymap_pin().

Referenced by heap_vacuum_rel().
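
lazy_scan_heap() never advances a block counter itself: heap_vac_scan_next_block() decides which blocks the read stream fetches, and the loop simply consumes buffers until read_stream_next_buffer() returns InvalidBuffer. The following standalone toy mimics only that control flow with hypothetical names; it is not the PostgreSQL ReadStream API, just the consume-until-exhausted shape of the loop above:

#include <stdio.h>
#include <stdint.h>

#define INVALID_BLOCK UINT32_MAX        /* stands in for InvalidBlockNumber */

/* Hypothetical callback: hands out the next block number to read, skipping
 * some blocks, the way heap_vac_scan_next_block() can skip all-visible pages. */
static uint32_t
next_block_cb(uint32_t *cursor, uint32_t rel_pages)
{
    while (*cursor < rel_pages)
    {
        uint32_t blkno = (*cursor)++;

        if (blkno % 2 == 0)             /* pretend odd-numbered blocks are skippable */
            return blkno;
    }
    return INVALID_BLOCK;               /* stream exhausted */
}

int
main(void)
{
    uint32_t cursor = 0;
    uint32_t rel_pages = 10;
    uint32_t blkno;

    /* consume until exhaustion, as the loop above consumes buffers until
     * read_stream_next_buffer() returns an invalid buffer */
    while ((blkno = next_block_cb(&cursor, rel_pages)) != INVALID_BLOCK)
        printf("process block %u\n", blkno);

    return 0;
}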

◆ lazy_scan_new_or_empty()

static bool lazy_scan_new_or_empty ( LVRelState * vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool  sharelock,
Buffer  vmbuffer 
)
static

Definition at line 1797 of file vacuumlazy.c.

1799{
1800 Size freespace;
1801
1802 if (PageIsNew(page))
1803 {
1804 /*
1805 * All-zeroes pages can be left over if either a backend extends the
1806 * relation by a single page, but crashes before the newly initialized
1807 * page has been written out, or when bulk-extending the relation
1808 * (which creates a number of empty pages at the tail end of the
1809 * relation), and then enters them into the FSM.
1810 *
1811 * Note we do not enter the page into the visibilitymap. That has the
1812 * downside that we repeatedly visit this page in subsequent vacuums,
1813 * but otherwise we'll never discover the space on a promoted standby.
1814 * The harm of repeated checking ought to normally not be too bad. The
1815 * space usually should be used at some point, otherwise there
1816 * wouldn't be any regular vacuums.
1817 *
1818 * Make sure these pages are in the FSM, to ensure they can be reused.
1819 * Do that by testing if there's any space recorded for the page. If
1820 * not, enter it. We do so after releasing the lock on the heap page,
1821 * the FSM is approximate, after all.
1822 */
1823 UnlockReleaseBuffer(buf);
1824
1825 if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
1826 {
1827 freespace = BLCKSZ - SizeOfPageHeaderData;
1828
1829 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1830 }
1831
1832 return true;
1833 }
1834
1835 if (PageIsEmpty(page))
1836 {
1837 /*
1838 * It seems likely that caller will always be able to get a cleanup
1839 * lock on an empty page. But don't take any chances -- escalate to
1840 * an exclusive lock (still don't need a cleanup lock, though).
1841 */
1842 if (sharelock)
1843 {
1844 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1845 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
1846
1847 if (!PageIsEmpty(page))
1848 {
1849 /* page isn't new or empty -- keep lock and pin for now */
1850 return false;
1851 }
1852 }
1853 else
1854 {
1855 /* Already have a full cleanup lock (which is more than enough) */
1856 }
1857
1858 /*
1859 * Unlike new pages, empty pages are always set all-visible and
1860 * all-frozen.
1861 */
1862 if (!PageIsAllVisible(page))
1863 {
1864 uint8 old_vmbits;
1865
1866 START_CRIT_SECTION();
1867
1868 /* mark buffer dirty before writing a WAL record */
1869 MarkBufferDirty(buf);
1870
1871 /*
1872 * It's possible that another backend has extended the heap,
1873 * initialized the page, and then failed to WAL-log the page due
1874 * to an ERROR. Since heap extension is not WAL-logged, recovery
1875 * might try to replay our record setting the page all-visible and
1876 * find that the page isn't initialized, which will cause a PANIC.
1877 * To prevent that, check whether the page has been previously
1878 * WAL-logged, and if not, do that now.
1879 */
1880 if (RelationNeedsWAL(vacrel->rel) &&
1881 PageGetLSN(page) == InvalidXLogRecPtr)
1882 log_newpage_buffer(buf, true);
1883
1884 PageSetAllVisible(page);
1885 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
1886 InvalidXLogRecPtr,
1887 vmbuffer, InvalidTransactionId,
1888 VISIBILITYMAP_ALL_VISIBLE |
1889 VISIBILITYMAP_ALL_FROZEN);
1890 END_CRIT_SECTION();
1891
1892 /*
1893 * If the page wasn't already set all-visible and/or all-frozen in
1894 * the VM, count it as newly set for logging.
1895 */
1896 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
1897 {
1898 vacrel->vm_new_visible_pages++;
1899 vacrel->vm_new_visible_frozen_pages++;
1900 }
1901 else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0)
1902 vacrel->vm_new_frozen_pages++;
1903 }
1904
1905 freespace = PageGetHeapFreeSpace(page);
1906 UnlockReleaseBuffer(buf);
1907 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1908 return true;
1909 }
1910
1911 /* page isn't new or empty -- keep lock and pin */
1912 return false;
1913}
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2596
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:192
static bool PageIsAllVisible(const PageData *page)
Definition: bufpage.h:429
#define SizeOfPageHeaderData
Definition: bufpage.h:217
static void PageSetAllVisible(Page page)
Definition: bufpage.h:434
static XLogRecPtr PageGetLSN(const PageData *page)
Definition: bufpage.h:386
Size GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk)
Definition: freespace.c:244
#define START_CRIT_SECTION()
Definition: miscadmin.h:149
#define END_CRIT_SECTION()
Definition: miscadmin.h:151
#define RelationNeedsWAL(relation)
Definition: rel.h:636
uint8 visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
Definition: xloginsert.c:1237

References buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, END_CRIT_SECTION, GetRecordedFreeSpace(), InvalidTransactionId, InvalidXLogRecPtr, LockBuffer(), log_newpage_buffer(), MarkBufferDirty(), PageGetHeapFreeSpace(), PageGetLSN(), PageIsAllVisible(), PageIsEmpty(), PageIsNew(), PageSetAllVisible(), RecordPageWithFreeSpace(), LVRelState::rel, RelationNeedsWAL, SizeOfPageHeaderData, START_CRIT_SECTION, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, and LVRelState::vm_new_visible_pages.

Referenced by lazy_scan_heap().
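
For an all-zeroes (PageIsNew) page with nothing yet recorded in the FSM, the function records BLCKSZ - SizeOfPageHeaderData as available space (line 1827). A standalone sketch of that figure, assuming the default 8 kB block size and the usual 24-byte page header:

#include <stdio.h>

#define BLCKSZ 8192                 /* assumption: the default block size */
#define SizeOfPageHeaderData 24     /* assumption: page header size on typical builds */

int
main(void)
{
    /* the figure recorded in the FSM for an all-zeroes page with no
     * previously recorded free space (see line 1827 above) */
    unsigned freespace = BLCKSZ - SizeOfPageHeaderData;

    printf("freespace recorded for a new page: %u bytes\n", freespace);
    return 0;
}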

◆ lazy_scan_noprune()

static bool lazy_scan_noprune ( LVRelState * vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool *  has_lpdead_items 
)
static

Definition at line 2233 of file vacuumlazy.c.

2238{
2239 OffsetNumber offnum,
2240 maxoff;
2241 int lpdead_items,
2242 live_tuples,
2243 recently_dead_tuples,
2244 missed_dead_tuples;
2245 bool hastup;
2246 HeapTupleHeader tupleheader;
2247 TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
2248 MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid;
2249 OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
2250
2251 Assert(BufferGetBlockNumber(buf) == blkno);
2252
2253 hastup = false; /* for now */
2254
2255 lpdead_items = 0;
2256 live_tuples = 0;
2257 recently_dead_tuples = 0;
2258 missed_dead_tuples = 0;
2259
2260 maxoff = PageGetMaxOffsetNumber(page);
2261 for (offnum = FirstOffsetNumber;
2262 offnum <= maxoff;
2263 offnum = OffsetNumberNext(offnum))
2264 {
2265 ItemId itemid;
2266 HeapTupleData tuple;
2267
2268 vacrel->offnum = offnum;
2269 itemid = PageGetItemId(page, offnum);
2270
2271 if (!ItemIdIsUsed(itemid))
2272 continue;
2273
2274 if (ItemIdIsRedirected(itemid))
2275 {
2276 hastup = true;
2277 continue;
2278 }
2279
2280 if (ItemIdIsDead(itemid))
2281 {
2282 /*
2283 * Deliberately don't set hastup=true here. See same point in
2284 * lazy_scan_prune for an explanation.
2285 */
2286 deadoffsets[lpdead_items++] = offnum;
2287 continue;
2288 }
2289
2290 hastup = true; /* page prevents rel truncation */
2291 tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
2292 if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
2293 &NoFreezePageRelfrozenXid,
2294 &NoFreezePageRelminMxid))
2295 {
2296 /* Tuple with XID < FreezeLimit (or MXID < MultiXactCutoff) */
2297 if (vacrel->aggressive)
2298 {
2299 /*
2300 * Aggressive VACUUMs must always be able to advance rel's
2301 * relfrozenxid to a value >= FreezeLimit (and be able to
2302 * advance rel's relminmxid to a value >= MultiXactCutoff).
2303 * The ongoing aggressive VACUUM won't be able to do that
2304 * unless it can freeze an XID (or MXID) from this tuple now.
2305 *
2306 * The only safe option is to have caller perform processing
2307 * of this page using lazy_scan_prune. Caller might have to
2308 * wait a while for a cleanup lock, but it can't be helped.
2309 */
2310 vacrel->offnum = InvalidOffsetNumber;
2311 return false;
2312 }
2313
2314 /*
2315 * Non-aggressive VACUUMs are under no obligation to advance
2316 * relfrozenxid (even by one XID). We can be much laxer here.
2317 *
2318 * Currently we always just accept an older final relfrozenxid
2319 * and/or relminmxid value. We never make caller wait or work a
2320 * little harder, even when it likely makes sense to do so.
2321 */
2322 }
2323
2324 ItemPointerSet(&(tuple.t_self), blkno, offnum);
2325 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2326 tuple.t_len = ItemIdGetLength(itemid);
2327 tuple.t_tableOid = RelationGetRelid(vacrel->rel);
2328
2329 switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
2330 buf))
2331 {
2332 case HEAPTUPLE_DELETE_IN_PROGRESS:
2333 case HEAPTUPLE_LIVE:
2334
2335 /*
2336 * Count both cases as live, just like lazy_scan_prune
2337 */
2338 live_tuples++;
2339
2340 break;
2341 case HEAPTUPLE_DEAD:
2342
2343 /*
2344 * There is some useful work for pruning to do, that won't be
2345 * done due to failure to get a cleanup lock.
2346 */
2347 missed_dead_tuples++;
2348 break;
2349 case HEAPTUPLE_RECENTLY_DEAD:
2350
2351 /*
2352 * Count in recently_dead_tuples, just like lazy_scan_prune
2353 */
2354 recently_dead_tuples++;
2355 break;
2356 case HEAPTUPLE_INSERT_IN_PROGRESS:
2357
2358 /*
2359 * Do not count these rows as live, just like lazy_scan_prune
2360 */
2361 break;
2362 default:
2363 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2364 break;
2365 }
2366 }
2367
2368 vacrel->offnum = InvalidOffsetNumber;
2369
2370 /*
2371 * By here we know for sure that caller can put off freezing and pruning
2372 * this particular page until the next VACUUM. Remember its details now.
2373 * (lazy_scan_prune expects a clean slate, so we have to do this last.)
2374 */
2375 vacrel->NewRelfrozenXid = NoFreezePageRelfrozenXid;
2376 vacrel->NewRelminMxid = NoFreezePageRelminMxid;
2377
2378 /* Save any LP_DEAD items found on the page in dead_items */
2379 if (vacrel->nindexes == 0)
2380 {
2381 /* Using one-pass strategy (since table has no indexes) */
2382 if (lpdead_items > 0)
2383 {
2384 /*
2385 * Perfunctory handling for the corner case where a single pass
2386 * strategy VACUUM cannot get a cleanup lock, and it turns out
2387 * that there is one or more LP_DEAD items: just count the LP_DEAD
2388 * items as missed_dead_tuples instead. (This is a bit dishonest,
2389 * but it beats having to maintain specialized heap vacuuming code
2390 * forever, for vanishingly little benefit.)
2391 */
2392 hastup = true;
2393 missed_dead_tuples += lpdead_items;
2394 }
2395 }
2396 else if (lpdead_items > 0)
2397 {
2398 /*
2399 * Page has LP_DEAD items, and so any references/TIDs that remain in
2400 * indexes will be deleted during index vacuuming (and then marked
2401 * LP_UNUSED in the heap)
2402 */
2403 vacrel->lpdead_item_pages++;
2404
2405 dead_items_add(vacrel, blkno, deadoffsets, lpdead_items);
2406
2407 vacrel->lpdead_items += lpdead_items;
2408 }
2409
2410 /*
2411 * Finally, add relevant page-local counts to whole-VACUUM counts
2412 */
2413 vacrel->live_tuples += live_tuples;
2414 vacrel->recently_dead_tuples += recently_dead_tuples;
2415 vacrel->missed_dead_tuples += missed_dead_tuples;
2416 if (missed_dead_tuples > 0)
2417 vacrel->missed_dead_pages++;
2418
2419 /* Can't truncate this page */
2420 if (hastup)
2421 vacrel->nonempty_pages = blkno + 1;
2422
2423 /* Did we find LP_DEAD items? */
2424 *has_lpdead_items = (lpdead_items > 0);
2425
2426 /* Caller won't need to call lazy_scan_prune with same page */
2427 return true;
2428}
TransactionId MultiXactId
Definition: c.h:633
bool heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid)
Definition: heapam.c:7854
#define MaxHeapTuplesPerPage
Definition: htup_details.h:624
static void dead_items_add(LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
Definition: vacuumlazy.c:3533

References LVRelState::aggressive, Assert(), buf, BufferGetBlockNumber(), LVRelState::cutoffs, dead_items_add(), elog, ERROR, FirstOffsetNumber, heap_tuple_should_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, ItemIdGetLength, ItemIdIsDead, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MaxHeapTuplesPerPage, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::recently_dead_tuples, LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by lazy_scan_heap().
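
The only situation in which this function returns false is an aggressive VACUUM that finds a tuple it must freeze; the caller then waits for a cleanup lock and falls back to lazy_scan_prune() (see lines 1357-1367 above). A standalone toy of that caller-side decision, using hypothetical stand-ins for the lock and scan primitives:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: pretend the cleanup lock is contended and that the
 * page holds a tuple older than FreezeLimit, so an aggressive VACUUM cannot
 * settle for reduced processing. */
static bool try_conditional_cleanup_lock(void) { return false; }
static bool scan_noprune(bool aggressive)      { return !aggressive; }

int
main(void)
{
    bool aggressive = true;
    bool got_cleanup_lock = try_conditional_cleanup_lock();

    if (!got_cleanup_lock && !scan_noprune(aggressive))
    {
        /* mirrors lazy_scan_heap(): wait for the cleanup lock, then prune */
        printf("wait for cleanup lock, then call lazy_scan_prune()\n");
        got_cleanup_lock = true;
    }
    else if (!got_cleanup_lock)
        printf("reduced processing by lazy_scan_noprune() was sufficient\n");
    else
        printf("cleanup lock acquired immediately; lazy_scan_prune() runs\n");

    return 0;
}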

◆ lazy_scan_prune()

static void lazy_scan_prune ( LVRelState * vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
Buffer  vmbuffer,
bool  all_visible_according_to_vm,
bool *  has_lpdead_items,
bool *  vm_page_frozen 
)
static

Definition at line 1940 of file vacuumlazy.c.

1948{
1949 Relation rel = vacrel->rel;
1950 PruneFreezeResult presult;
1951 int prune_options = 0;
1952
1953 Assert(BufferGetBlockNumber(buf) == blkno);
1954
1955 /*
1956 * Prune all HOT-update chains and potentially freeze tuples on this page.
1957 *
1958 * If the relation has no indexes, we can immediately mark would-be dead
1959 * items LP_UNUSED.
1960 *
1961 * The number of tuples removed from the page is returned in
1962 * presult.ndeleted. It should not be confused with presult.lpdead_items;
1963 * presult.lpdead_items's final value can be thought of as the number of
1964 * tuples that were deleted from indexes.
1965 *
1966 * We will update the VM after collecting LP_DEAD items and freezing
1967 * tuples. Pruning will have determined whether or not the page is
1968 * all-visible.
1969 */
1970 prune_options = HEAP_PAGE_PRUNE_FREEZE;
1971 if (vacrel->nindexes == 0)
1972 prune_options |= HEAP_PAGE_PRUNE_MARK_UNUSED_NOW;
1973
1974 heap_page_prune_and_freeze(rel, buf, vacrel->vistest, prune_options,
1975 &vacrel->cutoffs, &presult, PRUNE_VACUUM_SCAN,
1976 &vacrel->offnum,
1977 &vacrel->NewRelfrozenXid, &vacrel->NewRelminMxid);
1978
1981
1982 if (presult.nfrozen > 0)
1983 {
1984 /*
1985 * We don't increment the new_frozen_tuple_pages instrumentation
1986 * counter when nfrozen == 0, since it only counts pages with newly
1987 * frozen tuples (don't confuse that with pages newly set all-frozen
1988 * in VM).
1989 */
1990 vacrel->new_frozen_tuple_pages++;
1991 }
1992
1993 /*
1994 * VACUUM will call heap_page_is_all_visible() during the second pass over
1995 * the heap to determine all_visible and all_frozen for the page -- this
1996 * is a specialized version of the logic from this function. Now that
1997 * we've finished pruning and freezing, make sure that we're in total
1998 * agreement with heap_page_is_all_visible() using an assertion.
1999 */
2000#ifdef USE_ASSERT_CHECKING
2001 /* Note that all_frozen value does not matter when !all_visible */
2002 if (presult.all_visible)
2003 {
2004 TransactionId debug_cutoff;
2005 bool debug_all_frozen;
2006
2007 Assert(presult.lpdead_items == 0);
2008
2009 if (!heap_page_is_all_visible(vacrel, buf,
2010 &debug_cutoff, &debug_all_frozen))
2011 Assert(false);
2012
2013 Assert(presult.all_frozen == debug_all_frozen);
2014
2015 Assert(!TransactionIdIsValid(debug_cutoff) ||
2016 debug_cutoff == presult.vm_conflict_horizon);
2017 }
2018#endif
2019
2020 /*
2021 * Now save details of the LP_DEAD items from the page in vacrel
2022 */
2023 if (presult.lpdead_items > 0)
2024 {
2025 vacrel->lpdead_item_pages++;
2026
2027 /*
2028 * deadoffsets are collected incrementally in
2029 * heap_page_prune_and_freeze() as each dead line pointer is recorded,
2030 * with an indeterminate order, but dead_items_add requires them to be
2031 * sorted.
2032 */
2033 qsort(presult.deadoffsets, presult.lpdead_items, sizeof(OffsetNumber),
2034 cmpOffsetNumbers);
2035
2036 dead_items_add(vacrel, blkno, presult.deadoffsets, presult.lpdead_items);
2037 }
2038
2039 /* Finally, add page-local counts to whole-VACUUM counts */
2040 vacrel->tuples_deleted += presult.ndeleted;
2041 vacrel->tuples_frozen += presult.nfrozen;
2042 vacrel->lpdead_items += presult.lpdead_items;
2043 vacrel->live_tuples += presult.live_tuples;
2044 vacrel->recently_dead_tuples += presult.recently_dead_tuples;
2045
2046 /* Can't truncate this page */
2047 if (presult.hastup)
2048 vacrel->nonempty_pages = blkno + 1;
2049
2050 /* Did we find LP_DEAD items? */
2051 *has_lpdead_items = (presult.lpdead_items > 0);
2052
2053 Assert(!presult.all_visible || !(*has_lpdead_items));
2054
2055 /*
2056 * Handle setting visibility map bit based on information from the VM (as
2057 * of last heap_vac_scan_next_block() call), and from all_visible and
2058 * all_frozen variables
2059 */
2060 if (!all_visible_according_to_vm && presult.all_visible)
2061 {
2062 uint8 old_vmbits;
2063 uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
2064
2065 if (presult.all_frozen)
2066 {
2067 Assert(!TransactionIdIsValid(presult.vm_conflict_horizon));
2068 flags |= VISIBILITYMAP_ALL_FROZEN;
2069 }
2070
2071 /*
2072 * It should never be the case that the visibility map page is set
2073 * while the page-level bit is clear, but the reverse is allowed (if
2074 * checksums are not enabled). Regardless, set both bits so that we
2075 * get back in sync.
2076 *
2077 * NB: If the heap page is all-visible but the VM bit is not set, we
2078 * don't need to dirty the heap page. However, if checksums are
2079 * enabled, we do need to make sure that the heap page is dirtied
2080 * before passing it to visibilitymap_set(), because it may be logged.
2081 * Given that this situation should only happen in rare cases after a
2082 * crash, it is not worth optimizing.
2083 */
2084 PageSetAllVisible(page);
2085 MarkBufferDirty(buf);
2086 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
2087 InvalidXLogRecPtr,
2088 vmbuffer, presult.vm_conflict_horizon,
2089 flags);
2090
2091 /*
2092 * If the page wasn't already set all-visible and/or all-frozen in the
2093 * VM, count it as newly set for logging.
2094 */
2095 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2096 {
2097 vacrel->vm_new_visible_pages++;
2098 if (presult.all_frozen)
2099 {
2100 vacrel->vm_new_visible_frozen_pages++;
2101 *vm_page_frozen = true;
2102 }
2103 }
2104 else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
2105 presult.all_frozen)
2106 {
2107 vacrel->vm_new_frozen_pages++;
2108 *vm_page_frozen = true;
2109 }
2110 }
2111
2112 /*
2113 * As of PostgreSQL 9.2, the visibility map bit should never be set if the
2114 * page-level bit is clear. However, it's possible that the bit got
2115 * cleared after heap_vac_scan_next_block() was called, so we must recheck
2116 * with buffer lock before concluding that the VM is corrupt.
2117 */
2118 else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
2119 visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
2120 {
2121 elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
2122 vacrel->relname, blkno);
2123 visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
2124 VISIBILITYMAP_VALID_BITS);
2125 }
2126
2127 /*
2128 * It's possible for the value returned by
2129 * GetOldestNonRemovableTransactionId() to move backwards, so it's not
2130 * wrong for us to see tuples that appear to not be visible to everyone
2131 * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value
2132 * never moves backwards, but GetOldestNonRemovableTransactionId() is
2133 * conservative and sometimes returns a value that's unnecessarily small,
2134 * so if we see that contradiction it just means that the tuples that we
2135 * think are not visible to everyone yet actually are, and the
2136 * PD_ALL_VISIBLE flag is correct.
2137 *
2138 * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
2139 * however.
2140 */
2141 else if (presult.lpdead_items > 0 && PageIsAllVisible(page))
2142 {
2143 elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
2144 vacrel->relname, blkno);
2145 PageClearAllVisible(page);
2146 MarkBufferDirty(buf);
2147 visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
2148 VISIBILITYMAP_VALID_BITS);
2149 }
2150
2151 /*
2152 * If the all-visible page is all-frozen but not marked as such yet, mark
2153 * it as all-frozen. Note that all_frozen is only valid if all_visible is
2154 * true, so we must check both all_visible and all_frozen.
2155 */
2156 else if (all_visible_according_to_vm && presult.all_visible &&
2157 presult.all_frozen && !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
2158 {
2159 uint8 old_vmbits;
2160
2161 /*
2162 * Avoid relying on all_visible_according_to_vm as a proxy for the
2163 * page-level PD_ALL_VISIBLE bit being set, since it might have become
2164 * stale -- even when all_visible is set
2165 */
2166 if (!PageIsAllVisible(page))
2167 {
2168 PageSetAllVisible(page);
2169 MarkBufferDirty(buf);
2170 }
2171
2172 /*
2173 * Set the page all-frozen (and all-visible) in the VM.
2174 *
2175 * We can pass InvalidTransactionId as our cutoff_xid, since a
2176 * snapshotConflictHorizon sufficient to make everything safe for REDO
2177 * was logged when the page's tuples were frozen.
2178 */
2179 Assert(!TransactionIdIsValid(presult.vm_conflict_horizon));
2180 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
2181 InvalidXLogRecPtr,
2182 vmbuffer, InvalidTransactionId,
2183 VISIBILITYMAP_ALL_VISIBLE |
2184 VISIBILITYMAP_ALL_FROZEN);
2185
2186 /*
2187 * The page was likely already set all-visible in the VM. However,
2188 * there is a small chance that it was modified sometime between
2189 * setting all_visible_according_to_vm and checking the visibility
2190 * during pruning. Check the return value of old_vmbits anyway to
2191 * ensure the visibility map counters used for logging are accurate.
2192 */
2193 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2194 {
2195 vacrel->vm_new_visible_pages++;
2196 vacrel->vm_new_visible_frozen_pages++;
2197 *vm_page_frozen = true;
2198 }
2199
2200 /*
2201 * We already checked that the page was not set all-frozen in the VM
2202 * above, so we don't need to test the value of old_vmbits.
2203 */
2204 else
2205 {
2206 vacrel->vm_new_frozen_pages++;
2207 *vm_page_frozen = true;
2208 }
2209 }
2210}
static void PageClearAllVisible(Page page)
Definition: bufpage.h:439
#define HEAP_PAGE_PRUNE_FREEZE
Definition: heapam.h:43
@ PRUNE_VACUUM_SCAN
Definition: heapam.h:279
#define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW
Definition: heapam.h:42
#define qsort(a, b, c, d)
Definition: port.h:475
void heap_page_prune_and_freeze(Relation relation, Buffer buffer, GlobalVisState *vistest, int options, struct VacuumCutoffs *cutoffs, PruneFreezeResult *presult, PruneReason reason, OffsetNumber *off_loc, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
Definition: pruneheap.c:350
int recently_dead_tuples
Definition: heapam.h:243
TransactionId vm_conflict_horizon
Definition: heapam.h:258
OffsetNumber deadoffsets[MaxHeapTuplesPerPage]
Definition: heapam.h:272
bool all_visible
Definition: heapam.h:256
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
Definition: vacuumlazy.c:3600
static int cmpOffsetNumbers(const void *a, const void *b)
Definition: vacuumlazy.c:1917
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
#define VM_ALL_FROZEN(r, b, v)
Definition: visibilitymap.h:26
#define VISIBILITYMAP_VALID_BITS

References PruneFreezeResult::all_frozen, PruneFreezeResult::all_visible, Assert(), buf, BufferGetBlockNumber(), cmpOffsetNumbers(), LVRelState::cutoffs, dead_items_add(), PruneFreezeResult::deadoffsets, elog, PruneFreezeResult::hastup, heap_page_is_all_visible(), heap_page_prune_and_freeze(), HEAP_PAGE_PRUNE_FREEZE, HEAP_PAGE_PRUNE_MARK_UNUSED_NOW, InvalidTransactionId, InvalidXLogRecPtr, LVRelState::live_tuples, PruneFreezeResult::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, PruneFreezeResult::lpdead_items, MarkBufferDirty(), MultiXactIdIsValid, PruneFreezeResult::ndeleted, LVRelState::new_frozen_tuple_pages, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, PruneFreezeResult::nfrozen, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::offnum, PageClearAllVisible(), PageIsAllVisible(), PageSetAllVisible(), PRUNE_VACUUM_SCAN, qsort, LVRelState::recently_dead_tuples, PruneFreezeResult::recently_dead_tuples, LVRelState::rel, LVRelState::relname, TransactionIdIsValid, LVRelState::tuples_deleted, LVRelState::tuples_frozen, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, LVRelState::vistest, VM_ALL_FROZEN, PruneFreezeResult::vm_conflict_horizon, LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, LVRelState::vm_new_visible_pages, and WARNING.

Referenced by lazy_scan_heap().
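
Before handing the page's dead items to dead_items_add(), lazy_scan_prune() sorts presult.deadoffsets with qsort() and the cmpOffsetNumbers() comparator, because the prune/freeze step records them in an indeterminate order while the TID store expects ascending offsets. A minimal standalone sketch of that sort; the cmp_offset_numbers() helper, the OffsetNumber typedef, and the sample offsets below are illustrative stand-ins rather than the server's definitions:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint16_t OffsetNumber;	/* stand-in for the server's typedef */

/* Ascending comparator for qsort(), in the spirit of cmpOffsetNumbers() */
static int
cmp_offset_numbers(const void *a, const void *b)
{
	OffsetNumber na = *(const OffsetNumber *) a;
	OffsetNumber nb = *(const OffsetNumber *) b;

	return (na > nb) - (na < nb);
}

int
main(void)
{
	/* dead item offsets as they might be reported, in arbitrary order */
	OffsetNumber deadoffsets[] = {17, 3, 42, 9, 5};
	size_t		ndead = sizeof(deadoffsets) / sizeof(deadoffsets[0]);

	qsort(deadoffsets, ndead, sizeof(OffsetNumber), cmp_offset_numbers);

	for (size_t i = 0; i < ndead; i++)
		printf("%u\n", (unsigned) deadoffsets[i]);
	return 0;
}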

◆ lazy_truncate_heap()

static void lazy_truncate_heap ( LVRelState * vacrel)
static

Definition at line 3195 of file vacuumlazy.c.

3196{
3197 BlockNumber orig_rel_pages = vacrel->rel_pages;
3198 BlockNumber new_rel_pages;
3199 bool lock_waiter_detected;
3200 int lock_retry;
3201
3202 /* Report that we are now truncating */
3203 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
3204 PROGRESS_VACUUM_PHASE_TRUNCATE);
3205
3206 /* Update error traceback information one last time */
3207 update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
3208 vacrel->nonempty_pages, InvalidOffsetNumber);
3209
3210 /*
3211 * Loop until no more truncating can be done.
3212 */
3213 do
3214 {
3215 /*
3216 * We need full exclusive lock on the relation in order to do
3217 * truncation. If we can't get it, give up rather than waiting --- we
3218 * don't want to block other backends, and we don't want to deadlock
3219 * (which is quite possible considering we already hold a lower-grade
3220 * lock).
3221 */
3222 lock_waiter_detected = false;
3223 lock_retry = 0;
3224 while (true)
3225 {
3226 if (ConditionalLockRelation(vacrel->rel, AccessExclusiveLock))
3227 break;
3228
3229 /*
3230 * Check for interrupts while trying to (re-)acquire the exclusive
3231 * lock.
3232 */
3233 CHECK_FOR_INTERRUPTS();
3234
3235 if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
3236 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
3237 {
3238 /*
3239 * We failed to establish the lock in the specified number of
3240 * retries. This means we give up truncating.
3241 */
3242 ereport(vacrel->verbose ? INFO : DEBUG2,
3243 (errmsg("\"%s\": stopping truncate due to conflicting lock request",
3244 vacrel->relname)));
3245 return;
3246 }
3247
3248 (void) WaitLatch(MyLatch,
3249 WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
3250 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL,
3251 WAIT_EVENT_VACUUM_TRUNCATE);
3252 ResetLatch(MyLatch);
3253 }
3254
3255 /*
3256 * Now that we have exclusive lock, look to see if the rel has grown
3257 * whilst we were vacuuming with non-exclusive lock. If so, give up;
3258 * the newly added pages presumably contain non-deletable tuples.
3259 */
3260 new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
3261 if (new_rel_pages != orig_rel_pages)
3262 {
3263 /*
3264 * Note: we intentionally don't update vacrel->rel_pages with the
3265 * new rel size here. If we did, it would amount to assuming that
3266 * the new pages are empty, which is unlikely. Leaving the numbers
3267 * alone amounts to assuming that the new pages have the same
3268 * tuple density as existing ones, which is less unlikely.
3269 */
3270 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3271 return;
3272 }
3273
3274 /*
3275 * Scan backwards from the end to verify that the end pages actually
3276 * contain no tuples. This is *necessary*, not optional, because
3277 * other backends could have added tuples to these pages whilst we
3278 * were vacuuming.
3279 */
3280 new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
3281 vacrel->blkno = new_rel_pages;
3282
3283 if (new_rel_pages >= orig_rel_pages)
3284 {
3285 /* can't do anything after all */
3286 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3287 return;
3288 }
3289
3290 /*
3291 * Okay to truncate.
3292 */
3293 RelationTruncate(vacrel->rel, new_rel_pages);
3294
3295 /*
3296 * We can release the exclusive lock as soon as we have truncated.
3297 * Other backends can't safely access the relation until they have
3298 * processed the smgr invalidation that smgrtruncate sent out ... but
3299 * that should happen as part of standard invalidation processing once
3300 * they acquire lock on the relation.
3301 */
3302 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3303
3304 /*
3305 * Update statistics. Here, it *is* correct to adjust rel_pages
3306 * without also touching reltuples, since the tuple count wasn't
3307 * changed by the truncation.
3308 */
3309 vacrel->removed_pages += orig_rel_pages - new_rel_pages;
3310 vacrel->rel_pages = new_rel_pages;
3311
3312 ereport(vacrel->verbose ? INFO : DEBUG2,
3313 (errmsg("table \"%s\": truncated %u to %u pages",
3314 vacrel->relname,
3315 orig_rel_pages, new_rel_pages)));
3316 orig_rel_pages = new_rel_pages;
3317 } while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
3318}
struct Latch * MyLatch
Definition: globals.c:62
void ResetLatch(Latch *latch)
Definition: latch.c:372
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:172
void UnlockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:314
bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:278
#define PROGRESS_VACUUM_PHASE_TRUNCATE
Definition: progress.h:38
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:288
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL
Definition: vacuumlazy.c:180
#define VACUUM_TRUNCATE_LOCK_TIMEOUT
Definition: vacuumlazy.c:181
static BlockNumber count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
Definition: vacuumlazy.c:3326
#define WL_TIMEOUT
Definition: waiteventset.h:37
#define WL_EXIT_ON_PM_DEATH
Definition: waiteventset.h:39
#define WL_LATCH_SET
Definition: waiteventset.h:34

References AccessExclusiveLock, LVRelState::blkno, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), DEBUG2, ereport, errmsg(), INFO, InvalidOffsetNumber, MyLatch, LVRelState::nonempty_pages, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelState::rel, LVRelState::rel_pages, RelationGetNumberOfBlocks, RelationTruncate(), LVRelState::relname, LVRelState::removed_pages, ResetLatch(), UnlockRelation(), update_vacuum_error_info(), VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_TRUNCATE_LOCK_TIMEOUT, VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL, LVRelState::verbose, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

Referenced by heap_vacuum_rel().
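
The truncation phase only proceeds while AccessExclusiveLock can be taken without blocking: lazy_truncate_heap() polls ConditionalLockRelation() every VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL milliseconds and gives up once VACUUM_TRUNCATE_LOCK_TIMEOUT milliseconds' worth of retries have elapsed. A simplified, self-contained model of that retry loop; try_exclusive_lock() and wait_ms() are hypothetical stand-ins for ConditionalLockRelation() and the WaitLatch() sleep, and the attempt at which the lock "frees up" is invented for the example:

#include <stdbool.h>
#include <stdio.h>

#define TRUNCATE_LOCK_WAIT_INTERVAL 50		/* ms, mirrors VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL */
#define TRUNCATE_LOCK_TIMEOUT		5000	/* ms, mirrors VACUUM_TRUNCATE_LOCK_TIMEOUT */

/* Stand-in for ConditionalLockRelation(): pretend the lock frees up late. */
static bool
try_exclusive_lock(int attempt)
{
	return attempt >= 90;
}

/* Stand-in for the WaitLatch() sleep; a real caller would block here. */
static void
wait_ms(int ms)
{
	(void) ms;
}

int
main(void)
{
	int			lock_retry = 0;
	bool		got_lock = false;

	while (true)
	{
		if (try_exclusive_lock(lock_retry))
		{
			got_lock = true;
			break;
		}

		/* Give up truncating rather than block or deadlock other backends. */
		if (++lock_retry > (TRUNCATE_LOCK_TIMEOUT / TRUNCATE_LOCK_WAIT_INTERVAL))
		{
			printf("stopping truncate due to conflicting lock request\n");
			break;
		}

		wait_ms(TRUNCATE_LOCK_WAIT_INTERVAL);
	}

	if (got_lock)
		printf("acquired exclusive lock after %d retries\n", lock_retry);
	return 0;
}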

◆ lazy_vacuum()

static void lazy_vacuum ( LVRelState * vacrel)
static

Definition at line 2444 of file vacuumlazy.c.

2445{
2446 bool bypass;
2447
2448 /* Should not end up here with no indexes */
2449 Assert(vacrel->nindexes > 0);
2450 Assert(vacrel->lpdead_item_pages > 0);
2451
2452 if (!vacrel->do_index_vacuuming)
2453 {
2454 Assert(!vacrel->do_index_cleanup);
2455 dead_items_reset(vacrel);
2456 return;
2457 }
2458
2459 /*
2460 * Consider bypassing index vacuuming (and heap vacuuming) entirely.
2461 *
2462 * We currently only do this in cases where the number of LP_DEAD items
2463 * for the entire VACUUM operation is close to zero. This avoids sharp
2464 * discontinuities in the duration and overhead of successive VACUUM
2465 * operations that run against the same table with a fixed workload.
2466 * Ideally, successive VACUUM operations will behave as if there are
2467 * exactly zero LP_DEAD items in cases where there are close to zero.
2468 *
2469 * This is likely to be helpful with a table that is continually affected
2470 * by UPDATEs that can mostly apply the HOT optimization, but occasionally
2471 * have small aberrations that lead to just a few heap pages retaining
2472 * only one or two LP_DEAD items. This is pretty common; even when the
2473 * DBA goes out of their way to make UPDATEs use HOT, it is practically
2474 * impossible to predict whether HOT will be applied in 100% of cases.
2475 * It's far easier to ensure that 99%+ of all UPDATEs against a table use
2476 * HOT through careful tuning.
2477 */
2478 bypass = false;
2479 if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
2480 {
2481 BlockNumber threshold;
2482
2483 Assert(vacrel->num_index_scans == 0);
2484 Assert(vacrel->lpdead_items == vacrel->dead_items_info->num_items);
2485 Assert(vacrel->do_index_vacuuming);
2486 Assert(vacrel->do_index_cleanup);
2487
2488 /*
2489 * This crossover point at which we'll start to do index vacuuming is
2490 * expressed as a percentage of the total number of heap pages in the
2491 * table that are known to have at least one LP_DEAD item. This is
2492 * much more important than the total number of LP_DEAD items, since
2493 * it's a proxy for the number of heap pages whose visibility map bits
2494 * cannot be set on account of bypassing index and heap vacuuming.
2495 *
2496 * We apply one further precautionary test: the space currently used
2497 * to store the TIDs (TIDs that now all point to LP_DEAD items) must
2498 * not exceed 32MB. This limits the risk that we will bypass index
2499 * vacuuming again and again until eventually there is a VACUUM whose
2500 * dead_items space is not CPU cache resident.
2501 *
2502 * We don't take any special steps to remember the LP_DEAD items (such
2503 * as counting them in our final update to the stats system) when the
2504 * optimization is applied. Though the accounting used in analyze.c's
2505 * acquire_sample_rows() will recognize the same LP_DEAD items as dead
2506 * rows in its own stats report, that's okay. The discrepancy should
2507 * be negligible. If this optimization is ever expanded to cover more
2508 * cases then this may need to be reconsidered.
2509 */
2510 threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
2511 bypass = (vacrel->lpdead_item_pages < threshold &&
2512 TidStoreMemoryUsage(vacrel->dead_items) < 32 * 1024 * 1024);
2513 }
2514
2515 if (bypass)
2516 {
2517 /*
2518 * There are almost zero TIDs. Behave as if there were precisely
2519 * zero: bypass index vacuuming, but do index cleanup.
2520 *
2521 * We expect that the ongoing VACUUM operation will finish very
2522 * quickly, so there is no point in considering speeding up as a
2523 * failsafe against wraparound failure. (Index cleanup is expected to
2524 * finish very quickly in cases where there were no ambulkdelete()
2525 * calls.)
2526 */
2527 vacrel->do_index_vacuuming = false;
2528 }
2529 else if (lazy_vacuum_all_indexes(vacrel))
2530 {
2531 /*
2532 * We successfully completed a round of index vacuuming. Do related
2533 * heap vacuuming now.
2534 */
2535 lazy_vacuum_heap_rel(vacrel);
2536 }
2537 else
2538 {
2539 /*
2540 * Failsafe case.
2541 *
2542 * We attempted index vacuuming, but didn't finish a full round/full
2543 * index scan. This happens when relfrozenxid or relminmxid is too
2544 * far in the past.
2545 *
2546 * From this point on the VACUUM operation will do no further index
2547 * vacuuming or heap vacuuming. This VACUUM operation won't end up
2548 * back here again.
2549 */
2550 Assert(VacuumFailsafeActive);
2551 }
2552
2553 /*
2554 * Forget the LP_DEAD items that we just vacuumed (or just decided to not
2555 * vacuum)
2556 */
2557 dead_items_reset(vacrel);
2558}
static void dead_items_reset(LVRelState *vacrel)
Definition: vacuumlazy.c:3555
#define BYPASS_THRESHOLD_PAGES
Definition: vacuumlazy.c:187
static bool lazy_vacuum_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2569
static void lazy_vacuum_heap_rel(LVRelState *vacrel)
Definition: vacuumlazy.c:2712

References Assert(), BYPASS_THRESHOLD_PAGES, LVRelState::consider_bypass_optimization, LVRelState::dead_items, LVRelState::dead_items_info, dead_items_reset(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, lazy_vacuum_all_indexes(), lazy_vacuum_heap_rel(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, LVRelState::rel_pages, TidStoreMemoryUsage(), and VacuumFailsafeActive.

Referenced by lazy_scan_heap().
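
The bypass test reduces to two inequalities: the number of pages with LP_DEAD items must be under BYPASS_THRESHOLD_PAGES (2%) of rel_pages, and the dead-items TID store must be using less than 32MB. A small worked example of that arithmetic; the page and memory figures below are hypothetical, not measurements from a real table:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BYPASS_THRESHOLD_PAGES	0.02	/* i.e. 2% of rel_pages */

int
main(void)
{
	uint32_t	rel_pages = 100000;		/* hypothetical table size in blocks */
	uint32_t	lpdead_item_pages = 1500;	/* pages with at least one LP_DEAD item */
	uint64_t	dead_items_bytes = 4ULL * 1024 * 1024;	/* TID store memory usage */

	double		threshold = (double) rel_pages * BYPASS_THRESHOLD_PAGES;
	bool		bypass = (lpdead_item_pages < threshold &&
						  dead_items_bytes < 32ULL * 1024 * 1024);

	printf("threshold = %.0f pages, bypass index vacuuming: %s\n",
		   threshold, bypass ? "yes" : "no");
	return 0;
}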

◆ lazy_vacuum_all_indexes()

static bool lazy_vacuum_all_indexes ( LVRelState * vacrel)
static

Definition at line 2569 of file vacuumlazy.c.

2570{
2571 bool allindexes = true;
2572 double old_live_tuples = vacrel->rel->rd_rel->reltuples;
2573 const int progress_start_index[] = {
2574 PROGRESS_VACUUM_PHASE,
2575 PROGRESS_VACUUM_INDEXES_TOTAL
2576 };
2577 const int progress_end_index[] = {
2578 PROGRESS_VACUUM_INDEXES_TOTAL,
2579 PROGRESS_VACUUM_INDEXES_PROCESSED,
2580 PROGRESS_VACUUM_NUM_INDEX_VACUUMS
2581 };
2582 int64 progress_start_val[2];
2583 int64 progress_end_val[3];
2584
2585 Assert(vacrel->nindexes > 0);
2586 Assert(vacrel->do_index_vacuuming);
2587 Assert(vacrel->do_index_cleanup);
2588
2589 /* Precheck for XID wraparound emergencies */
2590 if (lazy_check_wraparound_failsafe(vacrel))
2591 {
2592 /* Wraparound emergency -- don't even start an index scan */
2593 return false;
2594 }
2595
2596 /*
2597 * Report that we are now vacuuming indexes and the number of indexes to
2598 * vacuum.
2599 */
2600 progress_start_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_INDEX;
2601 progress_start_val[1] = vacrel->nindexes;
2602 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
2603
2604 if (!ParallelVacuumIsActive(vacrel))
2605 {
2606 for (int idx = 0; idx < vacrel->nindexes; idx++)
2607 {
2608 Relation indrel = vacrel->indrels[idx];
2609 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2610
2611 vacrel->indstats[idx] = lazy_vacuum_one_index(indrel, istat,
2612 old_live_tuples,
2613 vacrel);
2614
2615 /* Report the number of indexes vacuumed */
2616 pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
2617 idx + 1);
2618
2619 if (lazy_check_wraparound_failsafe(vacrel))
2620 {
2621 /* Wraparound emergency -- end current index scan */
2622 allindexes = false;
2623 break;
2624 }
2625 }
2626 }
2627 else
2628 {
2629 /* Outsource everything to parallel variant */
2630 parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples,
2631 vacrel->num_index_scans);
2632
2633 /*
2634 * Do a postcheck to consider applying wraparound failsafe now. Note
2635 * that parallel VACUUM only gets the precheck and this postcheck.
2636 */
2637 if (lazy_check_wraparound_failsafe(vacrel))
2638 allindexes = false;
2639 }
2640
2641 /*
2642 * We delete all LP_DEAD items from the first heap pass in all indexes on
2643 * each call here (except calls where we choose to do the failsafe). This
2644 * makes the next call to lazy_vacuum_heap_rel() safe (except in the event
2645 * of the failsafe triggering, which prevents the next call from taking
2646 * place).
2647 */
2648 Assert(vacrel->num_index_scans > 0 ||
2649 vacrel->dead_items_info->num_items == vacrel->lpdead_items);
2650 Assert(allindexes || VacuumFailsafeActive);
2651
2652 /*
2653 * Increase and report the number of index scans. Also, we reset
2654 * PROGRESS_VACUUM_INDEXES_TOTAL and PROGRESS_VACUUM_INDEXES_PROCESSED.
2655 *
2656 * We deliberately include the case where we started a round of bulk
2657 * deletes that we weren't able to finish due to the failsafe triggering.
2658 */
2659 vacrel->num_index_scans++;
2660 progress_end_val[0] = 0;
2661 progress_end_val[1] = 0;
2662 progress_end_val[2] = vacrel->num_index_scans;
2663 pgstat_progress_update_multi_param(3, progress_end_index, progress_end_val);
2664
2665 return allindexes;
2666}
#define PROGRESS_VACUUM_NUM_INDEX_VACUUMS
Definition: progress.h:25
#define PROGRESS_VACUUM_PHASE_VACUUM_INDEX
Definition: progress.h:35
static IndexBulkDeleteResult * lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
Definition: vacuumlazy.c:3066
void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans)

References Assert(), LVRelState::dead_items_info, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, idx(), LVRelState::indrels, LVRelState::indstats, lazy_check_wraparound_failsafe(), lazy_vacuum_one_index(), LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, parallel_vacuum_bulkdel_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, LVRelState::pvs, RelationData::rd_rel, LVRelState::rel, and VacuumFailsafeActive.

Referenced by lazy_vacuum().
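
In the serial (non-parallel) path the function simply walks vacrel->indrels, bulk-deletes from each index, and re-checks the wraparound failsafe after every index, abandoning the rest of the round if it fires. A stripped-down model of that control flow; vacuum_one_index() and failsafe_triggered() are hypothetical stubs standing in for lazy_vacuum_one_index() and lazy_check_wraparound_failsafe():

#include <stdbool.h>
#include <stdio.h>

#define NINDEXES 4

/* Stand-in for lazy_vacuum_one_index(): bulk-delete dead TIDs from one index. */
static void
vacuum_one_index(int idx)
{
	printf("bulk-deleted dead TIDs from index %d\n", idx);
}

/* Stand-in for lazy_check_wraparound_failsafe(): fires after the third index here. */
static bool
failsafe_triggered(int indexes_done)
{
	return indexes_done >= 3;
}

int
main(void)
{
	bool		allindexes = true;

	for (int idx = 0; idx < NINDEXES; idx++)
	{
		vacuum_one_index(idx);

		if (failsafe_triggered(idx + 1))
		{
			/* Wraparound emergency -- end the current index scan */
			allindexes = false;
			break;
		}
	}

	printf("completed a full round of index vacuuming: %s\n",
		   allindexes ? "yes" : "no");
	return 0;
}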

◆ lazy_vacuum_heap_page()

static void lazy_vacuum_heap_page ( LVRelState * vacrel,
BlockNumber  blkno,
Buffer  buffer,
OffsetNumber * deadoffsets,
int  num_offsets,
Buffer  vmbuffer 
)
static

Definition at line 2822 of file vacuumlazy.c.

2825{
2826 Page page = BufferGetPage(buffer);
2827 OffsetNumber unused[MaxHeapTuplesPerPage];
2828 int nunused = 0;
2829 TransactionId visibility_cutoff_xid;
2830 bool all_frozen;
2831 LVSavedErrInfo saved_err_info;
2832
2833 Assert(vacrel->do_index_vacuuming);
2834
2835 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
2836
2837 /* Update error traceback information */
2838 update_vacuum_error_info(vacrel, &saved_err_info,
2839 VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
2840 InvalidOffsetNumber);
2841
2842 START_CRIT_SECTION();
2843
2844 for (int i = 0; i < num_offsets; i++)
2845 {
2846 ItemId itemid;
2847 OffsetNumber toff = deadoffsets[i];
2848
2849 itemid = PageGetItemId(page, toff);
2850
2851 Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
2852 ItemIdSetUnused(itemid);
2853 unused[nunused++] = toff;
2854 }
2855
2856 Assert(nunused > 0);
2857
2858 /* Attempt to truncate line pointer array now */
2859 PageTruncateLinePointerArray(page);
2860
2861 /*
2862 * Mark buffer dirty before we write WAL.
2863 */
2864 MarkBufferDirty(buffer);
2865
2866 /* XLOG stuff */
2867 if (RelationNeedsWAL(vacrel->rel))
2868 {
2869 log_heap_prune_and_freeze(vacrel->rel, buffer,
2870 InvalidTransactionId,
2871 false, /* no cleanup lock required */
2872 PRUNE_VACUUM_CLEANUP,
2873 NULL, 0, /* frozen */
2874 NULL, 0, /* redirected */
2875 NULL, 0, /* dead */
2876 unused, nunused);
2877 }
2878
2879 /*
2880 * End critical section, so we safely can do visibility tests (which
2881 * possibly need to perform IO and allocate memory!). If we crash now the
2882 * page (including the corresponding vm bit) might not be marked all
2883 * visible, but that's fine. A later vacuum will fix that.
2884 */
2885 END_CRIT_SECTION();
2886
2887 /*
2888 * Now that we have removed the LP_DEAD items from the page, once again
2889 * check if the page has become all-visible. The page is already marked
2890 * dirty, exclusively locked, and, if needed, a full page image has been
2891 * emitted.
2892 */
2893 Assert(!PageIsAllVisible(page));
2894 if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
2895 &all_frozen))
2896 {
2897 uint8 old_vmbits;
2898 uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
2899
2900 if (all_frozen)
2901 {
2902 Assert(!TransactionIdIsValid(visibility_cutoff_xid));
2903 flags |= VISIBILITYMAP_ALL_FROZEN;
2904 }
2905
2906 PageSetAllVisible(page);
2907 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buffer,
2908 InvalidXLogRecPtr,
2909 vmbuffer, visibility_cutoff_xid,
2910 flags);
2911
2912 /*
2913 * If the page wasn't already set all-visible and/or all-frozen in the
2914 * VM, count it as newly set for logging.
2915 */
2916 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2917 {
2918 vacrel->vm_new_visible_pages++;
2919 if (all_frozen)
2920 vacrel->vm_new_visible_frozen_pages++;
2921 }
2922
2923 else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
2924 all_frozen)
2925 vacrel->vm_new_frozen_pages++;
2926 }
2927
2928 /* Revert to the previous phase information for error traceback */
2929 restore_vacuum_error_info(vacrel, &saved_err_info);
2930}
void PageTruncateLinePointerArray(Page page)
Definition: bufpage.c:824
@ PRUNE_VACUUM_CLEANUP
Definition: heapam.h:280
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
void log_heap_prune_and_freeze(Relation relation, Buffer buffer, TransactionId conflict_xid, bool cleanup_lock, PruneReason reason, HeapTupleFreeze *frozen, int nfrozen, OffsetNumber *redirected, int nredirected, OffsetNumber *dead, int ndead, OffsetNumber *unused, int nunused)
Definition: pruneheap.c:2053

References Assert(), BufferGetPage(), LVRelState::do_index_vacuuming, END_CRIT_SECTION, heap_page_is_all_visible(), i, InvalidOffsetNumber, InvalidTransactionId, InvalidXLogRecPtr, ItemIdHasStorage, ItemIdIsDead, ItemIdSetUnused, log_heap_prune_and_freeze(), MarkBufferDirty(), MaxHeapTuplesPerPage, PageGetItemId(), PageIsAllVisible(), PageSetAllVisible(), PageTruncateLinePointerArray(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PRUNE_VACUUM_CLEANUP, LVRelState::rel, RelationNeedsWAL, restore_vacuum_error_info(), START_CRIT_SECTION, TransactionIdIsValid, update_vacuum_error_info(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, and LVRelState::vm_new_visible_pages.

Referenced by lazy_vacuum_heap_rel().
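
The core of the second-pass page processing is purely mechanical: every offset in deadoffsets names an LP_DEAD line pointer, which is flipped to LP_UNUSED and remembered for the WAL record. A self-contained sketch of that bookkeeping against a toy line-pointer array; the lp_flags array and flag constants are simplified stand-ins for the real ItemId machinery:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t OffsetNumber;

enum
{
	LP_UNUSED = 0,				/* simplified stand-ins for itemid.h flags */
	LP_NORMAL = 1,
	LP_DEAD = 3
};

int
main(void)
{
	/* toy line-pointer array, indexed from offset 1 as on a heap page */
	int			lp_flags[9] = {0, LP_NORMAL, LP_DEAD, LP_NORMAL, LP_DEAD,
							   LP_NORMAL, LP_DEAD, LP_NORMAL, LP_NORMAL};
	OffsetNumber deadoffsets[] = {2, 4, 6};
	OffsetNumber unused[8];
	int			nunused = 0;

	for (int i = 0; i < 3; i++)
	{
		OffsetNumber toff = deadoffsets[i];

		/* each offset must point at a dead, storage-less item */
		if (lp_flags[toff] != LP_DEAD)
			continue;

		lp_flags[toff] = LP_UNUSED; /* analogous to ItemIdSetUnused() */
		unused[nunused++] = toff;
	}

	printf("marked %d line pointers unused:", nunused);
	for (int i = 0; i < nunused; i++)
		printf(" %u", (unsigned) unused[i]);
	printf("\n");
	return 0;
}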

◆ lazy_vacuum_heap_rel()

static void lazy_vacuum_heap_rel ( LVRelState * vacrel)
static

Definition at line 2712 of file vacuumlazy.c.

2713{
2714 ReadStream *stream;
2715 BlockNumber vacuumed_pages = 0;
2716 Buffer vmbuffer = InvalidBuffer;
2717 LVSavedErrInfo saved_err_info;
2718 TidStoreIter *iter;
2719
2720 Assert(vacrel->do_index_vacuuming);
2721 Assert(vacrel->do_index_cleanup);
2722 Assert(vacrel->num_index_scans > 0);
2723
2724 /* Report that we are now vacuuming the heap */
2725 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2726 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
2727
2728 /* Update error traceback information */
2729 update_vacuum_error_info(vacrel, &saved_err_info,
2730 VACUUM_ERRCB_PHASE_VACUUM_HEAP,
2731 InvalidBlockNumber, InvalidOffsetNumber);
2732
2733 iter = TidStoreBeginIterate(vacrel->dead_items);
2734
2735 /* Set up the read stream for vacuum's second pass through the heap */
2736 stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE,
2737 vacrel->bstrategy,
2738 vacrel->rel,
2739 MAIN_FORKNUM,
2740 vacuum_reap_lp_read_stream_next,
2741 iter,
2742 sizeof(TidStoreIterResult));
2743
2744 while (true)
2745 {
2746 BlockNumber blkno;
2747 Buffer buf;
2748 Page page;
2749 TidStoreIterResult *iter_result;
2750 Size freespace;
2751 OffsetNumber offsets[MaxOffsetNumber];
2752 int num_offsets;
2753
2754 vacuum_delay_point(false);
2755
2756 buf = read_stream_next_buffer(stream, (void **) &iter_result);
2757
2758 /* The relation is exhausted */
2759 if (!BufferIsValid(buf))
2760 break;
2761
2762 vacrel->blkno = blkno = BufferGetBlockNumber(buf);
2763
2764 Assert(iter_result);
2765 num_offsets = TidStoreGetBlockOffsets(iter_result, offsets, lengthof(offsets));
2766 Assert(num_offsets <= lengthof(offsets));
2767
2768 /*
2769 * Pin the visibility map page in case we need to mark the page
2770 * all-visible. In most cases this will be very cheap, because we'll
2771 * already have the correct page pinned anyway.
2772 */
2773 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
2774
2775 /* We need a non-cleanup exclusive lock to mark dead_items unused */
2776 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2777 lazy_vacuum_heap_page(vacrel, blkno, buf, offsets,
2778 num_offsets, vmbuffer);
2779
2780 /* Now that we've vacuumed the page, record its available space */
2781 page = BufferGetPage(buf);
2782 freespace = PageGetHeapFreeSpace(page);
2783
2784 UnlockReleaseBuffer(buf);
2785 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
2786 vacuumed_pages++;
2787 }
2788
2789 read_stream_end(stream);
2790 TidStoreEndIterate(iter);
2791
2792 vacrel->blkno = InvalidBlockNumber;
2793 if (BufferIsValid(vmbuffer))
2794 ReleaseBuffer(vmbuffer);
2795
2796 /*
2797 * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
2798 * the second heap pass. No more, no less.
2799 */
2800 Assert(vacrel->num_index_scans > 1 ||
2801 (vacrel->dead_items_info->num_items == vacrel->lpdead_items &&
2802 vacuumed_pages == vacrel->lpdead_item_pages));
2803
2804 ereport(DEBUG2,
2805 (errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
2806 vacrel->relname, (long long) vacrel->dead_items_info->num_items,
2807 vacuumed_pages)));
2808
2809 /* Revert to the previous phase information for error traceback */
2810 restore_vacuum_error_info(vacrel, &saved_err_info);
2811}
#define lengthof(array)
Definition: c.h:759
#define MaxOffsetNumber
Definition: off.h:28
#define PROGRESS_VACUUM_PHASE_VACUUM_HEAP
Definition: progress.h:36
TidStoreIter * TidStoreBeginIterate(TidStore *ts)
Definition: tidstore.c:471
void TidStoreEndIterate(TidStoreIter *iter)
Definition: tidstore.c:518
int TidStoreGetBlockOffsets(TidStoreIterResult *result, OffsetNumber *offsets, int max_offsets)
Definition: tidstore.c:566
static BlockNumber vacuum_reap_lp_read_stream_next(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition: vacuumlazy.c:2674
static void lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
Definition: vacuumlazy.c:2822

References Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_EXCLUSIVE, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, ereport, errmsg(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_vacuum_heap_page(), lengthof, LockBuffer(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAIN_FORKNUM, MaxOffsetNumber, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, PageGetHeapFreeSpace(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, read_stream_begin_relation(), read_stream_end(), READ_STREAM_MAINTENANCE, read_stream_next_buffer(), RecordPageWithFreeSpace(), LVRelState::rel, ReleaseBuffer(), LVRelState::relname, restore_vacuum_error_info(), TidStoreBeginIterate(), TidStoreEndIterate(), TidStoreGetBlockOffsets(), UnlockReleaseBuffer(), update_vacuum_error_info(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, vacuum_reap_lp_read_stream_next(), and visibilitymap_pin().

Referenced by lazy_vacuum().
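
The overall shape of the second heap pass is: pull the next block and its dead offsets from the TID store, vacuum that page, then record the page's free space and count it. A compact model of that per-block loop using a plain array of (block, offsets) entries in place of the TidStore iterator and read stream; the entries are invented for the example:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t BlockNumber;
typedef uint16_t OffsetNumber;

/* Stand-in for one TidStoreIterResult: a block plus its dead offsets. */
typedef struct
{
	BlockNumber blkno;
	int			num_offsets;
	OffsetNumber offsets[4];
} DeadItemsEntry;

int
main(void)
{
	DeadItemsEntry dead_items[] = {
		{7, 2, {3, 11}},
		{42, 1, {5}},
		{99, 3, {1, 2, 8}},
	};
	BlockNumber vacuumed_pages = 0;

	for (size_t i = 0; i < sizeof(dead_items) / sizeof(dead_items[0]); i++)
	{
		DeadItemsEntry *e = &dead_items[i];

		/* in the server: lock the buffer, set the offsets LP_UNUSED, WAL-log */
		printf("block %u: set %d dead item(s) unused\n",
			   (unsigned) e->blkno, e->num_offsets);

		/* in the server: PageGetHeapFreeSpace() + RecordPageWithFreeSpace() */
		vacuumed_pages++;
	}

	printf("removed dead item identifiers in %u pages\n",
		   (unsigned) vacuumed_pages);
	return 0;
}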

◆ lazy_vacuum_one_index()

static IndexBulkDeleteResult * lazy_vacuum_one_index ( Relation  indrel,
IndexBulkDeleteResult * istat,
double  reltuples,
LVRelState * vacrel 
)
static

Definition at line 3066 of file vacuumlazy.c.

3068{
3069 IndexVacuumInfo ivinfo;
3070 LVSavedErrInfo saved_err_info;
3071
3072 ivinfo.index = indrel;
3073 ivinfo.heaprel = vacrel->rel;
3074 ivinfo.analyze_only = false;
3075 ivinfo.report_progress = false;
3076 ivinfo.estimated_count = true;
3077 ivinfo.message_level = DEBUG2;
3078 ivinfo.num_heap_tuples = reltuples;
3079 ivinfo.strategy = vacrel->bstrategy;
3080
3081 /*
3082 * Update error traceback information.
3083 *
3084 * The index name is saved during this phase and restored immediately
3085 * after this phase. See vacuum_error_callback.
3086 */
3087 Assert(vacrel->indname == NULL);
3088 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
3089 update_vacuum_error_info(vacrel, &saved_err_info,
3090 VACUUM_ERRCB_PHASE_VACUUM_INDEX,
3091 InvalidBlockNumber, InvalidOffsetNumber);
3092
3093 /* Do bulk deletion */
3094 istat = vac_bulkdel_one_index(&ivinfo, istat, vacrel->dead_items,
3095 vacrel->dead_items_info);
3096
3097 /* Revert to the previous phase information for error traceback */
3098 restore_vacuum_error_info(vacrel, &saved_err_info);
3099 pfree(vacrel->indname);
3100 vacrel->indname = NULL;
3101
3102 return istat;
3103}
IndexBulkDeleteResult * vac_bulkdel_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat, TidStore *dead_items, VacDeadItemsInfo *dead_items_info)
Definition: vacuum.c:2609

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_bulkdel_one_index(), and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by lazy_vacuum_all_indexes().

◆ restore_vacuum_error_info()

static void restore_vacuum_error_info ( LVRelState * vacrel,
const LVSavedErrInfo * saved_vacrel 
)
static

Definition at line 3834 of file vacuumlazy.c.

3836{
3837 vacrel->blkno = saved_vacrel->blkno;
3838 vacrel->offnum = saved_vacrel->offnum;
3839 vacrel->phase = saved_vacrel->phase;
3840}
BlockNumber blkno
Definition: vacuumlazy.c:417
VacErrPhase phase
Definition: vacuumlazy.c:419
OffsetNumber offnum
Definition: vacuumlazy.c:418

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().

◆ should_attempt_truncation()

static bool should_attempt_truncation ( LVRelState * vacrel)
static

Definition at line 3175 of file vacuumlazy.c.

3176{
3177 BlockNumber possibly_freeable;
3178
3179 if (!vacrel->do_rel_truncate || VacuumFailsafeActive)
3180 return false;
3181
3182 possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
3183 if (possibly_freeable > 0 &&
3184 (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
3185 possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
3186 return true;
3187
3188 return false;
3189}
#define REL_TRUNCATE_MINIMUM
Definition: vacuumlazy.c:169
#define REL_TRUNCATE_FRACTION
Definition: vacuumlazy.c:170

References LVRelState::do_rel_truncate, LVRelState::nonempty_pages, LVRelState::rel_pages, REL_TRUNCATE_FRACTION, REL_TRUNCATE_MINIMUM, and VacuumFailsafeActive.

Referenced by heap_vacuum_rel().
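
Putting numbers on the test: the free tail must be at least REL_TRUNCATE_MINIMUM (1000) blocks or at least 1/REL_TRUNCATE_FRACTION (one sixteenth) of the table before truncation is worth taking AccessExclusiveLock. A quick worked example with hypothetical page counts:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REL_TRUNCATE_MINIMUM	1000
#define REL_TRUNCATE_FRACTION	16

static bool
worth_truncating(uint32_t rel_pages, uint32_t nonempty_pages)
{
	uint32_t	possibly_freeable = rel_pages - nonempty_pages;

	return possibly_freeable > 0 &&
		(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
		 possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);
}

int
main(void)
{
	/* 8000-block table, 600 empty tail blocks: 600 >= 8000/16, so truncate */
	printf("%s\n", worth_truncating(8000, 7400) ? "truncate" : "skip");

	/* 64000-block table, 600 empty tail blocks: 600 < 1000 and < 4000, so skip */
	printf("%s\n", worth_truncating(64000, 63400) ? "truncate" : "skip");
	return 0;
}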

◆ update_relstats_all_indexes()

static void update_relstats_all_indexes ( LVRelState * vacrel)
static

Definition at line 3716 of file vacuumlazy.c.

3717{
3718 Relation *indrels = vacrel->indrels;
3719 int nindexes = vacrel->nindexes;
3720 IndexBulkDeleteResult **indstats = vacrel->indstats;
3721
3722 Assert(vacrel->do_index_cleanup);
3723
3724 for (int idx = 0; idx < nindexes; idx++)
3725 {
3726 Relation indrel = indrels[idx];
3727 IndexBulkDeleteResult *istat = indstats[idx];
3728
3729 if (istat == NULL || istat->estimated_count)
3730 continue;
3731
3732 /* Update index statistics */
3733 vac_update_relstats(indrel,
3734 istat->num_pages,
3735 istat->num_index_tuples,
3736 0, 0,
3737 false,
3738 InvalidTransactionId,
3739 InvalidMultiXactId,
3740 NULL, NULL, false);
3741 }
3742}
double num_index_tuples
Definition: genam.h:102

References Assert(), LVRelState::do_index_cleanup, IndexBulkDeleteResult::estimated_count, idx(), LVRelState::indrels, LVRelState::indstats, InvalidMultiXactId, InvalidTransactionId, LVRelState::nindexes, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, and vac_update_relstats().

Referenced by heap_vacuum_rel().

◆ update_vacuum_error_info()

static void update_vacuum_error_info ( LVRelState * vacrel,
LVSavedErrInfo * saved_vacrel,
int  phase,
BlockNumber  blkno,
OffsetNumber  offnum 
)
static

Definition at line 3815 of file vacuumlazy.c.

3817{
3818 if (saved_vacrel)
3819 {
3820 saved_vacrel->offnum = vacrel->offnum;
3821 saved_vacrel->blkno = vacrel->blkno;
3822 saved_vacrel->phase = vacrel->phase;
3823 }
3824
3825 vacrel->blkno = blkno;
3826 vacrel->offnum = offnum;
3827 vacrel->phase = phase;
3828}

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_scan_heap(), lazy_truncate_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().
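
Together with restore_vacuum_error_info() above, this is a plain save/overwrite/restore pattern: a nested phase saves the caller's error position, installs its own, and puts the old values back when it finishes. A minimal standalone sketch of the same idea; the ErrInfo struct and phase numbers are toy stand-ins for LVRelState/LVSavedErrInfo and VacErrPhase:

#include <stdint.h>
#include <stdio.h>

typedef struct
{
	uint32_t	blkno;
	uint16_t	offnum;
	int			phase;
} ErrInfo;

/* Overwrite the live error info, optionally saving the previous values. */
static void
update_err_info(ErrInfo *live, ErrInfo *saved,
				int phase, uint32_t blkno, uint16_t offnum)
{
	if (saved)
		*saved = *live;

	live->blkno = blkno;
	live->offnum = offnum;
	live->phase = phase;
}

/* Put back whatever was saved before the nested phase began. */
static void
restore_err_info(ErrInfo *live, const ErrInfo *saved)
{
	*live = *saved;
}

int
main(void)
{
	ErrInfo		live = {0, 0, 0};
	ErrInfo		saved;

	update_err_info(&live, NULL, 1, 10, 3);		/* e.g. scanning block 10 */
	update_err_info(&live, &saved, 3, 10, 0);	/* nested: vacuuming block 10 */
	restore_err_info(&live, &saved);			/* back to the scan phase */

	printf("phase=%d blkno=%u offnum=%u\n",
		   live.phase, (unsigned) live.blkno, (unsigned) live.offnum);
	return 0;
}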

◆ vacuum_error_callback()

static void vacuum_error_callback ( void *  arg)
static

Definition at line 3751 of file vacuumlazy.c.

3752{
3753 LVRelState *errinfo = arg;
3754
3755 switch (errinfo->phase)
3756 {
3757 case VACUUM_ERRCB_PHASE_SCAN_HEAP:
3758 if (BlockNumberIsValid(errinfo->blkno))
3759 {
3760 if (OffsetNumberIsValid(errinfo->offnum))
3761 errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
3762 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3763 else
3764 errcontext("while scanning block %u of relation \"%s.%s\"",
3765 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3766 }
3767 else
3768 errcontext("while scanning relation \"%s.%s\"",
3769 errinfo->relnamespace, errinfo->relname);
3770 break;
3771
3772 case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
3773 if (BlockNumberIsValid(errinfo->blkno))
3774 {
3775 if (OffsetNumberIsValid(errinfo->offnum))
3776 errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
3777 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3778 else
3779 errcontext("while vacuuming block %u of relation \"%s.%s\"",
3780 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3781 }
3782 else
3783 errcontext("while vacuuming relation \"%s.%s\"",
3784 errinfo->relnamespace, errinfo->relname);
3785 break;
3786
3787 case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
3788 errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
3789 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3790 break;
3791
3792 case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
3793 errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
3794 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3795 break;
3796
3797 case VACUUM_ERRCB_PHASE_TRUNCATE:
3798 if (BlockNumberIsValid(errinfo->blkno))
3799 errcontext("while truncating relation \"%s.%s\" to %u blocks",
3800 errinfo->relnamespace, errinfo->relname, errinfo->blkno);
3801 break;
3802
3803 case VACUUM_ERRCB_PHASE_UNKNOWN:
3804 default:
3805 return; /* do nothing; the errinfo may not be
3806 * initialized */
3807 }
3808}
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
#define errcontext
Definition: elog.h:196
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
void * arg

References arg, LVRelState::blkno, BlockNumberIsValid(), errcontext, LVRelState::indname, LVRelState::offnum, OffsetNumberIsValid, LVRelState::phase, LVRelState::relname, LVRelState::relnamespace, VACUUM_ERRCB_PHASE_INDEX_CLEANUP, VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_ERRCB_PHASE_UNKNOWN, VACUUM_ERRCB_PHASE_VACUUM_HEAP, and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by heap_vacuum_rel().

◆ vacuum_reap_lp_read_stream_next()

static BlockNumber vacuum_reap_lp_read_stream_next ( ReadStream * stream,
void *  callback_private_data,
void *  per_buffer_data 
)
static

Definition at line 2674 of file vacuumlazy.c.

2677{
2678 TidStoreIter *iter = callback_private_data;
2679 TidStoreIterResult *iter_result;
2680
2681 iter_result = TidStoreIterateNext(iter);
2682 if (iter_result == NULL)
2683 return InvalidBlockNumber;
2684
2685 /*
2686 * Save the TidStoreIterResult for later, so we can extract the offsets.
2687 * It is safe to copy the result, according to TidStoreIterateNext().
2688 */
2689 memcpy(per_buffer_data, iter_result, sizeof(*iter_result));
2690
2691 return iter_result->blkno;
2692}
BlockNumber blkno
Definition: tidstore.h:29
TidStoreIterResult * TidStoreIterateNext(TidStoreIter *iter)
Definition: tidstore.c:493

References TidStoreIterResult::blkno, InvalidBlockNumber, and TidStoreIterateNext().

Referenced by lazy_vacuum_heap_rel().
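
The callback is a pull-style producer: each call advances the TidStore iterator, copies the per-block result into the stream's per-buffer storage, and returns the next block number (or InvalidBlockNumber when the store is exhausted). A self-contained sketch of that callback shape using an array-backed iterator in place of TidStoreIterateNext(); the IterResult/Iter types and the sentinel value are invented for the example:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t BlockNumber;
#define INVALID_BLOCK_NUMBER ((BlockNumber) 0xFFFFFFFF)

typedef struct
{
	BlockNumber blkno;
	int			ndead;
} IterResult;

typedef struct
{
	const IterResult *items;
	int			nitems;
	int			pos;
} Iter;

/* Callback shape: return the next block to read, or the invalid sentinel. */
static BlockNumber
next_block(void *callback_private_data, void *per_buffer_data)
{
	Iter	   *iter = callback_private_data;

	if (iter->pos >= iter->nitems)
		return INVALID_BLOCK_NUMBER;

	/* Stash the per-block result so the consumer can use it later. */
	memcpy(per_buffer_data, &iter->items[iter->pos], sizeof(IterResult));
	return iter->items[iter->pos++].blkno;
}

int
main(void)
{
	const IterResult items[] = {{7, 2}, {42, 1}, {99, 3}};
	Iter		iter = {items, 3, 0};
	IterResult	per_buffer;
	BlockNumber blkno;

	while ((blkno = next_block(&iter, &per_buffer)) != INVALID_BLOCK_NUMBER)
		printf("block %u has %d dead item(s)\n", (unsigned) blkno, per_buffer.ndead);
	return 0;
}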