PostgreSQL Source Code git master
vacuumlazy.c File Reference
#include "postgres.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/tidstore.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "catalog/storage.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "common/int.h"
#include "common/pg_prng.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/latch.h"
#include "storage/lmgr.h"
#include "storage/read_stream.h"
#include "utils/lsyscache.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"
Go to the source code of this file.

Data Structures

struct  LVRelState
 
struct  LVSavedErrInfo
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */
 
#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define ParallelVacuumIsActive(vacrel)   ((vacrel)->pvs != NULL)
 
#define MAX_EAGER_FREEZE_SUCCESS_RATE   0.2
 
#define EAGER_SCAN_REGION_SIZE   4096
 

Typedefs

typedef struct LVRelState LVRelState
 
typedef struct LVSavedErrInfo LVSavedErrInfo
 

Enumerations

enum  VacErrPhase {
  VACUUM_ERRCB_PHASE_UNKNOWN , VACUUM_ERRCB_PHASE_SCAN_HEAP , VACUUM_ERRCB_PHASE_VACUUM_INDEX , VACUUM_ERRCB_PHASE_VACUUM_HEAP ,
  VACUUM_ERRCB_PHASE_INDEX_CLEANUP , VACUUM_ERRCB_PHASE_TRUNCATE
}
 

Functions

static void lazy_scan_heap (LVRelState *vacrel)
 
static void heap_vacuum_eager_scan_setup (LVRelState *vacrel, const VacuumParams params)
 
static BlockNumber heap_vac_scan_next_block (ReadStream *stream, void *callback_private_data, void *per_buffer_data)
 
static void find_next_unskippable_block (LVRelState *vacrel, bool *skipsallvis)
 
static bool lazy_scan_new_or_empty (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
 
static void identify_and_fix_vm_corruption (Relation rel, Buffer heap_buffer, BlockNumber heap_blk, Page heap_page, int nlpdead_items, Buffer vmbuffer, uint8 *vmbits)
 
static int lazy_scan_prune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool *has_lpdead_items, bool *vm_page_frozen)
 
static bool lazy_scan_noprune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
 
static void lazy_vacuum (LVRelState *vacrel)
 
static bool lazy_vacuum_all_indexes (LVRelState *vacrel)
 
static void lazy_vacuum_heap_rel (LVRelState *vacrel)
 
static void lazy_vacuum_heap_page (LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
 
static bool lazy_check_wraparound_failsafe (LVRelState *vacrel)
 
static void lazy_cleanup_all_indexes (LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_vacuum_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_cleanup_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
 
static bool should_attempt_truncation (LVRelState *vacrel)
 
static void lazy_truncate_heap (LVRelState *vacrel)
 
static BlockNumber count_nondeletable_pages (LVRelState *vacrel, bool *lock_waiter_detected)
 
static void dead_items_alloc (LVRelState *vacrel, int nworkers)
 
static void dead_items_add (LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
 
static void dead_items_reset (LVRelState *vacrel)
 
static void dead_items_cleanup (LVRelState *vacrel)
 
static bool heap_page_would_be_all_visible (Relation rel, Buffer buf, TransactionId OldestXmin, OffsetNumber *deadoffsets, int ndeadoffsets, bool *all_frozen, TransactionId *visibility_cutoff_xid, OffsetNumber *logging_offnum)
 
static void update_relstats_all_indexes (LVRelState *vacrel)
 
static void vacuum_error_callback (void *arg)
 
static void update_vacuum_error_info (LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
 
static void restore_vacuum_error_info (LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
 
void heap_vacuum_rel (Relation rel, const VacuumParams params, BufferAccessStrategy bstrategy)
 
static int cmpOffsetNumbers (const void *a, const void *b)
 
static BlockNumber vacuum_reap_lp_read_stream_next (ReadStream *stream, void *callback_private_data, void *per_buffer_data)
 

Macro Definition Documentation

◆ BYPASS_THRESHOLD_PAGES

#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */

Definition at line 185 of file vacuumlazy.c.
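For orientation, lazy_vacuum() compares the number of pages with LP_DEAD items against this threshold when deciding whether index vacuuming can be bypassed. A minimal sketch of the arithmetic with hypothetical numbers (the real test, which also caps dead-item memory, lives in lazy_vacuum()):

    BlockNumber rel_pages = 100000;       /* hypothetical table size in blocks */
    BlockNumber lpdead_item_pages = 1500; /* hypothetical pages bearing LP_DEAD items */
    double threshold = BYPASS_THRESHOLD_PAGES * rel_pages; /* 2% => 2000 pages */
    bool may_bypass = (lpdead_item_pages < threshold);     /* true in this example */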

◆ EAGER_SCAN_REGION_SIZE

#define EAGER_SCAN_REGION_SIZE   4096

Definition at line 248 of file vacuumlazy.c.
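To make the region size concrete: heap_vacuum_eager_scan_setup() derives the per-region failure cap from this constant. A sketch, assuming a vacuum_max_eager_freeze_failure_rate of 0.03 (an assumed value; consult the GUC for the actual setting):

    #include <math.h>

    double failure_rate = 0.03;  /* assumed vacuum_max_eager_freeze_failure_rate */
    BlockNumber max_fails = (BlockNumber) ceil(failure_rate * EAGER_SCAN_REGION_SIZE);
    /* ceil(0.03 * 4096) = 123 eager-freeze failures tolerated per region */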

◆ FAILSAFE_EVERY_PAGES

#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 191 of file vacuumlazy.c.
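Worked out for the common 8 kB block size (a sketch; BLCKSZ is configurable at build time):

    /* (4 * 1024^3) / 8192 = 524288 blocks, i.e. the wraparound failsafe is
     * rechecked after every 4 GB worth of scanned heap pages */
    BlockNumber failsafe_interval = ((uint64) 4 * 1024 * 1024 * 1024) / 8192;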

221typedef enum
222{
223 VACUUM_ERRCB_PHASE_UNKNOWN,
224 VACUUM_ERRCB_PHASE_SCAN_HEAP,
225 VACUUM_ERRCB_PHASE_VACUUM_INDEX,
226 VACUUM_ERRCB_PHASE_VACUUM_HEAP,
227 VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
228 VACUUM_ERRCB_PHASE_TRUNCATE,
229} VacErrPhase;
230
231/*
232 * An eager scan of a page that is set all-frozen in the VM is considered
233 * "successful". To spread out freezing overhead across multiple normal
234 * vacuums, we limit the number of successful eager page freezes. The maximum
235 * number of eager page freezes is calculated as a ratio of the all-visible
236 * but not all-frozen pages at the beginning of the vacuum.
237 */
238#define MAX_EAGER_FREEZE_SUCCESS_RATE 0.2
239
240/*
241 * On the assumption that different regions of the table tend to have
242 * similarly aged data, once vacuum fails to freeze
243 * vacuum_max_eager_freeze_failure_rate of the blocks in a region of size
244 * EAGER_SCAN_REGION_SIZE, it suspends eager scanning until it has progressed
245 * to another region of the table with potentially older data.
246 */
247#define EAGER_SCAN_REGION_SIZE 4096
248
249typedef struct LVRelState
250{
251 /* Target heap relation and its indexes */
252 Relation rel;
253 Relation *indrels;
254 int nindexes;
255
256 /* Buffer access strategy and parallel vacuum state */
257 BufferAccessStrategy bstrategy;
258 ParallelVacuumState *pvs;
259
260 /* Aggressive VACUUM? (must set relfrozenxid >= FreezeLimit) */
261 bool aggressive;
262 /* Use visibility map to skip? (disabled by DISABLE_PAGE_SKIPPING) */
263 bool skipwithvm;
264 /* Consider index vacuuming bypass optimization? */
265 bool consider_bypass_optimization;
266
267 /* Doing index vacuuming, index cleanup, rel truncation? */
268 bool do_index_vacuuming;
269 bool do_index_cleanup;
270 bool do_rel_truncate;
271
272 /* VACUUM operation's cutoffs for freezing and pruning */
273 struct VacuumCutoffs cutoffs;
274 GlobalVisState *vistest;
275 /* Tracks oldest extant XID/MXID for setting relfrozenxid/relminmxid */
276 TransactionId NewRelfrozenXid;
277 MultiXactId NewRelminMxid;
278 bool skippedallvis;
279
280 /* Error reporting state */
281 char *dbname;
282 char *relnamespace;
283 char *relname;
284 char *indname; /* Current index name */
285 BlockNumber blkno; /* used only for heap operations */
286 OffsetNumber offnum; /* used only for heap operations */
287 VacErrPhase phase;
288 bool verbose; /* VACUUM VERBOSE? */
289
290 /*
291 * dead_items stores TIDs whose index tuples are deleted by index
292 * vacuuming. Each TID points to an LP_DEAD line pointer from a heap page
293 * that has been processed by lazy_scan_prune. Also needed by
294 * lazy_vacuum_heap_rel, which marks the same LP_DEAD line pointers as
295 * LP_UNUSED during second heap pass.
296 *
297 * Both dead_items and dead_items_info are allocated in shared memory in
298 * parallel vacuum cases.
299 */
300 TidStore *dead_items; /* TIDs whose index tuples we'll delete */
301 VacDeadItemsInfo *dead_items_info;
302
303 BlockNumber rel_pages; /* total number of pages */
304 BlockNumber scanned_pages; /* # pages examined (not skipped via VM) */
305
306 /*
307 * Count of all-visible blocks eagerly scanned (for logging only). This
308 * does not include skippable blocks scanned due to SKIP_PAGES_THRESHOLD.
309 */
310 BlockNumber eager_scanned_pages;
311
312 BlockNumber removed_pages; /* # pages removed by relation truncation */
313 BlockNumber new_frozen_tuple_pages; /* # pages with newly frozen tuples */
314
315 /* # pages newly set all-visible in the VM */
316 BlockNumber new_all_visible_pages;
317
318 /*
319 * # pages newly set all-visible and all-frozen in the VM. This is a
320 * subset of new_all_visible_pages. That is, new_all_visible_pages
321 * includes all pages set all-visible, but
322 * new_all_visible_all_frozen_pages includes only those which were also
323 * set all-frozen.
324 */
325 BlockNumber new_all_visible_all_frozen_pages;
326
327 /* # all-visible pages newly set all-frozen in the VM */
328 BlockNumber new_all_frozen_pages;
329
330 BlockNumber lpdead_item_pages; /* # pages with LP_DEAD items */
331 BlockNumber missed_dead_pages; /* # pages with missed dead tuples */
332 BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
333
334 /* Statistics output by us, for table */
335 double new_rel_tuples; /* new estimated total # of tuples */
336 double new_live_tuples; /* new estimated total # of live tuples */
337 /* Statistics output by index AMs */
338 IndexBulkDeleteResult **indstats;
339
340 /* Instrumentation counters */
341 int num_index_scans;
342 int num_dead_items_resets;
343 int64 total_dead_items_bytes;
344 /* Counters that follow are only for scanned_pages */
345 int64 tuples_deleted; /* # deleted from table */
346 int64 tuples_frozen; /* # newly frozen */
347 int64 lpdead_items; /* # deleted from indexes */
348 int64 live_tuples; /* # live tuples remaining */
349 int64 recently_dead_tuples; /* # dead, but not yet removable */
350 int64 missed_dead_tuples; /* # removable, but not removed */
351
352 /* State maintained by heap_vac_scan_next_block() */
353 BlockNumber current_block; /* last block returned */
354 BlockNumber next_unskippable_block; /* next unskippable block */
355 bool next_unskippable_eager_scanned; /* if it was eagerly scanned */
356 Buffer next_unskippable_vmbuffer; /* buffer containing its VM bit */
357
358 /* State related to managing eager scanning of all-visible pages */
359
360 /*
361 * A normal vacuum that has failed to freeze too many eagerly scanned
362 * blocks in a region suspends eager scanning.
363 * next_eager_scan_region_start is the block number of the first block
364 * eligible for resumed eager scanning.
365 *
366 * When eager scanning is permanently disabled, either initially
367 * (including for aggressive vacuum) or due to hitting the success cap,
368 * this is set to InvalidBlockNumber.
369 */
370 BlockNumber next_eager_scan_region_start;
371
372 /*
373 * The remaining number of blocks a normal vacuum will consider eager
374 * scanning when it is successful. When eager scanning is enabled, this is
375 * initialized to MAX_EAGER_FREEZE_SUCCESS_RATE of the total number of
376 * all-visible but not all-frozen pages. For each eager freeze success,
377 * this is decremented. Once it hits 0, eager scanning is permanently
378 * disabled. It is initialized to 0 if eager scanning starts out disabled
379 * (including for aggressive vacuum).
380 */
381 BlockNumber eager_scan_remaining_successes;
382
383 /*
384 * The maximum number of blocks which may be eagerly scanned and not
385 * frozen before eager scanning is temporarily suspended. This is
386 * configurable both globally, via the
387 * vacuum_max_eager_freeze_failure_rate GUC, and per table, with a table
388 * storage parameter of the same name. It is calculated as
389 * vacuum_max_eager_freeze_failure_rate of EAGER_SCAN_REGION_SIZE blocks.
390 * It is 0 when eager scanning is disabled.
391 */
392 BlockNumber eager_scan_max_fails_per_region;
393
394 /*
395 * The number of eagerly scanned blocks vacuum failed to freeze (due to
396 * age) in the current eager scan region. Vacuum resets it to
397 * eager_scan_max_fails_per_region each time it enters a new region of the
398 * relation. If eager_scan_remaining_fails hits 0, eager scanning is
399 * suspended until the next region. It is also 0 if eager scanning has
400 * been permanently disabled.
401 */
402 BlockNumber eager_scan_remaining_fails;
403} LVRelState;
404
405
406/* Struct for saving and restoring vacuum error information. */
407typedef struct LVSavedErrInfo
408{
409 BlockNumber blkno;
410 OffsetNumber offnum;
411 VacErrPhase phase;
412} LVSavedErrInfo;
413
414
415/* non-export function prototypes */
416static void lazy_scan_heap(LVRelState *vacrel);
417static void heap_vacuum_eager_scan_setup(LVRelState *vacrel,
418 const VacuumParams params);
419static BlockNumber heap_vac_scan_next_block(ReadStream *stream,
420 void *callback_private_data,
421 void *per_buffer_data);
422static void find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis);
423static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
424 BlockNumber blkno, Page page,
425 bool sharelock, Buffer vmbuffer);
426static void identify_and_fix_vm_corruption(Relation rel, Buffer heap_buffer,
427 BlockNumber heap_blk, Page heap_page,
428 int nlpdead_items,
429 Buffer vmbuffer,
430 uint8 *vmbits);
431static int lazy_scan_prune(LVRelState *vacrel, Buffer buf,
432 BlockNumber blkno, Page page,
433 Buffer vmbuffer,
434 bool *has_lpdead_items, bool *vm_page_frozen);
435static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
436 BlockNumber blkno, Page page,
437 bool *has_lpdead_items);
438static void lazy_vacuum(LVRelState *vacrel);
439static bool lazy_vacuum_all_indexes(LVRelState *vacrel);
440static void lazy_vacuum_heap_rel(LVRelState *vacrel);
441static void lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno,
442 Buffer buffer, OffsetNumber *deadoffsets,
443 int num_offsets, Buffer vmbuffer);
444static bool lazy_check_wraparound_failsafe(LVRelState *vacrel);
445static void lazy_cleanup_all_indexes(LVRelState *vacrel);
446static IndexBulkDeleteResult *lazy_vacuum_one_index(Relation indrel,
447 IndexBulkDeleteResult *istat,
448 double reltuples,
449 LVRelState *vacrel);
450static IndexBulkDeleteResult *lazy_cleanup_one_index(Relation indrel,
451 IndexBulkDeleteResult *istat,
452 double reltuples,
453 bool estimated_count,
454 LVRelState *vacrel);
455static bool should_attempt_truncation(LVRelState *vacrel);
456static void lazy_truncate_heap(LVRelState *vacrel);
457static BlockNumber count_nondeletable_pages(LVRelState *vacrel,
458 bool *lock_waiter_detected);
459static void dead_items_alloc(LVRelState *vacrel, int nworkers);
460static void dead_items_add(LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets,
461 int num_offsets);
462static void dead_items_reset(LVRelState *vacrel);
463static void dead_items_cleanup(LVRelState *vacrel);
464
465#ifdef USE_ASSERT_CHECKING
466static bool heap_page_is_all_visible(Relation rel, Buffer buf,
467 TransactionId OldestXmin,
468 bool *all_frozen,
469 TransactionId *visibility_cutoff_xid,
470 OffsetNumber *logging_offnum);
471#endif
472static bool heap_page_would_be_all_visible(Relation rel, Buffer buf,
473 TransactionId OldestXmin,
474 OffsetNumber *deadoffsets,
475 int ndeadoffsets,
476 bool *all_frozen,
477 TransactionId *visibility_cutoff_xid,
478 OffsetNumber *logging_offnum);
479static void update_relstats_all_indexes(LVRelState *vacrel);
480static void vacuum_error_callback(void *arg);
481static void update_vacuum_error_info(LVRelState *vacrel,
482 LVSavedErrInfo *saved_vacrel,
483 int phase, BlockNumber blkno,
484 OffsetNumber offnum);
485static void restore_vacuum_error_info(LVRelState *vacrel,
486 const LVSavedErrInfo *saved_vacrel);
487
488
489
490/*
491 * Helper to set up the eager scanning state for vacuuming a single relation.
492 * Initializes the eager scan management related members of the LVRelState.
493 *
494 * Caller provides whether or not an aggressive vacuum is required due to
495 * vacuum options or for relfrozenxid/relminmxid advancement.
496 */
497static void
498heap_vacuum_eager_scan_setup(LVRelState *vacrel, const VacuumParams params)
499{
500 uint32 randseed;
501 BlockNumber allvisible;
502 BlockNumber allfrozen;
503 float first_region_ratio;
504 bool oldest_unfrozen_before_cutoff = false;
505
506 /*
507 * Initialize eager scan management fields to their disabled values.
508 * Aggressive vacuums, normal vacuums of small tables, and normal vacuums
509 * of tables without sufficiently old tuples disable eager scanning.
510 */
511 vacrel->next_eager_scan_region_start = InvalidBlockNumber;
512 vacrel->eager_scan_max_fails_per_region = 0;
513 vacrel->eager_scan_remaining_fails = 0;
514 vacrel->eager_scan_remaining_successes = 0;
515
516 /* If eager scanning is explicitly disabled, just return. */
517 if (params.max_eager_freeze_failure_rate == 0)
518 return;
519
520 /*
521 * The caller will have determined whether or not an aggressive vacuum is
522 * required by either the vacuum parameters or the relative age of the
523 * oldest unfrozen transaction IDs. An aggressive vacuum must scan every
524 * all-visible page to safely advance the relfrozenxid and/or relminmxid,
525 * so scans of all-visible pages are not considered eager.
526 */
527 if (vacrel->aggressive)
528 return;
529
530 /*
531 * Aggressively vacuuming a small relation shouldn't take long, so it
532 * isn't worth amortizing. We use two times the region size as the size
533 * cutoff because the eager scan start block is a random spot somewhere in
534 * the first region, making the second region the first to be eager
535 * scanned normally.
536 */
537 if (vacrel->rel_pages < 2 * EAGER_SCAN_REGION_SIZE)
538 return;
539
540 /*
541 * We only want to enable eager scanning if we are likely to be able to
542 * freeze some of the pages in the relation.
543 *
544 * Tuples with XIDs older than OldestXmin or MXIDs older than OldestMxact
545 * are technically freezable, but we won't freeze them unless the criteria
546 * for opportunistic freezing is met. Only tuples with XIDs/MXIDs older
547 * than the FreezeLimit/MultiXactCutoff are frozen in the common case.
548 *
549 * So, as a heuristic, we wait until the FreezeLimit has advanced past the
550 * relfrozenxid or the MultiXactCutoff has advanced past the relminmxid to
551 * enable eager scanning.
552 */
553 if (TransactionIdIsNormal(vacrel->cutoffs.relfrozenxid) &&
554 TransactionIdPrecedes(vacrel->cutoffs.relfrozenxid,
555 vacrel->cutoffs.FreezeLimit))
556 oldest_unfrozen_before_cutoff = true;
557
558 if (!oldest_unfrozen_before_cutoff &&
559 MultiXactIdIsValid(vacrel->cutoffs.relminmxid) &&
560 MultiXactIdPrecedes(vacrel->cutoffs.relminmxid,
561 vacrel->cutoffs.MultiXactCutoff))
562 oldest_unfrozen_before_cutoff = true;
563
564 if (!oldest_unfrozen_before_cutoff)
565 return;
566
567 /* We have met the criteria to eagerly scan some pages. */
568
569 /*
570 * Our success cap is MAX_EAGER_FREEZE_SUCCESS_RATE of the number of
571 * all-visible but not all-frozen blocks in the relation.
572 */
572 */
573 visibilitymap_count(vacrel->rel, &allvisible, &allfrozen);
574
575 vacrel->eager_scan_remaining_successes =
576 (BlockNumber) (MAX_EAGER_FREEZE_SUCCESS_RATE *
577 (allvisible - allfrozen));
578
579 /* If every all-visible page is frozen, eager scanning is disabled. */
580 if (vacrel->eager_scan_remaining_successes == 0)
581 return;
582
583 /*
584 * Now calculate the bounds of the first eager scan region. Its end block
585 * will be a random spot somewhere in the first EAGER_SCAN_REGION_SIZE
586 * blocks. This affects the bounds of all subsequent regions and avoids
587 * eager scanning and failing to freeze the same blocks each vacuum of the
588 * relation.
589 */
590 randseed = pg_prng_uint32(&pg_global_prng_state);
591
592 vacrel->next_eager_scan_region_start = randseed % EAGER_SCAN_REGION_SIZE;
593
594 Assert(params.max_eager_freeze_failure_rate > 0 &&
595 params.max_eager_freeze_failure_rate <= 1);
596
597 vacrel->eager_scan_max_fails_per_region =
598 ceil(params.max_eager_freeze_failure_rate *
599 EAGER_SCAN_REGION_SIZE);
600
601 /*
602 * The first region will be smaller than subsequent regions. As such,
603 * adjust the eager freeze failures tolerated for this region.
604 */
605 first_region_ratio = 1 - (float) vacrel->next_eager_scan_region_start /
606 EAGER_SCAN_REGION_SIZE;
607
608 vacrel->eager_scan_remaining_fails =
609 vacrel->eager_scan_max_fails_per_region *
610 first_region_ratio;
611}
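/*
 * Editorial sketch (not part of vacuumlazy.c): a worked example of the
 * first-region adjustment above, assuming randseed % EAGER_SCAN_REGION_SIZE
 * happens to land on block 1024. The first region then spans blocks
 * [0, 1024), so
 *
 *     first_region_ratio = 1 - 1024/4096 = 0.75
 *
 * and only 75% of eager_scan_max_fails_per_region failures are tolerated
 * before eager scanning is suspended for that shorter first region.
 */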
612
613/*
614 * heap_vacuum_rel() -- perform VACUUM for one heap relation
615 *
616 * This routine sets things up for and then calls lazy_scan_heap, where
617 * almost all work actually takes place. Finalizes everything after call
618 * returns by managing relation truncation and updating rel's pg_class
619 * entry. (Also updates pg_class entries for any indexes that need it.)
620 *
621 * At entry, we have already established a transaction and opened
622 * and locked the relation.
623 */
624void
625heap_vacuum_rel(Relation rel, const VacuumParams params,
626 BufferAccessStrategy bstrategy)
627{
628 LVRelState *vacrel;
629 bool verbose,
630 instrument,
631 skipwithvm,
632 frozenxid_updated,
633 minmulti_updated;
634 BlockNumber orig_rel_pages,
635 new_rel_pages,
636 new_rel_allvisible,
637 new_rel_allfrozen;
638 PGRUsage ru0;
639 TimestampTz starttime = 0;
640 PgStat_Counter startreadtime = 0,
641 startwritetime = 0;
642 WalUsage startwalusage = pgWalUsage;
643 BufferUsage startbufferusage = pgBufferUsage;
644 ErrorContextCallback errcallback;
645 char **indnames = NULL;
646 size_t dead_items_max_bytes;
647
648 verbose = (params.options & VACOPT_VERBOSE) != 0;
649 instrument = (verbose || (AmAutoVacuumWorkerProcess() &&
650 params.log_vacuum_min_duration >= 0));
651 if (instrument)
652 {
653 pg_rusage_init(&ru0);
654 if (track_io_timing)
655 {
656 startreadtime = pgStatBlockReadTime;
657 startwritetime = pgStatBlockWriteTime;
658 }
659 }
660
661 /* Used for instrumentation and stats report */
662 starttime = GetCurrentTimestamp();
663
664 pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
665 RelationGetRelid(rel));
668 params.is_wraparound
671 else
674
675 /*
676 * Setup error traceback support for ereport() first. The idea is to set
677 * up an error context callback to display additional information on any
678 * error during a vacuum. During different phases of vacuum, we update
679 * the state so that the error context callback always display current
680 * information.
681 *
682 * Copy the names of heap rel into local memory for error reporting
683 * purposes, too. It isn't always safe to assume that we can get the name
684 * of each rel. It's convenient for code in lazy_scan_heap to always use
685 * these temp copies.
686 */
687 vacrel = (LVRelState *) palloc0(sizeof(LVRelState));
688 vacrel->dbname = get_database_name(MyDatabaseId);
689 vacrel->relnamespace = get_namespace_name(RelationGetNamespace(rel));
690 vacrel->relname = pstrdup(RelationGetRelationName(rel));
691 vacrel->indname = NULL;
692 vacrel->phase = VACUUM_ERRCB_PHASE_UNKNOWN;
693 vacrel->verbose = verbose;
694 errcallback.callback = vacuum_error_callback;
695 errcallback.arg = vacrel;
696 errcallback.previous = error_context_stack;
697 error_context_stack = &errcallback;
698
699 /* Set up high level stuff about rel and its indexes */
700 vacrel->rel = rel;
701 vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes,
702 &vacrel->indrels);
703 vacrel->bstrategy = bstrategy;
704 if (instrument && vacrel->nindexes > 0)
705 {
706 /* Copy index names used by instrumentation (not error reporting) */
707 indnames = palloc_array(char *, vacrel->nindexes);
708 for (int i = 0; i < vacrel->nindexes; i++)
709 indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
710 }
711
712 /*
713 * The index_cleanup param either disables index vacuuming and cleanup or
714 * forces it to go ahead when we would otherwise apply the index bypass
715 * optimization. The default is 'auto', which leaves the final decision
716 * up to lazy_vacuum().
717 *
718 * The truncate param allows user to avoid attempting relation truncation,
719 * though it can't force truncation to happen.
720 */
721 Assert(params.index_cleanup != VACOPTVALUE_UNSPECIFIED);
722 Assert(params.truncate != VACOPTVALUE_UNSPECIFIED &&
723 params.truncate != VACOPTVALUE_AUTO);
724
725 /*
726 * While VacuumFailSafeActive is reset to false before calling this, we
727 * still need to reset it here due to recursive calls.
728 */
729 VacuumFailsafeActive = false;
730 vacrel->consider_bypass_optimization = true;
731 vacrel->do_index_vacuuming = true;
732 vacrel->do_index_cleanup = true;
733 vacrel->do_rel_truncate = (params.truncate != VACOPTVALUE_DISABLED);
734 if (params.index_cleanup == VACOPTVALUE_DISABLED)
735 {
736 /* Force disable index vacuuming up-front */
737 vacrel->do_index_vacuuming = false;
738 vacrel->do_index_cleanup = false;
739 }
740 else if (params.index_cleanup == VACOPTVALUE_ENABLED)
741 {
742 /* Force index vacuuming. Note that failsafe can still bypass. */
743 vacrel->consider_bypass_optimization = false;
744 }
745 else
746 {
747 /* Default/auto, make all decisions dynamically */
748 Assert(params.index_cleanup == VACOPTVALUE_AUTO);
749 }
750
751 /* Initialize page counters explicitly (be tidy) */
752 vacrel->scanned_pages = 0;
753 vacrel->eager_scanned_pages = 0;
754 vacrel->removed_pages = 0;
755 vacrel->new_frozen_tuple_pages = 0;
756 vacrel->lpdead_item_pages = 0;
757 vacrel->missed_dead_pages = 0;
758 vacrel->nonempty_pages = 0;
759 /* dead_items_alloc allocates vacrel->dead_items later on */
760
761 /* Allocate/initialize output statistics state */
762 vacrel->new_rel_tuples = 0;
763 vacrel->new_live_tuples = 0;
764 vacrel->indstats = (IndexBulkDeleteResult **)
765 palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
766
767 /* Initialize remaining counters (be tidy) */
768 vacrel->num_index_scans = 0;
769 vacrel->num_dead_items_resets = 0;
770 vacrel->total_dead_items_bytes = 0;
771 vacrel->tuples_deleted = 0;
772 vacrel->tuples_frozen = 0;
773 vacrel->lpdead_items = 0;
774 vacrel->live_tuples = 0;
775 vacrel->recently_dead_tuples = 0;
776 vacrel->missed_dead_tuples = 0;
777
778 vacrel->new_all_visible_pages = 0;
779 vacrel->new_all_visible_all_frozen_pages = 0;
780 vacrel->new_all_frozen_pages = 0;
781
782 /*
783 * Get cutoffs that determine which deleted tuples are considered DEAD,
784 * not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze. Then determine
785 * the extent of the blocks that we'll scan in lazy_scan_heap. It has to
786 * happen in this order to ensure that the OldestXmin cutoff field works
787 * as an upper bound on the XIDs stored in the pages we'll actually scan
788 * (NewRelfrozenXid tracking must never be allowed to miss unfrozen XIDs).
789 *
790 * Next acquire vistest, a related cutoff that's used in pruning. We use
791 * vistest in combination with OldestXmin to ensure that
792 * heap_page_prune_and_freeze() always removes any deleted tuple whose
793 * xmax is < OldestXmin. lazy_scan_prune must never become confused about
794 * whether a tuple should be frozen or removed. (In the future we might
795 * want to teach lazy_scan_prune to recompute vistest from time to time,
796 * to increase the number of dead tuples it can prune away.)
797 */
798 vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
799 vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
800 vacrel->vistest = GlobalVisTestFor(rel);
801
802 /* Initialize state used to track oldest extant XID/MXID */
803 vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin;
804 vacrel->NewRelminMxid = vacrel->cutoffs.OldestMxact;
805
806 /*
807 * Initialize state related to tracking all-visible page skipping. This is
808 * very important to determine whether or not it is safe to advance the
809 * relfrozenxid/relminmxid.
810 */
811 vacrel->skippedallvis = false;
812 skipwithvm = true;
813 if (params.options & VACOPT_DISABLE_PAGE_SKIPPING)
814 {
815 /*
816 * Force aggressive mode, and disable skipping blocks using the
817 * visibility map (even those set all-frozen)
818 */
819 vacrel->aggressive = true;
820 skipwithvm = false;
821 }
822
823 vacrel->skipwithvm = skipwithvm;
824
825 /*
826 * Set up eager scan tracking state. This must happen after determining
827 * whether or not the vacuum must be aggressive, because only normal
828 * vacuums use the eager scan algorithm.
829 */
830 heap_vacuum_eager_scan_setup(vacrel, params);
831
832 /* Report the vacuum mode: 'normal' or 'aggressive' */
834 vacrel->aggressive
837
838 if (verbose)
839 {
840 if (vacrel->aggressive)
841 ereport(INFO,
842 (errmsg("aggressively vacuuming \"%s.%s.%s\"",
843 vacrel->dbname, vacrel->relnamespace,
844 vacrel->relname)));
845 else
846 ereport(INFO,
847 (errmsg("vacuuming \"%s.%s.%s\"",
848 vacrel->dbname, vacrel->relnamespace,
849 vacrel->relname)));
850 }
851
852 /*
853 * Allocate dead_items memory using dead_items_alloc. This handles
854 * parallel VACUUM initialization as part of allocating shared memory
855 * space used for dead_items. (But do a failsafe precheck first, to
856 * ensure that parallel VACUUM won't be attempted at all when relfrozenxid
857 * is already dangerously old.)
858 */
859 lazy_check_wraparound_failsafe(vacrel);
860 dead_items_alloc(vacrel, params.nworkers);
861
862 /*
863 * Call lazy_scan_heap to perform all required heap pruning, index
864 * vacuuming, and heap vacuuming (plus related processing)
865 */
866 lazy_scan_heap(vacrel);
867
868 /*
869 * Save dead items max_bytes and update the memory usage statistics before
870 * cleanup, they are freed in parallel vacuum cases during
871 * dead_items_cleanup().
872 */
873 dead_items_max_bytes = vacrel->dead_items_info->max_bytes;
874 vacrel->total_dead_items_bytes += TidStoreMemoryUsage(vacrel->dead_items);
875
876 /*
877 * Free resources managed by dead_items_alloc. This ends parallel mode in
878 * passing when necessary.
879 */
880 dead_items_cleanup(vacrel);
881 Assert(!IsInParallelMode());
882
883 /*
884 * Update pg_class entries for each of rel's indexes where appropriate.
885 *
886 * Unlike the later update to rel's pg_class entry, this is not critical.
887 * Maintains relpages/reltuples statistics used by the planner only.
888 */
889 if (vacrel->do_index_cleanup)
890 update_relstats_all_indexes(vacrel);
891
892 /* Done with rel's indexes */
893 vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
894
895 /* Optionally truncate rel */
896 if (should_attempt_truncation(vacrel))
897 lazy_truncate_heap(vacrel);
898
899 /* Pop the error context stack */
900 error_context_stack = errcallback.previous;
901
902 /* Report that we are now doing final cleanup */
903 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
904 PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
905
906 /*
907 * Prepare to update rel's pg_class entry.
908 *
909 * Aggressive VACUUMs must always be able to advance relfrozenxid to a
910 * value >= FreezeLimit, and relminmxid to a value >= MultiXactCutoff.
911 * Non-aggressive VACUUMs may advance them by any amount, or not at all.
912 */
913 Assert(vacrel->NewRelfrozenXid == vacrel->cutoffs.OldestXmin ||
914 TransactionIdPrecedesOrEquals(vacrel->aggressive ? vacrel->cutoffs.FreezeLimit :
915 vacrel->cutoffs.relfrozenxid,
916 vacrel->NewRelfrozenXid));
917 Assert(vacrel->NewRelminMxid == vacrel->cutoffs.OldestMxact ||
918 MultiXactIdPrecedesOrEquals(vacrel->aggressive ? vacrel->cutoffs.MultiXactCutoff :
919 vacrel->cutoffs.relminmxid,
920 vacrel->NewRelminMxid));
921 if (vacrel->skippedallvis)
922 {
923 /*
924 * Must keep original relfrozenxid in a non-aggressive VACUUM that
925 * chose to skip an all-visible page range. The state that tracks new
926 * values will have missed unfrozen XIDs from the pages we skipped.
927 */
928 Assert(!vacrel->aggressive);
929 vacrel->NewRelfrozenXid = InvalidTransactionId;
930 vacrel->NewRelminMxid = InvalidMultiXactId;
931 }
932
933 /*
934 * For safety, clamp relallvisible to be not more than what we're setting
935 * pg_class.relpages to
936 */
937 new_rel_pages = vacrel->rel_pages; /* After possible rel truncation */
938 visibilitymap_count(rel, &new_rel_allvisible, &new_rel_allfrozen);
939 if (new_rel_allvisible > new_rel_pages)
940 new_rel_allvisible = new_rel_pages;
941
942 /*
943 * An all-frozen block _must_ be all-visible. As such, clamp the count of
944 * all-frozen blocks to the count of all-visible blocks. This matches the
945 * clamping of relallvisible above.
946 */
947 if (new_rel_allfrozen > new_rel_allvisible)
948 new_rel_allfrozen = new_rel_allvisible;
949
950 /*
951 * Now actually update rel's pg_class entry.
952 *
953 * In principle new_live_tuples could be -1 indicating that we (still)
954 * don't know the tuple count. In practice that can't happen, since we
955 * scan every page that isn't skipped using the visibility map.
956 */
957 vac_update_relstats(rel, new_rel_pages, vacrel->new_live_tuples,
958 new_rel_allvisible, new_rel_allfrozen,
959 vacrel->nindexes > 0,
960 vacrel->NewRelfrozenXid, vacrel->NewRelminMxid,
961 &frozenxid_updated, &minmulti_updated, false);
962
963 /*
964 * Report results to the cumulative stats system, too.
965 *
966 * Deliberately avoid telling the stats system about LP_DEAD items that
967 * remain in the table due to VACUUM bypassing index and heap vacuuming.
968 * ANALYZE will consider the remaining LP_DEAD items to be dead "tuples".
969 * It seems like a good idea to err on the side of not vacuuming again too
970 * soon in cases where the failsafe prevented significant amounts of heap
971 * vacuuming.
972 */
973 pgstat_report_vacuum(RelationGetRelid(rel),
974 Max(vacrel->new_live_tuples, 0),
975 vacrel->recently_dead_tuples +
976 vacrel->missed_dead_tuples,
977 starttime);
978 pgstat_progress_end_command();
979
980 if (instrument)
981 {
982 TimestampTz endtime = GetCurrentTimestamp();
983
984 if (verbose || params.log_vacuum_min_duration == 0 ||
985 TimestampDifferenceExceeds(starttime, endtime,
986 params.log_vacuum_min_duration))
987 {
988 long secs_dur;
989 int usecs_dur;
990 WalUsage walusage;
991 BufferUsage bufferusage;
992 StringInfoData buf;
993 char *msgfmt;
994 int32 diff;
995 double read_rate = 0,
996 write_rate = 0;
997 int64 total_blks_hit;
998 int64 total_blks_read;
999 int64 total_blks_dirtied;
1000
1001 TimestampDifference(starttime, endtime, &secs_dur, &usecs_dur);
1002 memset(&walusage, 0, sizeof(WalUsage));
1003 WalUsageAccumDiff(&walusage, &pgWalUsage, &startwalusage);
1004 memset(&bufferusage, 0, sizeof(BufferUsage));
1005 BufferUsageAccumDiff(&bufferusage, &pgBufferUsage, &startbufferusage);
1006
1007 total_blks_hit = bufferusage.shared_blks_hit +
1008 bufferusage.local_blks_hit;
1009 total_blks_read = bufferusage.shared_blks_read +
1010 bufferusage.local_blks_read;
1011 total_blks_dirtied = bufferusage.shared_blks_dirtied +
1012 bufferusage.local_blks_dirtied;
1013
1014 initStringInfo(&buf);
1015 if (verbose)
1016 {
1017 /*
1018 * Aggressiveness already reported earlier, in dedicated
1019 * VACUUM VERBOSE ereport
1020 */
1021 Assert(!params.is_wraparound);
1022 msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
1023 }
1024 else if (params.is_wraparound)
1025 {
1026 /*
1027 * While it's possible for a VACUUM to be both is_wraparound
1028 * and !aggressive, that's just a corner-case -- is_wraparound
1029 * implies aggressive. Produce distinct output for the corner
1030 * case all the same, just in case.
1031 */
1032 if (vacrel->aggressive)
1033 msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
1034 else
1035 msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
1036 }
1037 else
1038 {
1039 if (vacrel->aggressive)
1040 msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
1041 else
1042 msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
1043 }
1044 appendStringInfo(&buf, msgfmt,
1045 vacrel->dbname,
1046 vacrel->relnamespace,
1047 vacrel->relname,
1048 vacrel->num_index_scans);
1049 appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total), %u eagerly scanned\n"),
1050 vacrel->removed_pages,
1051 new_rel_pages,
1052 vacrel->scanned_pages,
1053 orig_rel_pages == 0 ? 100.0 :
1054 100.0 * vacrel->scanned_pages /
1055 orig_rel_pages,
1056 vacrel->eager_scanned_pages);
1057 appendStringInfo(&buf,
1058 _("tuples: %" PRId64 " removed, %" PRId64 " remain, %" PRId64 " are dead but not yet removable\n"),
1059 vacrel->tuples_deleted,
1060 (int64) vacrel->new_rel_tuples,
1061 vacrel->recently_dead_tuples);
1062 if (vacrel->missed_dead_tuples > 0)
1063 appendStringInfo(&buf,
1064 _("tuples missed: %" PRId64 " dead from %u pages not removed due to cleanup lock contention\n"),
1065 vacrel->missed_dead_tuples,
1066 vacrel->missed_dead_pages);
1067 diff = (int32) (ReadNextTransactionId() -
1068 vacrel->cutoffs.OldestXmin);
1069 appendStringInfo(&buf,
1070 _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
1071 vacrel->cutoffs.OldestXmin, diff);
1072 if (frozenxid_updated)
1073 {
1074 diff = (int32) (vacrel->NewRelfrozenXid -
1075 vacrel->cutoffs.relfrozenxid);
1076 appendStringInfo(&buf,
1077 _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
1078 vacrel->NewRelfrozenXid, diff);
1079 }
1080 if (minmulti_updated)
1081 {
1082 diff = (int32) (vacrel->NewRelminMxid -
1083 vacrel->cutoffs.relminmxid);
1084 appendStringInfo(&buf,
1085 _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
1086 vacrel->NewRelminMxid, diff);
1087 }
1088 appendStringInfo(&buf, _("frozen: %u pages from table (%.2f%% of total) had %" PRId64 " tuples frozen\n"),
1089 vacrel->new_frozen_tuple_pages,
1090 orig_rel_pages == 0 ? 100.0 :
1091 100.0 * vacrel->new_frozen_tuple_pages /
1092 orig_rel_pages,
1093 vacrel->tuples_frozen);
1094
1095 appendStringInfo(&buf,
1096 _("visibility map: %u pages set all-visible, %u pages set all-frozen (%u were all-visible)\n"),
1097 vacrel->new_all_visible_pages,
1098 vacrel->new_all_visible_all_frozen_pages +
1099 vacrel->new_all_frozen_pages,
1100 vacrel->new_all_frozen_pages);
1101 if (vacrel->do_index_vacuuming)
1102 {
1103 if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
1104 appendStringInfoString(&buf, _("index scan not needed: "));
1105 else
1106 appendStringInfoString(&buf, _("index scan needed: "));
1107
1108 msgfmt = _("%u pages from table (%.2f%% of total) had %" PRId64 " dead item identifiers removed\n");
1109 }
1110 else
1111 {
1112 if (!VacuumFailsafeActive)
1113 appendStringInfoString(&buf, _("index scan bypassed: "));
1114 else
1115 appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
1116
1117 msgfmt = _("%u pages from table (%.2f%% of total) have %" PRId64 " dead item identifiers\n");
1118 }
1119 appendStringInfo(&buf, msgfmt,
1120 vacrel->lpdead_item_pages,
1121 orig_rel_pages == 0 ? 100.0 :
1122 100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
1123 vacrel->lpdead_items);
1124 for (int i = 0; i < vacrel->nindexes; i++)
1125 {
1126 IndexBulkDeleteResult *istat = vacrel->indstats[i];
1127
1128 if (!istat)
1129 continue;
1130
1131 appendStringInfo(&buf,
1132 _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
1133 indnames[i],
1134 istat->num_pages,
1135 istat->pages_newly_deleted,
1136 istat->pages_deleted,
1137 istat->pages_free);
1138 }
1139 if (track_cost_delay_timing)
1140 {
1141 /*
1142 * We bypass the changecount mechanism because this value is
1143 * only updated by the calling process. We also rely on the
1144 * above call to pgstat_progress_end_command() to not clear
1145 * the st_progress_param array.
1146 */
1147 appendStringInfo(&buf, _("delay time: %.3f ms\n"),
1148 (double) MyBEEntry->st_progress_param[PROGRESS_VACUUM_DELAY_TIME] / 1000000.0);
1149 }
1150 if (track_io_timing)
1151 {
1152 double read_ms = (double) (pgStatBlockReadTime - startreadtime) / 1000;
1153 double write_ms = (double) (pgStatBlockWriteTime - startwritetime) / 1000;
1154
1155 appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
1156 read_ms, write_ms);
1157 }
1158 if (secs_dur > 0 || usecs_dur > 0)
1159 {
1160 read_rate = (double) BLCKSZ * total_blks_read /
1161 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
1162 write_rate = (double) BLCKSZ * total_blks_dirtied /
1163 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
1164 }
1165 appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
1166 read_rate, write_rate);
1167 appendStringInfo(&buf,
1168 _("buffer usage: %" PRId64 " hits, %" PRId64 " reads, %" PRId64 " dirtied\n"),
1169 total_blks_hit,
1170 total_blks_read,
1171 total_blks_dirtied);
1172 appendStringInfo(&buf,
1173 _("WAL usage: %" PRId64 " records, %" PRId64 " full page images, %" PRIu64 " bytes, %" PRIu64 " full page image bytes, %" PRId64 " buffers full\n"),
1174 walusage.wal_records,
1175 walusage.wal_fpi,
1176 walusage.wal_bytes,
1177 walusage.wal_fpi_bytes,
1178 walusage.wal_buffers_full);
1179
1180 /*
1181 * Report the dead items memory usage.
1182 *
1183 * The num_dead_items_resets counter increases when we reset the
1184 * collected dead items, so the counter is non-zero if at least
1185 * one set of dead items was collected, even if index vacuuming
1186 * is disabled.
1187 */
1188 appendStringInfo(&buf,
1189 ngettext("memory usage: dead item storage %.2f MB accumulated across %d reset (limit %.2f MB each)\n",
1190 "memory usage: dead item storage %.2f MB accumulated across %d resets (limit %.2f MB each)\n",
1191 vacrel->num_dead_items_resets),
1192 (double) vacrel->total_dead_items_bytes / (1024 * 1024),
1193 vacrel->num_dead_items_resets,
1194 (double) dead_items_max_bytes / (1024 * 1024));
1195 appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
1196
1197 ereport(verbose ? INFO : LOG,
1198 (errmsg_internal("%s", buf.data)));
1199 pfree(buf.data);
1200 }
1201 }
1202
1203 /* Cleanup index statistics and index names */
1204 for (int i = 0; i < vacrel->nindexes; i++)
1205 {
1206 if (vacrel->indstats[i])
1207 pfree(vacrel->indstats[i]);
1208
1209 if (instrument)
1210 pfree(indnames[i]);
1211 }
1212}
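/*
 * Editorial sketch (not part of vacuumlazy.c): the "%d XIDs old" figures
 * logged above rely on modulo-2^32 XID arithmetic. Subtracting two
 * TransactionIds as unsigned 32-bit values and casting the result to int32
 * yields a wraparound-safe signed distance:
 *
 *     int32 age = (int32) (ReadNextTransactionId() - cutoff_xid);
 *
 * which stays correct even if the XID counter wrapped between the two
 * readings, as long as the true distance is under ~2 billion XIDs.
 */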
1213
1214/*
1215 * lazy_scan_heap() -- workhorse function for VACUUM
1216 *
1217 * This routine prunes each page in the heap, and considers the need to
1218 * freeze remaining tuples with storage (not including pages that can be
1219 * skipped using the visibility map). Also performs related maintenance
1220 * of the FSM and visibility map. These steps all take place during an
1221 * initial pass over the target heap relation.
1222 *
1223 * Also invokes lazy_vacuum_all_indexes to vacuum indexes, which largely
1224 * consists of deleting index tuples that point to LP_DEAD items left in
1225 * heap pages following pruning. The earlier initial pass over the heap will
1226 * have collected the TIDs whose index tuples need to be removed.
1227 *
1228 * Finally, invokes lazy_vacuum_heap_rel to vacuum heap pages, which
1229 * largely consists of marking LP_DEAD items (from vacrel->dead_items)
1230 * as LP_UNUSED. This has to happen in a second, final pass over the
1231 * heap, to preserve a basic invariant that all index AMs rely on: no
1232 * extant index tuple can ever be allowed to contain a TID that points to
1233 * an LP_UNUSED line pointer in the heap. We must disallow premature
1234 * recycling of line pointers to avoid index scans that get confused
1235 * about which TID points to which tuple immediately after recycling.
1236 * (Actually, this isn't a concern when target heap relation happens to
1237 * have no indexes, which allows us to safely apply the one-pass strategy
1238 * as an optimization).
1239 *
1240 * In practice we often have enough space to fit all TIDs, and so won't
1241 * need to call lazy_vacuum more than once, after our initial pass over
1242 * the heap has totally finished. Otherwise things are slightly more
1243 * complicated: our "initial pass" over the heap applies only to those
1244 * pages that were pruned before we needed to call lazy_vacuum, and our
1245 * "final pass" over the heap only vacuums these same heap pages.
1246 * However, we process indexes in full every time lazy_vacuum is called,
1247 * which makes index processing very inefficient when memory is in short
1248 * supply.
1249 */
1250static void
1251lazy_scan_heap(LVRelState *vacrel)
1252{
1253 ReadStream *stream;
1254 BlockNumber rel_pages = vacrel->rel_pages,
1255 blkno = 0,
1256 next_fsm_block_to_vacuum = 0;
1257 BlockNumber orig_eager_scan_success_limit =
1258 vacrel->eager_scan_remaining_successes; /* for logging */
1259 Buffer vmbuffer = InvalidBuffer;
1260 const int initprog_index[] = {
1261 PROGRESS_VACUUM_PHASE,
1262 PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
1263 PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES
1264 };
1265 int64 initprog_val[3];
1266
1267 /* Report that we're scanning the heap, advertising total # of blocks */
1268 initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
1269 initprog_val[1] = rel_pages;
1270 initprog_val[2] = vacrel->dead_items_info->max_bytes;
1271 pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
1272
1273 /* Initialize for the first heap_vac_scan_next_block() call */
1274 vacrel->current_block = InvalidBlockNumber;
1275 vacrel->next_unskippable_block = InvalidBlockNumber;
1276 vacrel->next_unskippable_eager_scanned = false;
1277 vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1278
1279 /*
1280 * Set up the read stream for vacuum's first pass through the heap.
1281 *
1282 * This could be made safe for READ_STREAM_USE_BATCHING, but only with
1283 * explicit work in heap_vac_scan_next_block.
1284 */
1285 stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE,
1286 vacrel->bstrategy,
1287 vacrel->rel,
1288 MAIN_FORKNUM,
1289 heap_vac_scan_next_block,
1290 vacrel,
1291 sizeof(bool));
1292
1293 while (true)
1294 {
1295 Buffer buf;
1296 Page page;
1297 bool was_eager_scanned = false;
1298 int ndeleted = 0;
1299 bool has_lpdead_items;
1300 void *per_buffer_data = NULL;
1301 bool vm_page_frozen = false;
1302 bool got_cleanup_lock = false;
1303
1304 vacuum_delay_point(false);
1305
1306 /*
1307 * Regularly check if wraparound failsafe should trigger.
1308 *
1309 * There is a similar check inside lazy_vacuum_all_indexes(), but
1310 * relfrozenxid might start to look dangerously old before we reach
1311 * that point. This check also provides failsafe coverage for the
1312 * one-pass strategy, and the two-pass strategy with the index_cleanup
1313 * param set to 'off'.
1314 */
1315 if (vacrel->scanned_pages > 0 &&
1316 vacrel->scanned_pages % FAILSAFE_EVERY_PAGES == 0)
1317 lazy_check_wraparound_failsafe(vacrel);
1318
1319 /*
1320 * Consider if we definitely have enough space to process TIDs on page
1321 * already. If we are close to overrunning the available space for
1322 * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
1323 * this page. However, let's force at least one page-worth of tuples
1324 * to be stored as to ensure we do at least some work when the memory
1325 * configured is so low that we run out before storing anything.
1326 */
1327 if (vacrel->dead_items_info->num_items > 0 &&
1328 TidStoreMemoryUsage(vacrel->dead_items) > vacrel->dead_items_info->max_bytes)
1329 {
1330 /*
1331 * Before beginning index vacuuming, we release any pin we may
1332 * hold on the visibility map page. This isn't necessary for
1333 * correctness, but we do it anyway to avoid holding the pin
1334 * across a lengthy, unrelated operation.
1335 */
1336 if (BufferIsValid(vmbuffer))
1337 {
1338 ReleaseBuffer(vmbuffer);
1339 vmbuffer = InvalidBuffer;
1340 }
1341
1342 /* Perform a round of index and heap vacuuming */
1343 vacrel->consider_bypass_optimization = false;
1344 lazy_vacuum(vacrel);
1345
1346 /*
1347 * Vacuum the Free Space Map to make newly-freed space visible on
1348 * upper-level FSM pages. Note that blkno is the previously
1349 * processed block.
1350 */
1351 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1352 blkno + 1);
1353 next_fsm_block_to_vacuum = blkno;
1354
1355 /* Report that we are once again scanning the heap */
1356 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1357 PROGRESS_VACUUM_PHASE_SCAN_HEAP);
1358 }
1359
1360 buf = read_stream_next_buffer(stream, &per_buffer_data);
1361
1362 /* The relation is exhausted. */
1363 if (!BufferIsValid(buf))
1364 break;
1365
1366 was_eager_scanned = *((bool *) per_buffer_data);
1367 CheckBufferIsPinnedOnce(buf);
1368 page = BufferGetPage(buf);
1369 blkno = BufferGetBlockNumber(buf);
1370
1371 vacrel->scanned_pages++;
1372 if (was_eager_scanned)
1373 vacrel->eager_scanned_pages++;
1374
1375 /* Report as block scanned, update error traceback information */
1376 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1377 update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_SCAN_HEAP,
1378 blkno, InvalidOffsetNumber);
1379
1380 /*
1381 * Pin the visibility map page in case we need to mark the page
1382 * all-visible. In most cases this will be very cheap, because we'll
1383 * already have the correct page pinned anyway.
1384 */
1385 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
1386
1387 /*
1388 * We need a buffer cleanup lock to prune HOT chains and defragment
1389 * the page in lazy_scan_prune. But when it's not possible to acquire
1390 * a cleanup lock right away, we may be able to settle for reduced
1391 * processing using lazy_scan_noprune.
1392 */
1393 got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
1394
1395 if (!got_cleanup_lock)
1396 LockBuffer(buf, BUFFER_LOCK_SHARE);
1397
1398 /* Check for new or empty pages before lazy_scan_[no]prune call */
1399 if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock,
1400 vmbuffer))
1401 {
1402 /* Processed as new/empty page (lock and pin released) */
1403 continue;
1404 }
1405
1406 /*
1407 * If we didn't get the cleanup lock, we can still collect LP_DEAD
1408 * items in the dead_items area for later vacuuming, count live and
1409 * recently dead tuples for vacuum logging, and determine if this
1410 * block could later be truncated. If we encounter any xid/mxids that
1411 * require advancing the relfrozenxid/relminxid, we'll have to wait
1412 * for a cleanup lock and call lazy_scan_prune().
1413 */
1414 if (!got_cleanup_lock &&
1415 !lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
1416 {
1417 /*
1418 * lazy_scan_noprune could not do all required processing. Wait
1419 * for a cleanup lock, and call lazy_scan_prune in the usual way.
1420 */
1421 Assert(vacrel->aggressive);
1422 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1423 LockBufferForCleanup(buf);
1424 got_cleanup_lock = true;
1425 }
1426
1427 /*
1428 * If we have a cleanup lock, we must now prune, freeze, and count
1429 * tuples. We may have acquired the cleanup lock originally, or we may
1430 * have gone back and acquired it after lazy_scan_noprune() returned
1431 * false. Either way, the page hasn't been processed yet.
1432 *
1433 * Like lazy_scan_noprune(), lazy_scan_prune() will count
1434 * recently_dead_tuples and live tuples for vacuum logging, determine
1435 * if the block can later be truncated, and accumulate the details of
1436 * remaining LP_DEAD line pointers on the page into dead_items. These
1437 * dead items include those pruned by lazy_scan_prune() as well as
1438 * line pointers previously marked LP_DEAD.
1439 */
1440 if (got_cleanup_lock)
1441 ndeleted = lazy_scan_prune(vacrel, buf, blkno, page,
1442 vmbuffer,
1443 &has_lpdead_items, &vm_page_frozen);
1444
1445 /*
1446 * Count an eagerly scanned page as a failure or a success.
1447 *
1448 * Only lazy_scan_prune() freezes pages, so if we didn't get the
1449 * cleanup lock, we won't have frozen the page. However, we only count
1450 * pages that were too new to require freezing as eager freeze
1451 * failures.
1452 *
1453 * We could gather more information from lazy_scan_noprune() about
1454 * whether or not there were tuples with XIDs or MXIDs older than the
1455 * FreezeLimit or MultiXactCutoff. However, for simplicity, we simply
1456 * exclude pages skipped due to cleanup lock contention from eager
1457 * freeze algorithm caps.
1458 */
1459 if (got_cleanup_lock && was_eager_scanned)
1460 {
1461 /* Aggressive vacuums do not eager scan. */
1462 Assert(!vacrel->aggressive);
1463
1464 if (vm_page_frozen)
1465 {
1466 if (vacrel->eager_scan_remaining_successes > 0)
1467 vacrel->eager_scan_remaining_successes--;
1468
1469 if (vacrel->eager_scan_remaining_successes == 0)
1470 {
1471 /*
1472 * Report only once that we disabled eager scanning. We
1473 * may eagerly read ahead blocks in excess of the success
1474 * or failure caps before attempting to freeze them, so we
1475 * could reach here even after disabling additional eager
1476 * scanning.
1477 */
1478 if (vacrel->eager_scan_max_fails_per_region > 0)
1479 ereport(vacrel->verbose ? INFO : DEBUG2,
1480 (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of relation \"%s.%s.%s\"",
1481 orig_eager_scan_success_limit,
1482 vacrel->dbname, vacrel->relnamespace,
1483 vacrel->relname)));
1484
1485 /*
1486 * If we hit our success cap, permanently disable eager
1487 * scanning by setting the other eager scan management
1488 * fields to their disabled values.
1489 */
1490 vacrel->eager_scan_remaining_fails = 0;
1491 vacrel->next_eager_scan_region_start = InvalidBlockNumber;
1492 vacrel->eager_scan_max_fails_per_region = 0;
1493 }
1494 }
1495 else if (vacrel->eager_scan_remaining_fails > 0)
1496 vacrel->eager_scan_remaining_fails--;
1497 }
1498
1499 /*
1500 * Now drop the buffer lock and, potentially, update the FSM.
1501 *
1502 * Our goal is to update the freespace map the last time we touch the
1503 * page. If we'll process a block in the second pass, we may free up
1504 * additional space on the page, so it is better to update the FSM
1505 * after the second pass. If the relation has no indexes, or if index
1506 * vacuuming is disabled, there will be no second heap pass; if this
1507 * particular page has no dead items, the second heap pass will not
1508 * touch this page. So, in those cases, update the FSM now.
1509 *
1510 * Note: In corner cases, it's possible to miss updating the FSM
1511 * entirely. If index vacuuming is currently enabled, we'll skip the
1512 * FSM update now. But if failsafe mode is later activated, or there
1513 * are so few dead tuples that index vacuuming is bypassed, there will
1514 * also be no opportunity to update the FSM later, because we'll never
1515 * revisit this page. Since updating the FSM is desirable but not
1516 * absolutely required, that's OK.
1517 */
1518 if (vacrel->nindexes == 0
1519 || !vacrel->do_index_vacuuming
1520 || !has_lpdead_items)
1521 {
1522 Size freespace = PageGetHeapFreeSpace(page);
1523
1524 UnlockReleaseBuffer(buf);
1525 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1526
1527 /*
1528 * Periodically perform FSM vacuuming to make newly-freed space
1529 * visible on upper FSM pages. This is done after vacuuming if the
1530 * table has indexes. There will only be newly-freed space if we
1531 * held the cleanup lock and lazy_scan_prune() was called.
1532 */
1533 if (got_cleanup_lock && vacrel->nindexes == 0 && ndeleted > 0 &&
1534 blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1535 {
1536 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1537 blkno);
1538 next_fsm_block_to_vacuum = blkno;
1539 }
1540 }
1541 else
1542 UnlockReleaseBuffer(buf);
1543 }
1544
1545 vacrel->blkno = InvalidBlockNumber;
1546 if (BufferIsValid(vmbuffer))
1547 ReleaseBuffer(vmbuffer);
1548
1549 /*
1550 * Report that everything is now scanned. We never skip scanning the last
1551 * block in the relation, so we can pass rel_pages here.
1552 */
1553 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED,
1554 rel_pages);
1555
1556 /* now we can compute the new value for pg_class.reltuples */
1557 vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, rel_pages,
1558 vacrel->scanned_pages,
1559 vacrel->live_tuples);
1560
1561 /*
1562 * Also compute the total number of surviving heap entries. In the
1563 * (unlikely) scenario that new_live_tuples is -1, take it as zero.
1564 */
1565 vacrel->new_rel_tuples =
1566 Max(vacrel->new_live_tuples, 0) + vacrel->recently_dead_tuples +
1567 vacrel->missed_dead_tuples;
1568
1569 read_stream_end(stream);
1570
1571 /*
1572 * Do index vacuuming (call each index's ambulkdelete routine), then do
1573 * related heap vacuuming
1574 */
1575 if (vacrel->dead_items_info->num_items > 0)
1576 lazy_vacuum(vacrel);
1577
1578 /*
1579 * Vacuum the remainder of the Free Space Map. We must do this whether or
1580 * not there were indexes, and whether or not we bypassed index vacuuming.
1581 * We can pass rel_pages here because we never skip scanning the last
1582 * block of the relation.
1583 */
1584 if (rel_pages > next_fsm_block_to_vacuum)
1585 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, rel_pages);
1586
1587 /* report all blocks vacuumed */
1588 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, rel_pages);
1589
1590 /* Do final index cleanup (call each index's amvacuumcleanup routine) */
1591 if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
1592 lazy_cleanup_all_indexes(vacrel);
1593}
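/*
 * Editorial sketch (not part of vacuumlazy.c): the memory trigger used in
 * the scan loop above, in isolation. Assumes a populated vacrel.
 *
 *     if (vacrel->dead_items_info->num_items > 0 &&
 *         TidStoreMemoryUsage(vacrel->dead_items) >
 *         vacrel->dead_items_info->max_bytes)
 *     {
 *         ... vacuum indexes and heap, then reset dead_items ...
 *     }
 *
 * With a typical 64 MB budget, this fires one extra round of index plus
 * heap vacuuming each time the accumulated TIDs outgrow the TidStore limit.
 */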
1594
1595/*
1596 * heap_vac_scan_next_block() -- read stream callback to get the next block
1597 * for vacuum to process
1598 *
1599 * Every time lazy_scan_heap() needs a new block to process during its first
1600 * phase, it invokes read_stream_next_buffer() with a stream set up to call
1601 * heap_vac_scan_next_block() to get the next block.
1602 *
1603 * heap_vac_scan_next_block() uses the visibility map, vacuum options, and
1604 * various thresholds to skip blocks which do not need to be processed and
1605 * returns the next block to process or InvalidBlockNumber if there are no
1606 * remaining blocks.
1607 *
1608 * The visibility status of the next block to process and whether or not it
1609 * was eager scanned is set in the per_buffer_data.
1610 *
1611 * callback_private_data contains a reference to the LVRelState, passed to the
1612 * read stream API during stream setup. The LVRelState is an in/out parameter
1613 * here (locally named `vacrel`). Vacuum options and information about the
1614 * relation are read from it. vacrel->skippedallvis is set if we skip a block
1615 * that's all-visible but not all-frozen (to ensure that we don't update
1616 * relfrozenxid in that case). vacrel also holds information about the next
1617 * unskippable block -- as bookkeeping for this function.
1618 */
1619static BlockNumber
1620heap_vac_scan_next_block(ReadStream *stream,
1621 void *callback_private_data,
1622 void *per_buffer_data)
1623{
1624 BlockNumber next_block;
1625 LVRelState *vacrel = callback_private_data;
1626
1627 /* relies on InvalidBlockNumber + 1 overflowing to 0 on first call */
1628 next_block = vacrel->current_block + 1;
1629
1630 /* Have we reached the end of the relation? */
1631 if (next_block >= vacrel->rel_pages)
1632 {
1633 if (BufferIsValid(vacrel->next_unskippable_vmbuffer))
1634 {
1635 ReleaseBuffer(vacrel->next_unskippable_vmbuffer);
1636 vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1637 }
1638 return InvalidBlockNumber;
1639 }
1640
1641 /*
1642 * We must be in one of the three following states:
1643 */
1644 if (next_block > vacrel->next_unskippable_block ||
1645 vacrel->next_unskippable_block == InvalidBlockNumber)
1646 {
1647 /*
1648 * 1. We have just processed an unskippable block (or we're at the
1649 * beginning of the scan). Find the next unskippable block using the
1650 * visibility map.
1651 */
1652 bool skipsallvis;
1653
1654 find_next_unskippable_block(vacrel, &skipsallvis);
1655
1656 /*
1657 * We now know the next block that we must process. It can be the
1658 * next block after the one we just processed, or something further
1659 * ahead. If it's further ahead, we can jump to it, but we choose to
1660 * do so only if we can skip at least SKIP_PAGES_THRESHOLD consecutive
1661 * pages. Since we're reading sequentially, the OS should be doing
1662 * readahead for us, so there's no gain in skipping a page now and
1663 * then. Skipping such a range might even discourage sequential
1664 * detection.
1665 *
1666 * This test also enables more frequent relfrozenxid advancement
1667 * during non-aggressive VACUUMs. If the range has any all-visible
1668 * pages then skipping makes updating relfrozenxid unsafe, which is a
1669 * real downside.
1670 */
1671 if (vacrel->next_unskippable_block - next_block >= SKIP_PAGES_THRESHOLD)
1672 {
1673 next_block = vacrel->next_unskippable_block;
1674 if (skipsallvis)
1675 vacrel->skippedallvis = true;
1676 }
1677 }
1678
1679 /* Now we must be in one of the two remaining states: */
1680 if (next_block < vacrel->next_unskippable_block)
1681 {
1682 /*
1683 * 2. We are processing a range of blocks that we could have skipped
1684 * but chose not to. We know that they are all-visible in the VM,
1685 * otherwise they would've been unskippable.
1686 */
1687 vacrel->current_block = next_block;
1688 /* Block was not eager scanned */
1689 *((bool *) per_buffer_data) = false;
1690 return vacrel->current_block;
1691 }
1692 else
1693 {
1694 /*
1695 * 3. We reached the next unskippable block. Process it. On next
1696 * iteration, we will be back in state 1.
1697 */
1698 Assert(next_block == vacrel->next_unskippable_block);
1699
1700 vacrel->current_block = next_block;
1701 *((bool *) per_buffer_data) = vacrel->next_unskippable_eager_scanned;
1702 return vacrel->current_block;
1703 }
1704}
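/*
 * Editorial sketch (not part of vacuumlazy.c): the state-1 skip decision
 * above, in isolation. A run of skippable pages is jumped over only when
 * it is at least SKIP_PAGES_THRESHOLD (32) blocks long:
 *
 *     bool jump = (vacrel->next_unskippable_block - next_block) >=
 *                 SKIP_PAGES_THRESHOLD;
 *
 * Shorter all-visible runs are read anyway, keeping the I/O pattern
 * sequential and preserving OS readahead.
 */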
1705
1706/*
1707 * Find the next unskippable block in a vacuum scan using the visibility map.
1708 * The next unskippable block and its visibility information is updated in
1709 * vacrel.
1710 *
1711 * Note: our opinion of which blocks can be skipped can go stale immediately.
1712 * It's okay if caller "misses" a page whose all-visible or all-frozen marking
1713 * was concurrently cleared, though. All that matters is that caller scan all
1714 * pages whose tuples might contain XIDs < OldestXmin, or MXIDs < OldestMxact.
1715 * (Actually, non-aggressive VACUUMs can choose to skip all-visible pages with
1716 * older XIDs/MXIDs. The *skippedallvis flag will be set here when the choice
1717 * to skip such a range is actually made, making everything safe.)
1718 */
1719static void
1720find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis)
1721{
1722 BlockNumber rel_pages = vacrel->rel_pages;
1723 BlockNumber next_unskippable_block = vacrel->next_unskippable_block + 1;
1724 Buffer next_unskippable_vmbuffer = vacrel->next_unskippable_vmbuffer;
1725 bool next_unskippable_eager_scanned = false;
1726
1727 *skipsallvis = false;
1728
1729 for (;; next_unskippable_block++)
1730 {
1731 uint8 mapbits = visibilitymap_get_status(vacrel->rel,
1732 next_unskippable_block,
1733 &next_unskippable_vmbuffer);
1734
1735
1736 /*
1737 * At the start of each eager scan region, normal vacuums with eager
1738 * scanning enabled reset the failure counter, allowing vacuum to
1739 * resume eager scanning if it had been suspended in the previous
1740 * region.
1741 */
1742 if (next_unskippable_block >= vacrel->next_eager_scan_region_start)
1743 {
1744 vacrel->eager_scan_remaining_fails =
1745 vacrel->eager_scan_max_fails_per_region;
1746 vacrel->next_eager_scan_region_start += EAGER_SCAN_REGION_SIZE;
1747 }
1748
1749 /*
1750 * A block is unskippable if it is not all visible according to the
1751 * visibility map.
1752 */
1753 if ((mapbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
1754 {
1755 Assert((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0);
1756 break;
1757 }
1758
1759 /*
1760 * Caller must scan the last page to determine whether it has tuples
1761 * (caller must have the opportunity to set vacrel->nonempty_pages).
1762 * This rule avoids having lazy_truncate_heap() take access-exclusive
1763 * lock on rel to attempt a truncation that fails anyway, just because
1764 * there are tuples on the last page (it is likely that there will be
1765 * tuples on other nearby pages as well, but those can be skipped).
1766 *
1767 * Implement this by always treating the last block as unsafe to skip.
1768 */
1769 if (next_unskippable_block == rel_pages - 1)
1770 break;
1771
1772 /* DISABLE_PAGE_SKIPPING makes all skipping unsafe */
1773 if (!vacrel->skipwithvm)
1774 break;
1775
1776 /*
1777 * All-frozen pages cannot contain XIDs < OldestXmin (XIDs that aren't
1778 * already frozen by now), so this page can be skipped.
1779 */
1780 if ((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0)
1781 continue;
1782
1783 /*
1784 * Aggressive vacuums cannot skip any all-visible pages that are not
1785 * also all-frozen.
1786 */
1787 if (vacrel->aggressive)
1788 break;
1789
1790 /*
1791 * Normal vacuums with eager scanning enabled only skip all-visible
1792 * but not all-frozen pages if they have hit the failure limit for the
1793 * current eager scan region.
1794 */
1795 if (vacrel->eager_scan_remaining_fails > 0)
1796 {
1797 next_unskippable_eager_scanned = true;
1798 break;
1799 }
1800
1801 /*
1802 * All-visible blocks are safe to skip in a normal vacuum. But
1803 * remember that the final range contains such a block for later.
1804 */
1805 *skipsallvis = true;
1806 }
1807
1808 /* write the local variables back to vacrel */
1809 vacrel->next_unskippable_block = next_unskippable_block;
1810 vacrel->next_unskippable_eager_scanned = next_unskippable_eager_scanned;
1811 vacrel->next_unskippable_vmbuffer = next_unskippable_vmbuffer;
1812}
1813
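The skipping policy implemented above distills to a short decision chain. Below is a minimal standalone sketch of that chain, illustrative only: the vm byte array and VM_* flags stand in for the real visibility map API, and the eager-scan failure budget and DISABLE_PAGE_SKIPPING are omitted.

#include <stdbool.h>
#include <stdio.h>

#define VM_ALL_VISIBLE 0x01
#define VM_ALL_FROZEN  0x02

/* Return the next block that must be scanned, starting at 'start'. */
static unsigned
next_unskippable(const unsigned char *vm, unsigned nblocks,
                 unsigned start, bool aggressive, bool *skipped_allvis)
{
    unsigned blk;

    for (blk = start;; blk++)
    {
        unsigned char bits = vm[blk];

        if ((bits & VM_ALL_VISIBLE) == 0)
            break;              /* not all-visible: must scan */
        if (blk == nblocks - 1)
            break;              /* always scan the last page */
        if (bits & VM_ALL_FROZEN)
            continue;           /* all-frozen: safe for everyone to skip */
        if (aggressive)
            break;              /* aggressive scans can't skip these */
        *skipped_allvis = true; /* normal vacuum skips, but remembers */
    }
    return blk;
}

int
main(void)
{
    /* blocks 0,1,3,5 all-visible+frozen; 2 all-visible only; 4 neither */
    unsigned char vm[] = {3, 3, 1, 3, 0, 3};
    bool skipped = false;

    printf("next unskippable: %u\n", next_unskippable(vm, 6, 0, false, &skipped));
    printf("skipped an all-visible page: %d\n", skipped);
    return 0;
}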
1814/*
1815 * lazy_scan_new_or_empty() -- lazy_scan_heap() new/empty page handling.
1816 *
1817 * Must call here to handle both new and empty pages before calling
1818 * lazy_scan_prune or lazy_scan_noprune, since they're not prepared to deal
1819 * with new or empty pages.
1820 *
1821 * It's necessary to consider new pages as a special case, since the rules for
1822 * maintaining the visibility map and FSM with empty pages are a little
1823 * different (though new pages can be truncated away during rel truncation).
1824 *
1825 * Empty pages are not really a special case -- they're just heap pages that
1826 * have no allocated tuples (including even LP_UNUSED items). You might
1827 * wonder why we need to handle them here all the same. It's only necessary
1828 * because of a corner-case involving a hard crash during heap relation
1829 * extension. If we ever make relation-extension crash safe, then it should
1830 * no longer be necessary to deal with empty pages here (or new pages, for
1831 * that matter).
1832 *
1833 * Caller must hold at least a shared lock. We might need to escalate the
1834 * lock in that case, so the type of lock caller holds needs to be specified
1835 * using the 'sharelock' argument.
1836 *
1837 * Returns false in common case where caller should go on to call
1838 * lazy_scan_prune (or lazy_scan_noprune). Otherwise returns true, indicating
1839 * that lazy_scan_heap is done processing the page, releasing lock on caller's
1840 * behalf.
1841 *
1842 * No vm_page_frozen output parameter (like that passed to lazy_scan_prune())
1843 * is passed here because neither empty nor new pages can be eagerly frozen.
1844 * New pages are never frozen. Empty pages are always set frozen in the VM at
1845 * the same time that they are set all-visible, and we don't eagerly scan
1846 * frozen pages.
1847 */
1848static bool
1849lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
1850 Page page, bool sharelock, Buffer vmbuffer)
1851{
1852 Size freespace;
1853
1854 if (PageIsNew(page))
1855 {
1856 /*
1857 * All-zeroes pages can be left over either when a backend extends the
1858 * relation by a single page but crashes before the newly initialized
1859 * page has been written out, or when the relation is bulk-extended
1860 * (which creates a number of empty pages at the tail end of the
1861 * relation) and those pages are then entered into the FSM.
1862 *
1863 * Note we do not enter the page into the visibilitymap. That has the
1864 * downside that we repeatedly visit this page in subsequent vacuums,
1865 * but otherwise we'll never discover the space on a promoted standby.
1866 * The harm of repeated checking normally ought not be too bad. The
1867 * space should usually get used at some point; otherwise there
1868 * wouldn't be any regular vacuums.
1869 *
1870 * Make sure these pages are in the FSM, to ensure they can be reused.
1871 * Do that by testing if there's any space recorded for the page. If
1872 * not, enter it. We do so after releasing the lock on the heap page;
1873 * the FSM is approximate, after all.
1874 */
1875 UnlockReleaseBuffer(buf);
1876
1877 if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
1878 {
1879 freespace = BLCKSZ - SizeOfPageHeaderData;
1880
1881 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1882 }
1883
1884 return true;
1885 }
1886
1887 if (PageIsEmpty(page))
1888 {
1889 /*
1890 * It seems likely that caller will always be able to get a cleanup
1891 * lock on an empty page. But don't take any chances -- escalate to
1892 * an exclusive lock (still don't need a cleanup lock, though).
1893 */
1894 if (sharelock)
1895 {
1896 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1897 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
1898
1899 if (!PageIsEmpty(page))
1900 {
1901 /* page isn't new or empty -- keep lock and pin for now */
1902 return false;
1903 }
1904 }
1905 else
1906 {
1907 /* Already have a full cleanup lock (which is more than enough) */
1908 }
1909
1910 /*
1911 * Unlike new pages, empty pages are always set all-visible and
1912 * all-frozen.
1913 */
1914 if (!PageIsAllVisible(page))
1915 {
1916 START_CRIT_SECTION();
1917
1918 /* mark buffer dirty before writing a WAL record */
1919 MarkBufferDirty(buf);
1920
1921 /*
1922 * It's possible that another backend has extended the heap,
1923 * initialized the page, and then failed to WAL-log the page due
1924 * to an ERROR. Since heap extension is not WAL-logged, recovery
1925 * might try to replay our record setting the page all-visible and
1926 * find that the page isn't initialized, which will cause a PANIC.
1927 * To prevent that, check whether the page has been previously
1928 * WAL-logged, and if not, do that now.
1929 */
1930 if (RelationNeedsWAL(vacrel->rel) &&
1931 PageGetLSN(page) == InvalidXLogRecPtr)
1932 log_newpage_buffer(buf, true);
1933
1934 PageSetAllVisible(page);
1935 visibilitymap_set(vacrel->rel, blkno, buf,
1936 InvalidXLogRecPtr,
1937 vmbuffer, InvalidTransactionId,
1938 VISIBILITYMAP_ALL_VISIBLE |
1939 VISIBILITYMAP_ALL_FROZEN);
1940 END_CRIT_SECTION();
1941
1942 /* Count the newly all-frozen pages for logging */
1943 vacrel->new_all_visible_pages++;
1944 vacrel->new_all_visible_all_frozen_pages++;
1945 }
1946
1947 freespace = PageGetHeapFreeSpace(page);
1948 UnlockReleaseBuffer(buf);
1949 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1950 return true;
1951 }
1952
1953 /* page isn't new or empty -- keep lock and pin */
1954 return false;
1955}
1956
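For a PageIsNew() page, the free space recorded in the FSM is simply the whole block minus the page header, since no line pointers or tuple data exist yet. A standalone illustration with the default 8kB block size and the usual 24-byte page header (both are build-dependent constants, assumed here for concreteness):

#include <stdio.h>

#define BLCKSZ 8192                 /* default build-time block size */
#define SIZE_OF_PAGE_HEADER 24      /* typical page header size */

int
main(void)
{
    unsigned freespace = BLCKSZ - SIZE_OF_PAGE_HEADER;

    printf("recordable free space: %u bytes\n", freespace);  /* 8168 */
    return 0;
}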
1957/* qsort comparator for sorting OffsetNumbers */
1958static int
1959cmpOffsetNumbers(const void *a, const void *b)
1960{
1961 return pg_cmp_u16(*(const OffsetNumber *) a, *(const OffsetNumber *) b);
1962}
1963
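As a usage note, this comparator plugs straight into qsort(). The following standalone fragment sorts a small offset array the same way lazy_scan_prune() sorts presult.deadoffsets; OffsetNumber is reduced to a plain uint16_t and the comparator is re-expressed without pg_cmp_u16(), purely for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint16_t OffsetNumber;      /* stand-in for the real typedef */

static int
cmp_offsets(const void *a, const void *b)
{
    OffsetNumber lhs = *(const OffsetNumber *) a;
    OffsetNumber rhs = *(const OffsetNumber *) b;

    /* -1/0/+1 without overflow, like pg_cmp_u16() */
    return (lhs > rhs) - (lhs < rhs);
}

int
main(void)
{
    OffsetNumber deadoffsets[] = {7, 2, 42, 2, 11};
    size_t n = sizeof(deadoffsets) / sizeof(deadoffsets[0]);

    qsort(deadoffsets, n, sizeof(OffsetNumber), cmp_offsets);
    for (size_t i = 0; i < n; i++)
        printf("%d ", (int) deadoffsets[i]);
    printf("\n");                   /* 2 2 7 11 42 */
    return 0;
}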
1964/*
1965 * Helper to correct any corruption detected on a heap page and its
1966 * corresponding visibility map page after pruning but before setting the
1967 * visibility map. It examines the heap page, the associated VM page, and the
1968 * number of dead items previously identified.
1969 *
1970 * This function must be called while holding an exclusive lock on the heap
1971 * buffer, and the dead items must have been discovered under that same lock.
1972 *
1973 * The provided vmbits must reflect the current state of the VM block
1974 * referenced by vmbuffer. Although we do not hold a lock on the VM buffer, it
1975 * is pinned, and the heap buffer is exclusively locked, ensuring that no
1976 * other backend can update the VM bits corresponding to this heap page.
1977 *
1978 * If this function clears corrupt VM bits, it zeroes out *vmbits.
1979 */
1980static void
1981identify_and_fix_vm_corruption(Relation rel, Buffer heap_buffer,
1982 BlockNumber heap_blk, Page heap_page,
1983 int nlpdead_items,
1984 Buffer vmbuffer,
1985 uint8 *vmbits)
1986{
1987 Assert(visibilitymap_get_status(rel, heap_blk, &vmbuffer) == *vmbits);
1988
1990
1991 /*
1992 * As of PostgreSQL 9.2, the visibility map bit should never be set if the
1993 * page-level bit is clear. However, it's possible that the bit got
1994 * cleared after heap_vac_scan_next_block() was called, so we must recheck
1995 * with buffer lock before concluding that the VM is corrupt.
1996 */
1997 if (!PageIsAllVisible(heap_page) &&
1998 ((*vmbits & VISIBILITYMAP_VALID_BITS) != 0))
1999 {
2000 ereport(WARNING,
2001 (errcode(ERRCODE_DATA_CORRUPTED),
2002 errmsg("page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
2003 RelationGetRelationName(rel), heap_blk)));
2004
2005 visibilitymap_clear(rel, heap_blk, vmbuffer,
2006 VISIBILITYMAP_VALID_BITS);
2007 *vmbits = 0;
2008 }
2009
2010 /*
2011 * It's possible for the value returned by
2012 * GetOldestNonRemovableTransactionId() to move backwards, so it's not
2013 * wrong for us to see tuples that appear to not be visible to everyone
2014 * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value
2015 * never moves backwards, but GetOldestNonRemovableTransactionId() is
2016 * conservative and sometimes returns a value that's unnecessarily small,
2017 * so if we see that contradiction it just means that the tuples that we
2018 * think are not visible to everyone yet actually are, and the
2019 * PD_ALL_VISIBLE flag is correct.
2020 *
2021 * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
2022 * however.
2023 */
2024 else if (PageIsAllVisible(heap_page) && nlpdead_items > 0)
2025 {
2026 ereport(WARNING,
2027 (errcode(ERRCODE_DATA_CORRUPTED),
2028 errmsg("page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
2029 RelationGetRelationName(rel), heap_blk)));
2030
2031 PageClearAllVisible(heap_page);
2032 MarkBufferDirty(heap_buffer);
2033 visibilitymap_clear(rel, heap_blk, vmbuffer,
2034 VISIBILITYMAP_VALID_BITS);
2035 *vmbits = 0;
2036 }
2037}
2038
2039/*
2040 * lazy_scan_prune() -- lazy_scan_heap() pruning and freezing.
2041 *
2042 * Caller must hold pin and buffer cleanup lock on the buffer.
2043 *
2044 * vmbuffer is the buffer containing the VM block with visibility information
2045 * for the heap block, blkno.
2046 *
2047 * *has_lpdead_items is set to true or false depending on whether, upon return
2048 * from this function, any LP_DEAD items are still present on the page.
2049 *
2050 * *vm_page_frozen is set to true if the page is newly set all-frozen in the
2051 * VM. The caller currently only uses this for determining whether an eagerly
2052 * scanned page was successfully set all-frozen.
2053 *
2054 * Returns the number of tuples deleted from the page during HOT pruning.
2055 */
2056static int
2057lazy_scan_prune(LVRelState *vacrel,
2058 Buffer buf,
2059 BlockNumber blkno,
2060 Page page,
2061 Buffer vmbuffer,
2062 bool *has_lpdead_items,
2063 bool *vm_page_frozen)
2064{
2065 Relation rel = vacrel->rel;
2066 PruneFreezeResult presult;
2067 PruneFreezeParams params = {
2068 .relation = rel,
2069 .buffer = buf,
2070 .reason = PRUNE_VACUUM_SCAN,
2071 .options = HEAP_PAGE_PRUNE_FREEZE,
2072 .vistest = vacrel->vistest,
2073 .cutoffs = &vacrel->cutoffs,
2074 };
2075 uint8 old_vmbits = 0;
2076 uint8 new_vmbits = 0;
2077
2078 Assert(BufferGetBlockNumber(buf) == blkno);
2079
2080 /*
2081 * Prune all HOT-update chains and potentially freeze tuples on this page.
2082 *
2083 * If the relation has no indexes, we can immediately mark would-be dead
2084 * items LP_UNUSED.
2085 *
2086 * The number of tuples removed from the page is returned in
2087 * presult.ndeleted. It should not be confused with presult.lpdead_items;
2088 * presult.lpdead_items's final value can be thought of as the number of
2089 * tuples that were deleted from indexes.
2090 *
2091 * We will update the VM after collecting LP_DEAD items and freezing
2092 * tuples. Pruning will have determined whether or not the page is
2093 * all-visible.
2094 */
2095 if (vacrel->nindexes == 0)
2096 params.options |= HEAP_PAGE_PRUNE_MARK_UNUSED_NOW;
2097
2098 heap_page_prune_and_freeze(&params,
2099 &presult,
2100 &vacrel->offnum,
2101 &vacrel->NewRelfrozenXid, &vacrel->NewRelminMxid);
2102
2103 Assert(MultiXactIdIsValid(vacrel->NewRelminMxid));
2104 Assert(TransactionIdIsValid(vacrel->NewRelfrozenXid));
2105
2106 if (presult.nfrozen > 0)
2107 {
2108 /*
2109 * We don't increment the new_frozen_tuple_pages instrumentation
2110 * counter when nfrozen == 0, since it only counts pages with newly
2111 * frozen tuples (don't confuse that with pages newly set all-frozen
2112 * in VM).
2113 */
2114 vacrel->new_frozen_tuple_pages++;
2115 }
2116
2117 /*
2118 * VACUUM will call heap_page_is_all_visible() during the second pass over
2119 * the heap to determine all_visible and all_frozen for the page -- this
2120 * is a specialized version of the logic from this function. Now that
2121 * we've finished pruning and freezing, make sure that we're in total
2122 * agreement with heap_page_is_all_visible() using an assertion.
2123 */
2124#ifdef USE_ASSERT_CHECKING
2125 if (presult.all_visible)
2126 {
2127 TransactionId debug_cutoff;
2128 bool debug_all_frozen;
2129
2130 Assert(presult.lpdead_items == 0);
2131
2132 Assert(heap_page_is_all_visible(vacrel->rel, buf,
2133 vacrel->cutoffs.OldestXmin, &debug_all_frozen,
2134 &debug_cutoff, &vacrel->offnum));
2135
2136 Assert(presult.all_frozen == debug_all_frozen);
2137
2138 Assert(!TransactionIdIsValid(debug_cutoff) ||
2139 debug_cutoff == presult.vm_conflict_horizon);
2140 }
2141#endif
2142
2143 /*
2144 * Now save details of the LP_DEAD items from the page in vacrel
2145 */
2146 if (presult.lpdead_items > 0)
2147 {
2148 vacrel->lpdead_item_pages++;
2149
2150 /*
2151 * deadoffsets are collected incrementally in
2152 * heap_page_prune_and_freeze() as each dead line pointer is recorded,
2153 * in indeterminate order, but dead_items_add requires them to be
2154 * sorted.
2155 */
2156 qsort(presult.deadoffsets, presult.lpdead_items, sizeof(OffsetNumber),
2157 cmpOffsetNumbers);
2158
2159 dead_items_add(vacrel, blkno, presult.deadoffsets, presult.lpdead_items);
2160 }
2161
2162 /* Finally, add page-local counts to whole-VACUUM counts */
2163 vacrel->tuples_deleted += presult.ndeleted;
2164 vacrel->tuples_frozen += presult.nfrozen;
2165 vacrel->lpdead_items += presult.lpdead_items;
2166 vacrel->live_tuples += presult.live_tuples;
2167 vacrel->recently_dead_tuples += presult.recently_dead_tuples;
2168
2169 /* Can't truncate this page */
2170 if (presult.hastup)
2171 vacrel->nonempty_pages = blkno + 1;
2172
2173 /* Did we find LP_DEAD items? */
2174 *has_lpdead_items = (presult.lpdead_items > 0);
2175
2176 Assert(!presult.all_visible || !(*has_lpdead_items));
2177 Assert(!presult.all_frozen || presult.all_visible);
2178
2179 old_vmbits = visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer);
2180
2181 identify_and_fix_vm_corruption(vacrel->rel, buf, blkno, page,
2182 presult.lpdead_items, vmbuffer,
2183 &old_vmbits);
2184
2185 if (!presult.all_visible)
2186 return presult.ndeleted;
2187
2188 /* Set the visibility map and page visibility hint */
2189 new_vmbits |= VISIBILITYMAP_ALL_VISIBLE;
2190
2191 if (presult.all_frozen)
2192 new_vmbits |= VISIBILITYMAP_ALL_FROZEN;
2193
2194 /* Nothing to do */
2195 if (old_vmbits == new_vmbits)
2196 return presult.ndeleted;
2197
2198 /*
2199 * It should never be the case that the visibility map page is set while
2200 * the page-level bit is clear (and if so, we cleared it above), but the
2201 * reverse is allowed (if checksums are not enabled). Regardless, set both
2202 * bits so that we get back in sync.
2203 *
2204 * The heap buffer must be marked dirty before adding it to the WAL chain
2205 * when setting the VM. We don't worry about unnecessarily dirtying the
2206 * heap buffer if PD_ALL_VISIBLE is already set, though. It is extremely
2207 * rare to have a clean heap buffer with PD_ALL_VISIBLE already set and
2208 * the VM bits clear, so there is no point in optimizing it.
2209 */
2210 PageSetAllVisible(page);
2211 MarkBufferDirty(buf);
2212
2213 /*
2214 * If the page is being set all-frozen, we pass InvalidTransactionId as
2215 * the cutoff_xid, since a snapshot conflict horizon sufficient to make
2216 * everything safe for REDO was logged when the page's tuples were frozen.
2217 */
2218 Assert(!presult.all_frozen ||
2219 !TransactionIdIsValid(presult.vm_conflict_horizon));
2220
2221 visibilitymap_set(vacrel->rel, blkno, buf,
2222 InvalidXLogRecPtr,
2223 vmbuffer, presult.vm_conflict_horizon,
2224 new_vmbits);
2225
2226 /*
2227 * If the page wasn't already set all-visible and/or all-frozen in the VM,
2228 * count it as newly set for logging.
2229 */
2230 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2231 {
2232 vacrel->new_all_visible_pages++;
2233 if (presult.all_frozen)
2234 {
2235 vacrel->new_all_visible_all_frozen_pages++;
2236 *vm_page_frozen = true;
2237 }
2238 }
2239 else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
2240 presult.all_frozen)
2241 {
2242 vacrel->new_all_frozen_pages++;
2243 *vm_page_frozen = true;
2244 }
2245
2246 return presult.ndeleted;
2247}
2248
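The instrumentation at the end of lazy_scan_prune() reduces to comparing the VM bits that were already set against the bits being set now. A standalone sketch of just that bookkeeping (plain integers stand in for the VM flags and for the LVRelState counters):

#include <stdbool.h>
#include <stdio.h>

#define ALL_VISIBLE 0x01
#define ALL_FROZEN  0x02

struct counters
{
    long new_all_visible_pages;
    long new_all_visible_all_frozen_pages;
    long new_all_frozen_pages;
};

static void
count_vm_change(struct counters *c, unsigned old_bits, unsigned new_bits,
                bool *vm_page_frozen)
{
    if (old_bits == new_bits)
        return;                     /* nothing newly set */

    if ((old_bits & ALL_VISIBLE) == 0)
    {
        c->new_all_visible_pages++;
        if (new_bits & ALL_FROZEN)
        {
            c->new_all_visible_all_frozen_pages++;
            *vm_page_frozen = true;
        }
    }
    else if ((old_bits & ALL_FROZEN) == 0 && (new_bits & ALL_FROZEN) != 0)
    {
        c->new_all_frozen_pages++;  /* was all-visible, now also frozen */
        *vm_page_frozen = true;
    }
}

int
main(void)
{
    struct counters c = {0, 0, 0};
    bool frozen = false;

    count_vm_change(&c, 0, ALL_VISIBLE | ALL_FROZEN, &frozen);
    count_vm_change(&c, ALL_VISIBLE, ALL_VISIBLE | ALL_FROZEN, &frozen);
    printf("%ld %ld %ld\n", c.new_all_visible_pages,
           c.new_all_visible_all_frozen_pages, c.new_all_frozen_pages);
    return 0;
}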
2249/*
2250 * lazy_scan_noprune() -- lazy_scan_prune() without pruning or freezing
2251 *
2252 * Caller need only hold a pin and share lock on the buffer, unlike
2253 * lazy_scan_prune, which requires a full cleanup lock. While pruning isn't
2254 * performed here, it's quite possible that an earlier opportunistic pruning
2255 * operation left LP_DEAD items behind. We'll at least collect any such items
2256 * in dead_items for removal from indexes.
2257 *
2258 * For aggressive VACUUM callers, we may return false to indicate that a full
2259 * cleanup lock is required for processing by lazy_scan_prune. This is only
2260 * necessary when the aggressive VACUUM needs to freeze some tuple XIDs from
2261 * one or more tuples on the page. We always return true for non-aggressive
2262 * callers.
2263 *
2264 * If this function returns true, *has_lpdead_items gets set to true or false
2265 * depending on whether, upon return from this function, any LP_DEAD items are
2266 * present on the page. If this function returns false, *has_lpdead_items
2267 * is not updated.
2268 */
2269static bool
2270lazy_scan_noprune(LVRelState *vacrel,
2271 Buffer buf,
2272 BlockNumber blkno,
2273 Page page,
2274 bool *has_lpdead_items)
2275{
2276 OffsetNumber offnum,
2277 maxoff;
2278 int lpdead_items,
2279 live_tuples,
2280 recently_dead_tuples,
2281 missed_dead_tuples;
2282 bool hastup;
2283 HeapTupleHeader tupleheader;
2284 TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
2285 MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid;
2286 OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
2287
2288 Assert(BufferGetBlockNumber(buf) == blkno);
2289
2290 hastup = false; /* for now */
2291
2292 lpdead_items = 0;
2293 live_tuples = 0;
2294 recently_dead_tuples = 0;
2295 missed_dead_tuples = 0;
2296
2297 maxoff = PageGetMaxOffsetNumber(page);
2298 for (offnum = FirstOffsetNumber;
2299 offnum <= maxoff;
2300 offnum = OffsetNumberNext(offnum))
2301 {
2302 ItemId itemid;
2303 HeapTupleData tuple;
2304
2305 vacrel->offnum = offnum;
2306 itemid = PageGetItemId(page, offnum);
2307
2308 if (!ItemIdIsUsed(itemid))
2309 continue;
2310
2311 if (ItemIdIsRedirected(itemid))
2312 {
2313 hastup = true;
2314 continue;
2315 }
2316
2317 if (ItemIdIsDead(itemid))
2318 {
2319 /*
2320 * Deliberately don't set hastup=true here. See same point in
2321 * lazy_scan_prune for an explanation.
2322 */
2323 deadoffsets[lpdead_items++] = offnum;
2324 continue;
2325 }
2326
2327 hastup = true; /* page prevents rel truncation */
2328 tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
2329 if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
2330 &NoFreezePageRelfrozenXid,
2331 &NoFreezePageRelminMxid))
2332 {
2333 /* Tuple with XID < FreezeLimit (or MXID < MultiXactCutoff) */
2334 if (vacrel->aggressive)
2335 {
2336 /*
2337 * Aggressive VACUUMs must always be able to advance rel's
2338 * relfrozenxid to a value >= FreezeLimit (and be able to
2339 * advance rel's relminmxid to a value >= MultiXactCutoff).
2340 * The ongoing aggressive VACUUM won't be able to do that
2341 * unless it can freeze an XID (or MXID) from this tuple now.
2342 *
2343 * The only safe option is to have caller perform processing
2344 * of this page using lazy_scan_prune. Caller might have to
2345 * wait a while for a cleanup lock, but it can't be helped.
2346 */
2347 vacrel->offnum = InvalidOffsetNumber;
2348 return false;
2349 }
2350
2351 /*
2352 * Non-aggressive VACUUMs are under no obligation to advance
2353 * relfrozenxid (even by one XID). We can be much laxer here.
2354 *
2355 * Currently we always just accept an older final relfrozenxid
2356 * and/or relminmxid value. We never make caller wait or work a
2357 * little harder, even when it likely makes sense to do so.
2358 */
2359 }
2360
2361 ItemPointerSet(&(tuple.t_self), blkno, offnum);
2362 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2363 tuple.t_len = ItemIdGetLength(itemid);
2364 tuple.t_tableOid = RelationGetRelid(vacrel->rel);
2365
2366 switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
2367 buf))
2368 {
2369 case HEAPTUPLE_DELETE_IN_PROGRESS:
2370 case HEAPTUPLE_LIVE:
2371
2372 /*
2373 * Count both cases as live, just like lazy_scan_prune
2374 */
2375 live_tuples++;
2376
2377 break;
2378 case HEAPTUPLE_DEAD:
2379
2380 /*
2381 * There is some useful work for pruning to do, that won't be
2382 * done due to failure to get a cleanup lock.
2383 */
2384 missed_dead_tuples++;
2385 break;
2386 case HEAPTUPLE_RECENTLY_DEAD:
2387
2388 /*
2389 * Count in recently_dead_tuples, just like lazy_scan_prune
2390 */
2391 recently_dead_tuples++;
2392 break;
2393 case HEAPTUPLE_INSERT_IN_PROGRESS:
2394
2395 /*
2396 * Do not count these rows as live, just like lazy_scan_prune
2397 */
2398 break;
2399 default:
2400 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2401 break;
2402 }
2403 }
2404
2405 vacrel->offnum = InvalidOffsetNumber;
2406
2407 /*
2408 * By here we know for sure that caller can put off freezing and pruning
2409 * this particular page until the next VACUUM. Remember its details now.
2410 * (lazy_scan_prune expects a clean slate, so we have to do this last.)
2411 */
2412 vacrel->NewRelfrozenXid = NoFreezePageRelfrozenXid;
2413 vacrel->NewRelminMxid = NoFreezePageRelminMxid;
2414
2415 /* Save any LP_DEAD items found on the page in dead_items */
2416 if (vacrel->nindexes == 0)
2417 {
2418 /* Using one-pass strategy (since table has no indexes) */
2419 if (lpdead_items > 0)
2420 {
2421 /*
2422 * Perfunctory handling for the corner case where a single pass
2423 * strategy VACUUM cannot get a cleanup lock, and it turns out
2424 * that there are one or more LP_DEAD items: just count the LP_DEAD
2425 * items as missed_dead_tuples instead. (This is a bit dishonest,
2426 * but it beats having to maintain specialized heap vacuuming code
2427 * forever, for vanishingly little benefit.)
2428 */
2429 hastup = true;
2430 missed_dead_tuples += lpdead_items;
2431 }
2432 }
2433 else if (lpdead_items > 0)
2434 {
2435 /*
2436 * Page has LP_DEAD items, and so any references/TIDs that remain in
2437 * indexes will be deleted during index vacuuming (and then marked
2438 * LP_UNUSED in the heap)
2439 */
2440 vacrel->lpdead_item_pages++;
2441
2442 dead_items_add(vacrel, blkno, deadoffsets, lpdead_items);
2443
2444 vacrel->lpdead_items += lpdead_items;
2445 }
2446
2447 /*
2448 * Finally, add relevant page-local counts to whole-VACUUM counts
2449 */
2450 vacrel->live_tuples += live_tuples;
2451 vacrel->recently_dead_tuples += recently_dead_tuples;
2452 vacrel->missed_dead_tuples += missed_dead_tuples;
2453 if (missed_dead_tuples > 0)
2454 vacrel->missed_dead_pages++;
2455
2456 /* Can't truncate this page */
2457 if (hastup)
2458 vacrel->nonempty_pages = blkno + 1;
2459
2460 /* Did we find LP_DEAD items? */
2461 *has_lpdead_items = (lpdead_items > 0);
2462
2463 /* Caller won't need to call lazy_scan_prune with same page */
2464 return true;
2465}
2466
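The per-tuple bookkeeping in lazy_scan_noprune() is a straight tally in which each tuple lands in exactly one bucket, with DELETE_IN_PROGRESS counted as live just as the comments above describe. A standalone sketch of the tally (a plain enum stands in for HTSV_Result):

#include <stdio.h>

enum tuple_state
{
    TUPLE_LIVE,
    TUPLE_DEAD,
    TUPLE_RECENTLY_DEAD,
    TUPLE_INSERT_IN_PROGRESS,
    TUPLE_DELETE_IN_PROGRESS
};

struct tally
{
    int live, missed_dead, recently_dead;
};

static void
classify(struct tally *t, enum tuple_state s)
{
    switch (s)
    {
        case TUPLE_DELETE_IN_PROGRESS:
        case TUPLE_LIVE:
            t->live++;              /* both counted as live */
            break;
        case TUPLE_DEAD:
            t->missed_dead++;       /* pruning would have removed it */
            break;
        case TUPLE_RECENTLY_DEAD:
            t->recently_dead++;
            break;
        case TUPLE_INSERT_IN_PROGRESS:
            break;                  /* deliberately not counted as live */
    }
}

int
main(void)
{
    struct tally t = {0, 0, 0};
    enum tuple_state page[] = {TUPLE_LIVE, TUPLE_DEAD, TUPLE_DELETE_IN_PROGRESS};

    for (int i = 0; i < 3; i++)
        classify(&t, page[i]);
    printf("live=%d missed_dead=%d recently_dead=%d\n",
           t.live, t.missed_dead, t.recently_dead);
    return 0;
}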
2467/*
2468 * Main entry point for index vacuuming and heap vacuuming.
2469 *
2470 * Removes items collected in dead_items from table's indexes, then marks the
2471 * same items LP_UNUSED in the heap. See the comments above lazy_scan_heap
2472 * for full details.
2473 *
2474 * Also empties dead_items, freeing up space for later TIDs.
2475 *
2476 * We may choose to bypass index vacuuming at this point, though only when the
2477 * ongoing VACUUM operation will definitely only have one index scan/round of
2478 * index vacuuming.
2479 */
2480static void
2481lazy_vacuum(LVRelState *vacrel)
2482{
2483 bool bypass;
2484
2485 /* Should not end up here with no indexes */
2486 Assert(vacrel->nindexes > 0);
2487 Assert(vacrel->lpdead_item_pages > 0);
2488
2489 if (!vacrel->do_index_vacuuming)
2490 {
2491 Assert(!vacrel->do_index_cleanup);
2492 dead_items_reset(vacrel);
2493 return;
2494 }
2495
2496 /*
2497 * Consider bypassing index vacuuming (and heap vacuuming) entirely.
2498 *
2499 * We currently only do this in cases where the number of LP_DEAD items
2500 * for the entire VACUUM operation is close to zero. This avoids sharp
2501 * discontinuities in the duration and overhead of successive VACUUM
2502 * operations that run against the same table with a fixed workload.
2503 * Ideally, successive VACUUM operations will behave as if there are
2504 * exactly zero LP_DEAD items in cases where there are close to zero.
2505 *
2506 * This is likely to be helpful with a table that is continually affected
2507 * by UPDATEs that can mostly apply the HOT optimization, but occasionally
2508 * have small aberrations that lead to just a few heap pages retaining
2509 * only one or two LP_DEAD items. This is pretty common; even when the
2510 * DBA goes out of their way to make UPDATEs use HOT, it is practically
2511 * impossible to predict whether HOT will be applied in 100% of cases.
2512 * It's far easier to ensure that 99%+ of all UPDATEs against a table use
2513 * HOT through careful tuning.
2514 */
2515 bypass = false;
2516 if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
2517 {
2518 BlockNumber threshold;
2519
2520 Assert(vacrel->num_index_scans == 0);
2521 Assert(vacrel->lpdead_items == vacrel->dead_items_info->num_items);
2522 Assert(vacrel->do_index_vacuuming);
2523 Assert(vacrel->do_index_cleanup);
2524
2525 /*
2526 * This crossover point at which we'll start to do index vacuuming is
2527 * expressed as a percentage of the total number of heap pages in the
2528 * table that are known to have at least one LP_DEAD item. This is
2529 * much more important than the total number of LP_DEAD items, since
2530 * it's a proxy for the number of heap pages whose visibility map bits
2531 * cannot be set on account of bypassing index and heap vacuuming.
2532 *
2533 * We apply one further precautionary test: the space currently used
2534 * to store the TIDs (TIDs that now all point to LP_DEAD items) must
2535 * not exceed 32MB. This limits the risk that we will bypass index
2536 * vacuuming again and again until eventually there is a VACUUM whose
2537 * dead_items space is not CPU cache resident.
2538 *
2539 * We don't take any special steps to remember the LP_DEAD items (such
2540 * as counting them in our final update to the stats system) when the
2541 * optimization is applied. Though the accounting used in analyze.c's
2542 * acquire_sample_rows() will recognize the same LP_DEAD items as dead
2543 * rows in its own stats report, that's okay. The discrepancy should
2544 * be negligible. If this optimization is ever expanded to cover more
2545 * cases then this may need to be reconsidered.
2546 */
2547 threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
2548 bypass = (vacrel->lpdead_item_pages < threshold &&
2549 TidStoreMemoryUsage(vacrel->dead_items) < 32 * 1024 * 1024);
2550 }
2551
2552 if (bypass)
2553 {
2554 /*
2555 * There are almost zero TIDs. Behave as if there were precisely
2556 * zero: bypass index vacuuming, but do index cleanup.
2557 *
2558 * We expect that the ongoing VACUUM operation will finish very
2559 * quickly, so there is no point in considering speeding up as a
2560 * failsafe against wraparound failure. (Index cleanup is expected to
2561 * finish very quickly in cases where there were no ambulkdelete()
2562 * calls.)
2563 */
2564 vacrel->do_index_vacuuming = false;
2565 }
2566 else if (lazy_vacuum_all_indexes(vacrel))
2567 {
2568 /*
2569 * We successfully completed a round of index vacuuming. Do related
2570 * heap vacuuming now.
2571 */
2572 lazy_vacuum_heap_rel(vacrel);
2573 }
2574 else
2575 {
2576 /*
2577 * Failsafe case.
2578 *
2579 * We attempted index vacuuming, but didn't finish a full round/full
2580 * index scan. This happens when relfrozenxid or relminmxid is too
2581 * far in the past.
2582 *
2583 * From this point on the VACUUM operation will do no further index
2584 * vacuuming or heap vacuuming. This VACUUM operation won't end up
2585 * back here again.
2586 */
2587 Assert(VacuumFailsafeActive);
2588 }
2589
2590 /*
2591 * Forget the LP_DEAD items that we just vacuumed (or just decided to not
2592 * vacuum)
2593 */
2594 dead_items_reset(vacrel);
2595}
2596
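Numerically, the bypass test combines two limits: pages carrying LP_DEAD items must stay under BYPASS_THRESHOLD_PAGES (2%) of rel_pages, and the TID store must stay under 32MB. A standalone sketch of the same arithmetic, with made-up table sizes and tidstore_bytes standing in for TidStoreMemoryUsage():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define BYPASS_THRESHOLD_PAGES 0.02 /* i.e. 2% of rel_pages */

static bool
would_bypass(unsigned rel_pages, unsigned lpdead_item_pages,
             size_t tidstore_bytes)
{
    unsigned threshold = (unsigned) (rel_pages * BYPASS_THRESHOLD_PAGES);

    return lpdead_item_pages < threshold &&
        tidstore_bytes < (size_t) 32 * 1024 * 1024;
}

int
main(void)
{
    /* 100000-page table, 50 pages with LP_DEAD items, 1MB of TIDs */
    printf("%d\n", would_bypass(100000, 50, 1024 * 1024));   /* 1: bypass */
    /* same table, but 5000 pages with LP_DEAD items */
    printf("%d\n", would_bypass(100000, 5000, 1024 * 1024)); /* 0: vacuum */
    return 0;
}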
2597/*
2598 * lazy_vacuum_all_indexes() -- Main entry for index vacuuming
2599 *
2600 * Returns true in the common case when all indexes were successfully
2601 * vacuumed. Returns false in rare cases where we determined that the ongoing
2602 * VACUUM operation is at risk of taking too long to finish, leading to
2603 * wraparound failure.
2604 */
2605static bool
2606lazy_vacuum_all_indexes(LVRelState *vacrel)
2607{
2608 bool allindexes = true;
2609 double old_live_tuples = vacrel->rel->rd_rel->reltuples;
2610 const int progress_start_index[] = {
2611 PROGRESS_VACUUM_PHASE,
2612 PROGRESS_VACUUM_INDEXES_TOTAL
2613 };
2614 const int progress_end_index[] = {
2615 PROGRESS_VACUUM_PHASE,
2616 PROGRESS_VACUUM_INDEXES_TOTAL,
2617 PROGRESS_VACUUM_NUM_INDEX_VACUUMS
2618 };
2619 int64 progress_start_val[2];
2620 int64 progress_end_val[3];
2621
2622 Assert(vacrel->nindexes > 0);
2623 Assert(vacrel->do_index_vacuuming);
2624 Assert(vacrel->do_index_cleanup);
2625
2626 /* Precheck for XID wraparound emergencies */
2628 {
2629 /* Wraparound emergency -- don't even start an index scan */
2630 return false;
2631 }
2632
2633 /*
2634 * Report that we are now vacuuming indexes and the number of indexes to
2635 * vacuum.
2636 */
2637 progress_start_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_INDEX;
2638 progress_start_val[1] = vacrel->nindexes;
2639 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
2640
2641 if (!ParallelVacuumIsActive(vacrel))
2642 {
2643 for (int idx = 0; idx < vacrel->nindexes; idx++)
2644 {
2645 Relation indrel = vacrel->indrels[idx];
2646 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2647
2648 vacrel->indstats[idx] = lazy_vacuum_one_index(indrel, istat,
2649 old_live_tuples,
2650 vacrel);
2651
2652 /* Report the number of indexes vacuumed */
2653 pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
2654 idx + 1);
2655
2656 if (lazy_check_wraparound_failsafe(vacrel))
2657 {
2658 /* Wraparound emergency -- end current index scan */
2659 allindexes = false;
2660 break;
2661 }
2662 }
2663 }
2664 else
2665 {
2666 /* Outsource everything to parallel variant */
2667 parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples,
2668 vacrel->num_index_scans);
2669
2670 /*
2671 * Do a postcheck to consider applying wraparound failsafe now. Note
2672 * that parallel VACUUM only gets the precheck and this postcheck.
2673 */
2675 allindexes = false;
2676 }
2677
2678 /*
2679 * We delete all LP_DEAD items from the first heap pass in all indexes on
2680 * each call here (except calls where we choose to do the failsafe). This
2681 * makes the next call to lazy_vacuum_heap_rel() safe (except in the event
2682 * of the failsafe triggering, which prevents the next call from taking
2683 * place).
2684 */
2685 Assert(vacrel->num_index_scans > 0 ||
2686 vacrel->dead_items_info->num_items == vacrel->lpdead_items);
2687 Assert(allindexes || VacuumFailsafeActive);
2688
2689 /*
2690 * Increase and report the number of index scans. Also, we reset
2691 * PROGRESS_VACUUM_INDEXES_TOTAL and PROGRESS_VACUUM_INDEXES_PROCESSED.
2692 *
2693 * We deliberately include the case where we started a round of bulk
2694 * deletes that we weren't able to finish due to the failsafe triggering.
2695 */
2696 vacrel->num_index_scans++;
2697 progress_end_val[0] = 0;
2698 progress_end_val[1] = 0;
2699 progress_end_val[2] = vacrel->num_index_scans;
2700 pgstat_progress_update_multi_param(3, progress_end_index, progress_end_val);
2701
2702 return allindexes;
2703}
2704
2705/*
2706 * Read stream callback for vacuum's third phase (second pass over the heap).
2707 * Gets the next block from the TID store and returns it or InvalidBlockNumber
2708 * if there are no further blocks to vacuum.
2709 *
2710 * NB: Assumed to be safe to use with READ_STREAM_USE_BATCHING.
2711 */
2712static BlockNumber
2713vacuum_reap_lp_read_stream_next(ReadStream *stream,
2714 void *callback_private_data,
2715 void *per_buffer_data)
2716{
2717 TidStoreIter *iter = callback_private_data;
2718 TidStoreIterResult *iter_result;
2719
2720 iter_result = TidStoreIterateNext(iter);
2721 if (iter_result == NULL)
2722 return InvalidBlockNumber;
2723
2724 /*
2725 * Save the TidStoreIterResult for later, so we can extract the offsets.
2726 * It is safe to copy the result, according to TidStoreIterateNext().
2727 */
2728 memcpy(per_buffer_data, iter_result, sizeof(*iter_result));
2729
2730 return iter_result->blkno;
2731}
2732
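The callback is a pull-style iterator: each call hands the read stream one more block number until the store is exhausted, and the stream takes care of the actual I/O. The shape in standalone form (an array-backed iterator stands in for TidStoreIter, and a plain sentinel for InvalidBlockNumber):

#include <stdio.h>

#define INVALID_BLOCK ((unsigned) -1)

struct block_iter
{
    const unsigned *blocks;
    int             pos;
    int             nblocks;
};

/* Return the next block number, or the sentinel when exhausted. */
static unsigned
next_block(void *callback_private_data)
{
    struct block_iter *iter = callback_private_data;

    if (iter->pos >= iter->nblocks)
        return INVALID_BLOCK;
    return iter->blocks[iter->pos++];
}

int
main(void)
{
    unsigned blks[] = {4, 9, 17};
    struct block_iter it = {blks, 0, 3};
    unsigned b;

    while ((b = next_block(&it)) != INVALID_BLOCK)
        printf("vacuum block %u\n", b);
    return 0;
}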
2733/*
2734 * lazy_vacuum_heap_rel() -- second pass over the heap for two pass strategy
2735 *
2736 * This routine marks LP_DEAD items in vacrel->dead_items as LP_UNUSED. Pages
2737 * that never had lazy_scan_prune record LP_DEAD items are not visited at all.
2738 *
2739 * We may also be able to truncate the line pointer array of the heap pages we
2740 * visit. If there is a contiguous group of LP_UNUSED items at the end of the
2741 * array, it can be reclaimed as free space. These LP_UNUSED items usually
2742 * start out as LP_DEAD items recorded by lazy_scan_prune (we set items from
2743 * each page to LP_UNUSED, and then consider if it's possible to truncate the
2744 * page's line pointer array).
2745 *
2746 * Note: the reason for doing this as a second pass is that we cannot remove
2747 * the tuples until we've removed their index entries, and we want to process
2748 * index entry removal in batches as large as possible.
2749 */
2750static void
2751lazy_vacuum_heap_rel(LVRelState *vacrel)
2752{
2753 ReadStream *stream;
2754 BlockNumber vacuumed_pages = 0;
2755 Buffer vmbuffer = InvalidBuffer;
2756 LVSavedErrInfo saved_err_info;
2757 TidStoreIter *iter;
2758
2759 Assert(vacrel->do_index_vacuuming);
2760 Assert(vacrel->do_index_cleanup);
2761 Assert(vacrel->num_index_scans > 0);
2762
2763 /* Report that we are now vacuuming the heap */
2764 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2765 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
2766
2767 /* Update error traceback information */
2768 update_vacuum_error_info(vacrel, &saved_err_info,
2769 VACUUM_ERRCB_PHASE_VACUUM_HEAP,
2770 InvalidBlockNumber, InvalidOffsetNumber);
2771
2772 iter = TidStoreBeginIterate(vacrel->dead_items);
2773
2774 /*
2775 * Set up the read stream for vacuum's second pass through the heap.
2776 *
2777 * It is safe to use batchmode, as vacuum_reap_lp_read_stream_next() does
2778 * not need to wait for IO and does not perform locking. Once we support
2779 * parallelism it should still be fine, as presumably the holder of locks
2780 * would never be blocked by IO while holding the lock.
2781 */
2782 stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE |
2783 READ_STREAM_USE_BATCHING,
2784 vacrel->bstrategy,
2785 vacrel->rel,
2786 MAIN_FORKNUM,
2787 vacuum_reap_lp_read_stream_next,
2788 iter,
2789 sizeof(TidStoreIterResult));
2790
2791 while (true)
2792 {
2793 BlockNumber blkno;
2794 Buffer buf;
2795 Page page;
2796 TidStoreIterResult *iter_result;
2797 Size freespace;
2798 OffsetNumber offsets[MaxOffsetNumber];
2799 int num_offsets;
2800
2801 vacuum_delay_point(false);
2802
2803 buf = read_stream_next_buffer(stream, (void **) &iter_result);
2804
2805 /* The relation is exhausted */
2806 if (!BufferIsValid(buf))
2807 break;
2808
2809 vacrel->blkno = blkno = BufferGetBlockNumber(buf);
2810
2813 Assert(num_offsets <= lengthof(offsets));
2814
2815 /*
2816 * Pin the visibility map page in case we need to mark the page
2817 * all-visible. In most cases this will be very cheap, because we'll
2818 * already have the correct page pinned anyway.
2819 */
2820 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
2821
2822 /* We need a non-cleanup exclusive lock to mark dead_items unused */
2823 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2824 lazy_vacuum_heap_page(vacrel, blkno, buf, offsets,
2825 num_offsets, vmbuffer);
2826
2827 /* Now that we've vacuumed the page, record its available space */
2828 page = BufferGetPage(buf);
2829 freespace = PageGetHeapFreeSpace(page);
2830
2831 UnlockReleaseBuffer(buf);
2832 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
2833 vacuumed_pages++;
2834 }
2835
2836 read_stream_end(stream);
2837 TidStoreEndIterate(iter);
2838
2839 vacrel->blkno = InvalidBlockNumber;
2840 if (BufferIsValid(vmbuffer))
2841 ReleaseBuffer(vmbuffer);
2842
2843 /*
2844 * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
2845 * the second heap pass. No more, no less.
2846 */
2847 Assert(vacrel->num_index_scans > 1 ||
2848 (vacrel->dead_items_info->num_items == vacrel->lpdead_items &&
2849 vacuumed_pages == vacrel->lpdead_item_pages));
2850
2851 ereport(DEBUG2,
2852 (errmsg("table \"%s\": removed %" PRId64 " dead item identifiers in %u pages",
2853 vacrel->relname, vacrel->dead_items_info->num_items,
2854 vacuumed_pages)));
2855
2856 /* Revert to the previous phase information for error traceback */
2857 restore_vacuum_error_info(vacrel, &saved_err_info);
2858}
2859
2860/*
2861 * lazy_vacuum_heap_page() -- free page's LP_DEAD items listed in the
2862 * vacrel->dead_items store.
2863 *
2864 * Caller must have an exclusive buffer lock on the buffer (though a full
2865 * cleanup lock is also acceptable). vmbuffer must be valid and already have
2866 * a pin on blkno's visibility map page.
2867 */
2868static void
2869lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
2870 OffsetNumber *deadoffsets, int num_offsets,
2871 Buffer vmbuffer)
2872{
2873 Page page = BufferGetPage(buffer);
2874 OffsetNumber unused[MaxHeapTuplesPerPage];
2875 int nunused = 0;
2876 TransactionId visibility_cutoff_xid;
2877 TransactionId conflict_xid = InvalidTransactionId;
2878 bool all_frozen;
2879 LVSavedErrInfo saved_err_info;
2880 uint8 vmflags = 0;
2881
2882 Assert(vacrel->do_index_vacuuming);
2883
2884 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
2885
2886 /* Update error traceback information */
2887 update_vacuum_error_info(vacrel, &saved_err_info,
2888 VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
2889 InvalidOffsetNumber);
2890
2891 /*
2892 * Before marking dead items unused, check whether the page will become
2893 * all-visible once that change is applied. This lets us reap the tuples
2894 * and mark the page all-visible within the same critical section,
2895 * enabling both changes to be emitted in a single WAL record. Since the
2896 * visibility checks may perform I/O and allocate memory, they must be
2897 * done outside the critical section.
2898 */
2899 if (heap_page_would_be_all_visible(vacrel->rel, buffer,
2900 vacrel->cutoffs.OldestXmin,
2901 deadoffsets, num_offsets,
2902 &all_frozen, &visibility_cutoff_xid,
2903 &vacrel->offnum))
2904 {
2905 vmflags |= VISIBILITYMAP_ALL_VISIBLE;
2906 if (all_frozen)
2907 {
2908 vmflags |= VISIBILITYMAP_ALL_FROZEN;
2909 Assert(!TransactionIdIsValid(visibility_cutoff_xid));
2910 }
2911
2912 /*
2913 * Take the lock on the vmbuffer before entering a critical section.
2914 * The heap page lock must also be held while updating the VM to
2915 * ensure consistency.
2916 */
2917 LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
2918 }
2919
2920 START_CRIT_SECTION();
2921
2922 for (int i = 0; i < num_offsets; i++)
2923 {
2924 ItemId itemid;
2925 OffsetNumber toff = deadoffsets[i];
2926
2927 itemid = PageGetItemId(page, toff);
2928
2929 Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
2930 ItemIdSetUnused(itemid);
2931 unused[nunused++] = toff;
2932 }
2933
2934 Assert(nunused > 0);
2935
2936 /* Attempt to truncate line pointer array now */
2937 PageTruncateLinePointerArray(page);
2938
2939 if ((vmflags & VISIBILITYMAP_VALID_BITS) != 0)
2940 {
2941 /*
2942 * The page is guaranteed to have had dead line pointers, so we always
2943 * set PD_ALL_VISIBLE.
2944 */
2945 PageSetAllVisible(page);
2947 vmbuffer, vmflags,
2948 vacrel->rel->rd_locator);
2949 conflict_xid = visibility_cutoff_xid;
2950 }
2951
2952 /*
2953 * Mark buffer dirty before we write WAL.
2954 */
2955 MarkBufferDirty(buffer);
2956
2957 /* XLOG stuff */
2958 if (RelationNeedsWAL(vacrel->rel))
2959 {
2960 log_heap_prune_and_freeze(vacrel->rel, buffer,
2961 vmflags != 0 ? vmbuffer : InvalidBuffer,
2962 vmflags,
2963 conflict_xid,
2964 false, /* no cleanup lock required */
2965 PRUNE_VACUUM_CLEANUP,
2966 NULL, 0, /* frozen */
2967 NULL, 0, /* redirected */
2968 NULL, 0, /* dead */
2969 unused, nunused);
2970 }
2971
2972 END_CRIT_SECTION();
2973
2974 if ((vmflags & VISIBILITYMAP_VALID_BITS) != 0)
2975 {
2976 /* Count the newly set VM page for logging */
2977 LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
2978 vacrel->new_all_visible_pages++;
2979 if (all_frozen)
2980 vacrel->new_all_visible_all_frozen_pages++;
2981 }
2982
2983 /* Revert to the previous phase information for error traceback */
2984 restore_vacuum_error_info(vacrel, &saved_err_info);
2985}
2986
2987/*
2988 * Trigger the failsafe to avoid wraparound failure when vacrel table has a
2989 * relfrozenxid and/or relminmxid that is dangerously far in the past.
2990 * Triggering the failsafe makes the ongoing VACUUM bypass any further index
2991 * vacuuming and heap vacuuming. Truncating the heap is also bypassed.
2992 *
2993 * Any remaining work (work that VACUUM cannot just bypass) is typically sped
2994 * up when the failsafe triggers. VACUUM stops applying any cost-based delay
2995 * that it started out with.
2996 *
2997 * Returns true when failsafe has been triggered.
2998 */
2999static bool
3000lazy_check_wraparound_failsafe(LVRelState *vacrel)
3001{
3002 /* Don't warn more than once per VACUUM */
3003 if (VacuumFailsafeActive)
3004 return true;
3005
3006 if (unlikely(vacuum_xid_failsafe_check(&vacrel->cutoffs)))
3007 {
3008 const int progress_index[] = {
3012 };
3014
3015 VacuumFailsafeActive = true;
3016
3017 /*
3018 * Abandon use of a buffer access strategy to allow use of all of
3019 * shared buffers. We assume the caller who allocated the memory for
3020 * the BufferAccessStrategy will free it.
3021 */
3022 vacrel->bstrategy = NULL;
3023
3024 /* Disable index vacuuming, index cleanup, and heap rel truncation */
3025 vacrel->do_index_vacuuming = false;
3026 vacrel->do_index_cleanup = false;
3027 vacrel->do_rel_truncate = false;
3028
3029 /* Reset the progress counters and set the failsafe mode */
3031
3032 ereport(WARNING,
3033 (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
3034 vacrel->dbname, vacrel->relnamespace, vacrel->relname,
3035 vacrel->num_index_scans),
3036 errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
3037 errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
3038 "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
3039
3040 /* Stop applying cost limits from this point on */
3041 VacuumCostActive = false;
3042 VacuumCostBalance = 0;
3043
3044 return true;
3045 }
3046
3047 return false;
3048}
3049
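Conceptually the failsafe fires once the table's relfrozenxid (or relminmxid) age crosses the failsafe cutoff, after which the remaining work runs unthrottled. A standalone sketch of that decision; the 1.6 billion figure is the shipped default for vacuum_failsafe_age, and the age arithmetic is deliberately simplified to unsigned subtraction rather than real circular XID comparison:

#include <stdbool.h>
#include <stdio.h>

#define FAILSAFE_AGE 1600000000U    /* default vacuum_failsafe_age */

static bool
failsafe_would_trigger(unsigned next_xid, unsigned relfrozenxid)
{
    return next_xid - relfrozenxid > FAILSAFE_AGE;
}

int
main(void)
{
    printf("%d\n", failsafe_would_trigger(2000000000U, 100000000U)); /* 1 */
    printf("%d\n", failsafe_would_trigger(500000000U, 100000000U));  /* 0 */
    return 0;
}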
3050/*
3051 * lazy_cleanup_all_indexes() -- cleanup all indexes of relation.
3052 */
3053static void
3054lazy_cleanup_all_indexes(LVRelState *vacrel)
3055{
3056 double reltuples = vacrel->new_rel_tuples;
3057 bool estimated_count = vacrel->scanned_pages < vacrel->rel_pages;
3058 const int progress_start_index[] = {
3059 PROGRESS_VACUUM_PHASE,
3060 PROGRESS_VACUUM_INDEXES_TOTAL
3061 };
3062 const int progress_end_index[] = {
3063 PROGRESS_VACUUM_INDEXES_TOTAL,
3064 PROGRESS_VACUUM_INDEXES_PROCESSED
3065 };
3066 int64 progress_start_val[2];
3067 int64 progress_end_val[2] = {0, 0};
3068
3069 Assert(vacrel->do_index_cleanup);
3070 Assert(vacrel->nindexes > 0);
3071
3072 /*
3073 * Report that we are now cleaning up indexes and the number of indexes to
3074 * cleanup.
3075 */
3076 progress_start_val[0] = PROGRESS_VACUUM_PHASE_INDEX_CLEANUP;
3077 progress_start_val[1] = vacrel->nindexes;
3078 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
3079
3080 if (!ParallelVacuumIsActive(vacrel))
3081 {
3082 for (int idx = 0; idx < vacrel->nindexes; idx++)
3083 {
3084 Relation indrel = vacrel->indrels[idx];
3085 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
3086
3087 vacrel->indstats[idx] =
3088 lazy_cleanup_one_index(indrel, istat, reltuples,
3089 estimated_count, vacrel);
3090
3091 /* Report the number of indexes cleaned up */
3092 pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
3093 idx + 1);
3094 }
3095 }
3096 else
3097 {
3098 /* Outsource everything to parallel variant */
3099 parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
3100 vacrel->num_index_scans,
3101 estimated_count);
3102 }
3103
3104 /* Reset the progress counters */
3105 pgstat_progress_update_multi_param(2, progress_end_index, progress_end_val);
3106}
3107
3108/*
3109 * lazy_vacuum_one_index() -- vacuum index relation.
3110 *
3111 * Delete all the index tuples containing a TID collected in
3112 * vacrel->dead_items. Also update running statistics. Exact
3113 * details depend on index AM's ambulkdelete routine.
3114 *
3115 * reltuples is the number of heap tuples to be passed to the
3116 * bulkdelete callback. It's always assumed to be estimated.
3117 * See indexam.sgml for more info.
3118 *
3119 * Returns bulk delete stats derived from input stats
3120 */
3121static IndexBulkDeleteResult *
3122lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat,
3123 double reltuples, LVRelState *vacrel)
3124{
3125 IndexVacuumInfo ivinfo;
3126 LVSavedErrInfo saved_err_info;
3127
3128 ivinfo.index = indrel;
3129 ivinfo.heaprel = vacrel->rel;
3130 ivinfo.analyze_only = false;
3131 ivinfo.report_progress = false;
3132 ivinfo.estimated_count = true;
3133 ivinfo.message_level = DEBUG2;
3134 ivinfo.num_heap_tuples = reltuples;
3135 ivinfo.strategy = vacrel->bstrategy;
3136
3137 /*
3138 * Update error traceback information.
3139 *
3140 * The index name is saved during this phase and restored immediately
3141 * after this phase. See vacuum_error_callback.
3142 */
3143 Assert(vacrel->indname == NULL);
3144 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
3145 update_vacuum_error_info(vacrel, &saved_err_info,
3146 VACUUM_ERRCB_PHASE_VACUUM_INDEX,
3147 InvalidBlockNumber, InvalidOffsetNumber);
3148
3149 /* Do bulk deletion */
3150 istat = vac_bulkdel_one_index(&ivinfo, istat, vacrel->dead_items,
3151 vacrel->dead_items_info);
3152
3153 /* Revert to the previous phase information for error traceback */
3154 restore_vacuum_error_info(vacrel, &saved_err_info);
3155 pfree(vacrel->indname);
3156 vacrel->indname = NULL;
3157
3158 return istat;
3159}
3160
3161/*
3162 * lazy_cleanup_one_index() -- do post-vacuum cleanup for index relation.
3163 *
3164 * Calls index AM's amvacuumcleanup routine. reltuples is the number
3165 * of heap tuples and estimated_count is true if reltuples is an
3166 * estimated value. See indexam.sgml for more info.
3167 *
3168 * Returns bulk delete stats derived from input stats
3169 */
3170static IndexBulkDeleteResult *
3171lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat,
3172 double reltuples, bool estimated_count,
3173 LVRelState *vacrel)
3174{
3175 IndexVacuumInfo ivinfo;
3176 LVSavedErrInfo saved_err_info;
3177
3178 ivinfo.index = indrel;
3179 ivinfo.heaprel = vacrel->rel;
3180 ivinfo.analyze_only = false;
3181 ivinfo.report_progress = false;
3182 ivinfo.estimated_count = estimated_count;
3183 ivinfo.message_level = DEBUG2;
3184
3185 ivinfo.num_heap_tuples = reltuples;
3186 ivinfo.strategy = vacrel->bstrategy;
3187
3188 /*
3189 * Update error traceback information.
3190 *
3191 * The index name is saved during this phase and restored immediately
3192 * after this phase. See vacuum_error_callback.
3193 */
3194 Assert(vacrel->indname == NULL);
3195 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
3196 update_vacuum_error_info(vacrel, &saved_err_info,
3197 VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
3198 InvalidBlockNumber, InvalidOffsetNumber);
3199
3200 istat = vac_cleanup_one_index(&ivinfo, istat);
3201
3202 /* Revert to the previous phase information for error traceback */
3203 restore_vacuum_error_info(vacrel, &saved_err_info);
3204 pfree(vacrel->indname);
3205 vacrel->indname = NULL;
3206
3207 return istat;
3208}
3209
3210/*
3211 * should_attempt_truncation - should we attempt to truncate the heap?
3212 *
3213 * Don't even think about it unless we have a shot at releasing a goodly
3214 * number of pages. Otherwise, the time taken isn't worth it, mainly because
3215 * an AccessExclusive lock must be replayed on any hot standby, where it can
3216 * be particularly disruptive.
3217 *
3218 * Also don't attempt it if wraparound failsafe is in effect. The entire
3219 * system might be refusing to allocate new XIDs at this point. The system
3220 * definitely won't return to normal unless and until VACUUM actually advances
3221 * the oldest relfrozenxid -- which hasn't happened for target rel just yet.
3222 * If lazy_truncate_heap attempted to acquire an AccessExclusiveLock to
3223 * truncate the table under these circumstances, an XID exhaustion error might
3224 * make it impossible for VACUUM to fix the underlying XID exhaustion problem.
3225 * There is very little chance of truncation working out when the failsafe is
3226 * in effect in any case. lazy_scan_prune makes the optimistic assumption
3227 * that any LP_DEAD items it encounters will always be LP_UNUSED by the time
3228 * we're called.
3229 */
3230static bool
3231should_attempt_truncation(LVRelState *vacrel)
3232{
3233 BlockNumber possibly_freeable;
3234
3235 if (!vacrel->do_rel_truncate || VacuumFailsafeActive)
3236 return false;
3237
3238 possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
3239 if (possibly_freeable > 0 &&
3240 (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
3241 possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
3242 return true;
3243
3244 return false;
3245}
3246
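In concrete terms, with REL_TRUNCATE_MINIMUM of 1000 and REL_TRUNCATE_FRACTION of 16: a 10000-page table whose last 700 pages are empty qualifies (700 >= 10000/16 = 625), while the same 700-page tail on a 100000-page table does not (700 < 1000 and 700 < 6250). A standalone sketch of the test:

#include <stdbool.h>
#include <stdio.h>

#define REL_TRUNCATE_MINIMUM  1000
#define REL_TRUNCATE_FRACTION 16

static bool
worth_truncating(unsigned rel_pages, unsigned nonempty_pages)
{
    unsigned possibly_freeable = rel_pages - nonempty_pages;

    return possibly_freeable > 0 &&
        (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
         possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);
}

int
main(void)
{
    printf("%d\n", worth_truncating(10000, 9300));   /* 1 */
    printf("%d\n", worth_truncating(100000, 99300)); /* 0 */
    return 0;
}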
3247/*
3248 * lazy_truncate_heap - try to truncate off any empty pages at the end
3249 */
3250static void
3251lazy_truncate_heap(LVRelState *vacrel)
3252{
3253 BlockNumber orig_rel_pages = vacrel->rel_pages;
3254 BlockNumber new_rel_pages;
3255 bool lock_waiter_detected;
3256 int lock_retry;
3257
3258 /* Report that we are now truncating */
3259 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
3260 PROGRESS_VACUUM_PHASE_TRUNCATE);
3261
3262 /* Update error traceback information one last time */
3263 update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
3264 vacrel->nonempty_pages, InvalidOffsetNumber);
3265
3266 /*
3267 * Loop until no more truncating can be done.
3268 */
3269 do
3270 {
3271 /*
3272 * We need full exclusive lock on the relation in order to do
3273 * truncation. If we can't get it, give up rather than waiting --- we
3274 * don't want to block other backends, and we don't want to deadlock
3275 * (which is quite possible considering we already hold a lower-grade
3276 * lock).
3277 */
3278 lock_waiter_detected = false;
3279 lock_retry = 0;
3280 while (true)
3281 {
3282 if (ConditionalLockRelation(vacrel->rel, AccessExclusiveLock))
3283 break;
3284
3285 /*
3286 * Check for interrupts while trying to (re-)acquire the exclusive
3287 * lock.
3288 */
3289 CHECK_FOR_INTERRUPTS();
3290
3291 if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
3292 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
3293 {
3294 /*
3295 * We failed to establish the lock in the specified number of
3296 * retries. This means we give up truncating.
3297 */
3298 ereport(vacrel->verbose ? INFO : DEBUG2,
3299 (errmsg("\"%s\": stopping truncate due to conflicting lock request",
3300 vacrel->relname)));
3301 return;
3302 }
3303
3304 (void) WaitLatch(MyLatch,
3305 WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
3306 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL,
3307 WAIT_EVENT_VACUUM_TRUNCATE);
3308 ResetLatch(MyLatch);
3309 }
3310
3311 /*
3312 * Now that we have exclusive lock, look to see if the rel has grown
3313 * whilst we were vacuuming with non-exclusive lock. If so, give up;
3314 * the newly added pages presumably contain non-deletable tuples.
3315 */
3316 new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
3317 if (new_rel_pages != orig_rel_pages)
3318 {
3319 /*
3320 * Note: we intentionally don't update vacrel->rel_pages with the
3321 * new rel size here. If we did, it would amount to assuming that
3322 * the new pages are empty, which is unlikely. Leaving the numbers
3323 * alone amounts to assuming that the new pages have the same
3324 * tuple density as existing ones, which is less unlikely.
3325 */
3326 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3327 return;
3328 }
3329
3330 /*
3331 * Scan backwards from the end to verify that the end pages actually
3332 * contain no tuples. This is *necessary*, not optional, because
3333 * other backends could have added tuples to these pages whilst we
3334 * were vacuuming.
3335 */
3336 new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
3337 vacrel->blkno = new_rel_pages;
3338
3339 if (new_rel_pages >= orig_rel_pages)
3340 {
3341 /* can't do anything after all */
3342 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3343 return;
3344 }
3345
3346 /*
3347 * Okay to truncate.
3348 */
3349 RelationTruncate(vacrel->rel, new_rel_pages);
3350
3351 /*
3352 * We can release the exclusive lock as soon as we have truncated.
3353 * Other backends can't safely access the relation until they have
3354 * processed the smgr invalidation that smgrtruncate sent out ... but
3355 * that should happen as part of standard invalidation processing once
3356 * they acquire lock on the relation.
3357 */
3358 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3359
3360 /*
3361 * Update statistics. Here, it *is* correct to adjust rel_pages
3362 * without also touching reltuples, since the tuple count wasn't
3363 * changed by the truncation.
3364 */
3365 vacrel->removed_pages += orig_rel_pages - new_rel_pages;
3366 vacrel->rel_pages = new_rel_pages;
3367
3368 ereport(vacrel->verbose ? INFO : DEBUG2,
3369 (errmsg("table \"%s\": truncated %u to %u pages",
3370 vacrel->relname,
3371 orig_rel_pages, new_rel_pages)));
3372
3373 } while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
3374}
3375
3376/*
3377 * Rescan end pages to verify that they are (still) empty of tuples.
3378 *
3379 * Returns number of nondeletable pages (last nonempty page + 1).
3380 */
3381static BlockNumber
3382count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
3383{
3384 StaticAssertDecl((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
3385 "prefetch size must be power of 2");
3386
3387 BlockNumber blkno;
3388 BlockNumber prefetchedUntil;
3389 instr_time starttime;
3390
3391 /* Initialize the starttime if we check for conflicting lock requests */
3392 INSTR_TIME_SET_CURRENT(starttime);
3393
3394 /*
3395 * Start checking blocks at what we believe relation end to be and move
3396 * backwards. (Strange coding of loop control is needed because blkno is
3397 * unsigned.) To make the scan faster, we prefetch a few blocks at a time
3398 * in forward direction, so that OS-level readahead can kick in.
3399 */
3400 blkno = vacrel->rel_pages;
3401 prefetchedUntil = InvalidBlockNumber;
3402 while (blkno > vacrel->nonempty_pages)
3403 {
3404 Buffer buf;
3405 Page page;
3406 OffsetNumber offnum,
3407 maxoff;
3408 bool hastup;
3409
3410 /*
3411 * Check if another process requests a lock on our relation. We are
3412 * holding an AccessExclusiveLock here, so they will be waiting. We
3413 * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
3414 * only check if that interval has elapsed once every 32 blocks to
3415 * keep the number of system calls and actual shared lock table
3416 * lookups to a minimum.
3417 */
3418 if ((blkno % 32) == 0)
3419 {
3420 instr_time currenttime;
3421 instr_time elapsed;
3422
3423 INSTR_TIME_SET_CURRENT(currenttime);
3424 elapsed = currenttime;
3425 INSTR_TIME_SUBTRACT(elapsed, starttime);
3426 if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
3427 >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
3428 {
3429 if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
3430 {
3431 ereport(vacrel->verbose ? INFO : DEBUG2,
3432 (errmsg("table \"%s\": suspending truncate due to conflicting lock request",
3433 vacrel->relname)));
3434
3435 *lock_waiter_detected = true;
3436 return blkno;
3437 }
3438 starttime = currenttime;
3439 }
3440 }
3441
3442 /*
3443 * We don't insert a vacuum delay point here, because we have an
3444 * exclusive lock on the table which we want to hold for as short a
3445 * time as possible. We still need to check for interrupts however.
3446 */
3447 CHECK_FOR_INTERRUPTS();
3448
3449 blkno--;
3450
3451 /* If we haven't prefetched this lot yet, do so now. */
3452 if (prefetchedUntil > blkno)
3453 {
3454 BlockNumber prefetchStart;
3455 BlockNumber pblkno;
3456
3457 prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
3458 for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
3459 {
3460 PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
3461 CHECK_FOR_INTERRUPTS();
3462 }
3463 prefetchedUntil = prefetchStart;
3464 }
3465
3467 vacrel->bstrategy);
3468
3469 /* In this phase we only need shared access to the buffer */
3470 LockBuffer(buf, BUFFER_LOCK_SHARE);
3471
3472 page = BufferGetPage(buf);
3473
3474 if (PageIsNew(page) || PageIsEmpty(page))
3475 {
3476 UnlockReleaseBuffer(buf);
3477 continue;
3478 }
3479
3480 hastup = false;
3481 maxoff = PageGetMaxOffsetNumber(page);
3482 for (offnum = FirstOffsetNumber;
3483 offnum <= maxoff;
3484 offnum = OffsetNumberNext(offnum))
3485 {
3486 ItemId itemid;
3487
3488 itemid = PageGetItemId(page, offnum);
3489
3490 /*
3491 * Note: any non-unused item should be taken as a reason to keep
3492 * this page. Even an LP_DEAD item makes truncation unsafe, since
3493 * we must not have cleaned out its index entries.
3494 */
3495 if (ItemIdIsUsed(itemid))
3496 {
3497 hastup = true;
3498 break; /* can stop scanning */
3499 }
3500 } /* scan along page */
3501
3502 UnlockReleaseBuffer(buf);
3503
3504 /* Done scanning if we found a tuple here */
3505 if (hastup)
3506 return blkno + 1;
3507 }
3508
3509 /*
3510 * If we fall out of the loop, all the previously-thought-to-be-empty
3511 * pages still are; we need not bother to look at the last known-nonempty
3512 * page.
3513 */
3514 return vacrel->nonempty_pages;
3515}
3516
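The prefetch start used above is computed by masking the block number down to the nearest PREFETCH_SIZE boundary, which is exactly why PREFETCH_SIZE must be a power of two (~(PREFETCH_SIZE - 1) is only a clean alignment mask in that case). A standalone illustration:

#include <stdio.h>

#define PREFETCH_SIZE 32u           /* must be a power of two */

int
main(void)
{
    unsigned blkno = 1000;
    unsigned prefetch_start = blkno & ~(PREFETCH_SIZE - 1);

    /* 1000 & ~31 == 992: prefetch blocks 992..1000 in forward order */
    printf("prefetch %u..%u\n", prefetch_start, blkno);
    return 0;
}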
3517/*
3518 * Allocate dead_items and dead_items_info (either using palloc, or in dynamic
3519 * shared memory). Sets both in vacrel for caller.
3520 *
3521 * Also handles parallel initialization as part of allocating dead_items in
3522 * DSM when required.
3523 */
3524static void
3525dead_items_alloc(LVRelState *vacrel, int nworkers)
3526{
3527 VacDeadItemsInfo *dead_items_info;
3528 int vac_work_mem = AmAutoVacuumWorkerProcess() &&
3529 autovacuum_work_mem != -1 ?
3530 autovacuum_work_mem : maintenance_work_mem;
3531
3532 /*
3533 * Initialize state for a parallel vacuum. As of now, only one worker can
3534 * be used for an index, so we invoke parallelism only if there are at
3535 * least two indexes on a table.
3536 */
3537 if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)
3538 {
3539 /*
3540 * Since parallel workers cannot access data in temporary tables, we
3541 * can't perform parallel vacuum on them.
3542 */
3543 if (RelationUsesLocalBuffers(vacrel->rel))
3544 {
3545 /*
3546 * Give warning only if the user explicitly tries to perform a
3547 * parallel vacuum on the temporary table.
3548 */
3549 if (nworkers > 0)
3550 ereport(WARNING,
3551 (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
3552 vacrel->relname)));
3553 }
3554 else
3555 vacrel->pvs = parallel_vacuum_init(vacrel->rel, vacrel->indrels,
3556 vacrel->nindexes, nworkers,
3557 vac_work_mem,
3558 vacrel->verbose ? INFO : DEBUG2,
3559 vacrel->bstrategy);
3560
3561 /*
3562 * If parallel mode started, dead_items and dead_items_info spaces are
3563 * allocated in DSM.
3564 */
3565 if (ParallelVacuumIsActive(vacrel))
3566 {
3567 vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs,
3568 &vacrel->dead_items_info);
3569 return;
3570 }
3571 }
3572
3573 /*
3574 * Serial VACUUM case. Allocate both dead_items and dead_items_info
3575 * locally.
3576 */
3577
3578 dead_items_info = palloc_object(VacDeadItemsInfo);
3579 dead_items_info->max_bytes = vac_work_mem * (Size) 1024;
3580 dead_items_info->num_items = 0;
3581 vacrel->dead_items_info = dead_items_info;
3582
3583 vacrel->dead_items = TidStoreCreateLocal(dead_items_info->max_bytes, true);
3584}
3585
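The serial-path memory budget is just the relevant work-mem GUC (stored in kilobytes) scaled to bytes, with autovacuum workers preferring autovacuum_work_mem whenever it is set. A standalone sketch of the selection and scaling, GUC values passed as plain ints:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t
dead_items_budget(bool is_autovacuum, int autovacuum_work_mem_kb,
                  int maintenance_work_mem_kb)
{
    int vac_work_mem = (is_autovacuum && autovacuum_work_mem_kb != -1) ?
        autovacuum_work_mem_kb : maintenance_work_mem_kb;

    return (size_t) vac_work_mem * 1024;
}

int
main(void)
{
    /* autovacuum_work_mem = -1 falls back to maintenance_work_mem (64MB) */
    printf("%zu bytes\n", dead_items_budget(true, -1, 65536));
    return 0;
}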
3586/*
3587 * Add the given block number and offset numbers to dead_items.
3588 */
3589static void
3590dead_items_add(LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets,
3591 int num_offsets)
3592{
3593 const int prog_index[2] = {
3594 PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS,
3595 PROGRESS_VACUUM_DEAD_TUPLE_BYTES
3596 };
3597 int64 prog_val[2];
3598
3599 TidStoreSetBlockOffsets(vacrel->dead_items, blkno, offsets, num_offsets);
3600 vacrel->dead_items_info->num_items += num_offsets;
3601
3602 /* update the progress information */
3603 prog_val[0] = vacrel->dead_items_info->num_items;
3604 prog_val[1] = TidStoreMemoryUsage(vacrel->dead_items);
3605 pgstat_progress_update_multi_param(2, prog_index, prog_val);
3606}
3607
3608/*
3609 * Forget all collected dead items.
3610 */
3611static void
3612dead_items_reset(LVRelState *vacrel)
3613{
3614 /* Update statistics for dead items */
3615 vacrel->num_dead_items_resets++;
3616 vacrel->total_dead_items_bytes += TidStoreMemoryUsage(vacrel->dead_items);
3617
3618 if (ParallelVacuumIsActive(vacrel))
3619 {
3620 parallel_vacuum_reset_dead_items(vacrel->pvs);
3621 vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs,
3622 &vacrel->dead_items_info);
3623 return;
3624 }
3625
3626 /* Recreate the tidstore with the same max_bytes limitation */
3627 TidStoreDestroy(vacrel->dead_items);
3628 vacrel->dead_items = TidStoreCreateLocal(vacrel->dead_items_info->max_bytes, true);
3629
3630 /* Reset the counter */
3631 vacrel->dead_items_info->num_items = 0;
3632}
3633
3634/*
3635 * Perform cleanup for resources allocated in dead_items_alloc
3636 */
3637static void
3638dead_items_cleanup(LVRelState *vacrel)
3639{
3640 if (!ParallelVacuumIsActive(vacrel))
3641 {
3642 /* Don't bother with pfree here */
3643 return;
3644 }
3645
3646 /* End parallel mode */
3647 parallel_vacuum_end(vacrel->pvs, vacrel->indstats);
3648 vacrel->pvs = NULL;
3649}
3650
3651#ifdef USE_ASSERT_CHECKING
3652
3653/*
3654 * Wrapper for heap_page_would_be_all_visible() which can be used for callers
3655 * that expect no LP_DEAD items on the page. Currently assert-only, but there
3656 * is no reason not to use it outside of asserts.
3657 */
3658static bool
3659heap_page_is_all_visible(Relation rel, Buffer buf,
3660 TransactionId OldestXmin,
3661 bool *all_frozen,
3662 TransactionId *visibility_cutoff_xid,
3663 OffsetNumber *logging_offnum)
3664{
3665
3666 return heap_page_would_be_all_visible(rel, buf,
3667 OldestXmin,
3668 NULL, 0,
3669 all_frozen,
3670 visibility_cutoff_xid,
3671 logging_offnum);
3672}
3673#endif
3674
3675/*
3676 * Check whether the heap page in buf is all-visible except for the dead
3677 * tuples referenced in the deadoffsets array.
3678 *
3679 * Vacuum uses this to check if a page would become all-visible after reaping
3680 * known dead tuples. This function does not remove the dead items.
3681 *
3682 * This cannot be called in a critical section, as the visibility checks may
3683 * perform IO and allocate memory.
3684 *
3685 * Returns true if the page is all-visible other than the provided
3686 * deadoffsets and false otherwise.
3687 *
3688 * OldestXmin is used to determine visibility.
3689 *
3690 * Output parameters:
3691 *
3692 * - *all_frozen: true if every tuple on the page is frozen
3693 * - *visibility_cutoff_xid: newest xmin; valid only if page is all-visible
3694 * - *logging_offnum: OffsetNumber of current tuple being processed;
3695 * used by vacuum's error callback system.
3696 *
3697 * Callers looking to verify that the page is already all-visible can call
3698 * heap_page_is_all_visible().
3699 *
3700 * This logic is closely related to heap_prune_record_unchanged_lp_normal().
3701 * If you modify this function, ensure consistency with that code. An
3702 * assertion cross-checks that both remain in agreement. Do not introduce new
3703 * side-effects.
3704 */
3705static bool
3706heap_page_would_be_all_visible(Relation rel, Buffer buf,
3707 TransactionId OldestXmin,
3708 OffsetNumber *deadoffsets,
3709 int ndeadoffsets,
3710 bool *all_frozen,
3711 TransactionId *visibility_cutoff_xid,
3712 OffsetNumber *logging_offnum)
3713{
3714 Page page = BufferGetPage(buf);
3715 BlockNumber blockno = BufferGetBlockNumber(buf);
3716 OffsetNumber offnum,
3717 maxoff;
3718 bool all_visible = true;
3719 int matched_dead_count = 0;
3720
3721 *visibility_cutoff_xid = InvalidTransactionId;
3722 *all_frozen = true;
3723
3724 Assert(ndeadoffsets == 0 || deadoffsets);
3725
3726#ifdef USE_ASSERT_CHECKING
3727 /* Confirm input deadoffsets[] is strictly sorted */
3728 if (ndeadoffsets > 1)
3729 {
3730 for (int i = 1; i < ndeadoffsets; i++)
3731 Assert(deadoffsets[i - 1] < deadoffsets[i]);
3732 }
3733#endif
3734
3735 maxoff = PageGetMaxOffsetNumber(page);
3736 for (offnum = FirstOffsetNumber;
3737 offnum <= maxoff && all_visible;
3738 offnum = OffsetNumberNext(offnum))
3739 {
3740 ItemId itemid;
3741 HeapTupleData tuple;
3743
3744 /*
3745 * Set the offset number so that we can display it along with any
3746 * error that occurred while processing this tuple.
3747 */
3748 *logging_offnum = offnum;
3749 itemid = PageGetItemId(page, offnum);
3750
3751 /* Unused or redirect line pointers are of no interest */
3752 if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
3753 continue;
3754
3755 ItemPointerSet(&(tuple.t_self), blockno, offnum);
3756
3757 /*
3758 * Dead line pointers can have index pointers pointing to them. So
3759 * they can't be treated as visible
3760 */
3761 if (ItemIdIsDead(itemid))
3762 {
3763 if (!deadoffsets ||
3764 matched_dead_count >= ndeadoffsets ||
3765 deadoffsets[matched_dead_count] != offnum)
3766 {
3767 *all_frozen = all_visible = false;
3768 break;
3769 }
3770 matched_dead_count++;
3771 continue;
3772 }
3773
3774 Assert(ItemIdIsNormal(itemid));
3775
3776 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
3777 tuple.t_len = ItemIdGetLength(itemid);
3778 tuple.t_tableOid = RelationGetRelid(rel);
3779
3780 /* Visibility checks may do IO or allocate memory */
3781 Assert(CritSectionCount == 0);
3782 switch (HeapTupleSatisfiesVacuumHorizon(&tuple, buf, &dead_after))
3783 {
3784 case HEAPTUPLE_LIVE:
3785 {
3786 TransactionId xmin;
3787
3788 /* Check comments in lazy_scan_prune. */
3789 if (!HeapTupleHeaderXminCommitted(tuple.t_data))
3790 {
3791 all_visible = false;
3792 *all_frozen = false;
3793 break;
3794 }
3795
3796 /*
3797 * The inserter definitely committed. But is it old enough
3798 * that everyone sees it as committed?
3799 */
3800 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
3801 if (!TransactionIdPrecedes(xmin, OldestXmin))
3802 {
3803 all_visible = false;
3804 *all_frozen = false;
3805 break;
3806 }
3807
3808 /* Track newest xmin on page. */
3809 if (TransactionIdFollows(xmin, *visibility_cutoff_xid) &&
3810 TransactionIdIsNormal(xmin))
3811 *visibility_cutoff_xid = xmin;
3812
3813 /* Check whether this tuple is already frozen or not */
3814 if (all_visible && *all_frozen &&
3815 heap_tuple_needs_eventual_freeze(tuple.t_data))
3816 *all_frozen = false;
3817 }
3818 break;
3819
3820 case HEAPTUPLE_DEAD:
3821 case HEAPTUPLE_RECENTLY_DEAD:
3822 case HEAPTUPLE_INSERT_IN_PROGRESS:
3823 case HEAPTUPLE_DELETE_IN_PROGRESS:
3824 {
3825 all_visible = false;
3826 *all_frozen = false;
3827 break;
3828 }
3829 default:
3830 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
3831 break;
3832 }
3833 } /* scan along page */
3834
3835 /* Clear the offset information once we have processed the given page. */
3836 *logging_offnum = InvalidOffsetNumber;
3837
3838 return all_visible;
3839}
3840
3841/*
3842 * Update index statistics in pg_class if the statistics are accurate.
3843 */
3844static void
3845update_relstats_all_indexes(LVRelState *vacrel)
3846{
3847 Relation *indrels = vacrel->indrels;
3848 int nindexes = vacrel->nindexes;
3849 IndexBulkDeleteResult **indstats = vacrel->indstats;
3850
3851 Assert(vacrel->do_index_cleanup);
3852
3853 for (int idx = 0; idx < nindexes; idx++)
3854 {
3855 Relation indrel = indrels[idx];
3856 IndexBulkDeleteResult *istat = indstats[idx];
3857
3858 if (istat == NULL || istat->estimated_count)
3859 continue;
3860
3861 /* Update index statistics */
3862 vac_update_relstats(indrel,
3863 istat->num_pages,
3864 istat->num_index_tuples,
3865 0, 0,
3866 false,
3867 InvalidTransactionId,
3868 InvalidMultiXactId,
3869 NULL, NULL, false);
3870 }
3871}
3872
3873/*
3874 * Error context callback for errors occurring during vacuum. The error
3875 * context messages for index phases should match the messages set in parallel
3876 * vacuum. If you change this function for those phases, change
3877 * parallel_vacuum_error_callback() as well.
3878 */
3879static void
3880vacuum_error_callback(void *arg)
3881{
3882 LVRelState *errinfo = arg;
3883
3884 switch (errinfo->phase)
3885 {
3886 case VACUUM_ERRCB_PHASE_SCAN_HEAP:
3887 if (BlockNumberIsValid(errinfo->blkno))
3888 {
3889 if (OffsetNumberIsValid(errinfo->offnum))
3890 errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
3891 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3892 else
3893 errcontext("while scanning block %u of relation \"%s.%s\"",
3894 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3895 }
3896 else
3897 errcontext("while scanning relation \"%s.%s\"",
3898 errinfo->relnamespace, errinfo->relname);
3899 break;
3900
3901 case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
3902 if (BlockNumberIsValid(errinfo->blkno))
3903 {
3904 if (OffsetNumberIsValid(errinfo->offnum))
3905 errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
3906 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3907 else
3908 errcontext("while vacuuming block %u of relation \"%s.%s\"",
3909 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3910 }
3911 else
3912 errcontext("while vacuuming relation \"%s.%s\"",
3913 errinfo->relnamespace, errinfo->relname);
3914 break;
3915
3916 case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
3917 errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
3918 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3919 break;
3920
3921 case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
3922 errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
3923 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3924 break;
3925
3926 case VACUUM_ERRCB_PHASE_TRUNCATE:
3927 if (BlockNumberIsValid(errinfo->blkno))
3928 errcontext("while truncating relation \"%s.%s\" to %u blocks",
3929 errinfo->relnamespace, errinfo->relname, errinfo->blkno);
3930 break;
3931
3932 case VACUUM_ERRCB_PHASE_UNKNOWN:
3933 default:
3934 return; /* do nothing; the errinfo may not be
3935 * initialized */
3936 }
3937}
3938
3939/*
3940 * Updates the information required for vacuum error callback. This also saves
3941 * the current information which can be later restored via restore_vacuum_error_info.
3942 */
3943static void
3944update_vacuum_error_info(LVRelState *vacrel, LVSavedErrInfo *saved_vacrel,
3945 int phase, BlockNumber blkno, OffsetNumber offnum)
3946{
3947 if (saved_vacrel)
3948 {
3949 saved_vacrel->offnum = vacrel->offnum;
3950 saved_vacrel->blkno = vacrel->blkno;
3951 saved_vacrel->phase = vacrel->phase;
3952 }
3953
3954 vacrel->blkno = blkno;
3955 vacrel->offnum = offnum;
3956 vacrel->phase = phase;
3957}
3958
3959/*
3960 * Restores the vacuum information saved via a prior call to update_vacuum_error_info.
3961 */
3962static void
3963restore_vacuum_error_info(LVRelState *vacrel,
3964 const LVSavedErrInfo *saved_vacrel)
3965{
3966 vacrel->blkno = saved_vacrel->blkno;
3967 vacrel->offnum = saved_vacrel->offnum;
3968 vacrel->phase = saved_vacrel->phase;
3969}

Macro Definition Documentation

◆ MAX_EAGER_FREEZE_SUCCESS_RATE

#define MAX_EAGER_FREEZE_SUCCESS_RATE   0.2

Definition at line 239 of file vacuumlazy.c.

◆ ParallelVacuumIsActive

#define ParallelVacuumIsActive(vacrel)   ((vacrel)->pvs != NULL)

Definition at line 219 of file vacuumlazy.c.

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 213 of file vacuumlazy.c.

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 168 of file vacuumlazy.c.

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 167 of file vacuumlazy.c.

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 207 of file vacuumlazy.c.

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 200 of file vacuumlazy.c.

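For a sense of scale: with the default build setting BLCKSZ = 8192 (an assumption, since BLCKSZ is configurable at build time), this macro works out to roughly one free space map vacuuming pass per million heap blocks. A minimal standalone sketch of the arithmetic:

#include <stdio.h>
#include <stdint.h>

#define BLCKSZ 8192             /* assumed default PostgreSQL block size */

int
main(void)
{
    /* Mirrors VACUUM_FSM_EVERY_PAGES: 8 GB of heap expressed in blocks */
    uint64_t fsm_every_pages = ((uint64_t) 8 * 1024 * 1024 * 1024) / BLCKSZ;

    printf("FSM vacuumed every %llu blocks\n",
           (unsigned long long) fsm_every_pages);   /* prints 1048576 */
    return 0;
}
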
◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 177 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 179 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 178 of file vacuumlazy.c.

Typedef Documentation

◆ LVRelState

◆ LVSavedErrInfo

Enumeration Type Documentation

◆ VacErrPhase

Enumerator
VACUUM_ERRCB_PHASE_UNKNOWN 
VACUUM_ERRCB_PHASE_SCAN_HEAP 
VACUUM_ERRCB_PHASE_VACUUM_INDEX 
VACUUM_ERRCB_PHASE_VACUUM_HEAP 
VACUUM_ERRCB_PHASE_INDEX_CLEANUP 
VACUUM_ERRCB_PHASE_TRUNCATE 

Definition at line 222 of file vacuumlazy.c.

Function Documentation

◆ cmpOffsetNumbers()

static int cmpOffsetNumbers ( const void * a,
const void * b 
)
static

Definition at line 1960 of file vacuumlazy.c.

1961{
1962 return pg_cmp_u16(*(const OffsetNumber *) a, *(const OffsetNumber *) b);
1963}

References pg_cmp_u16().

Referenced by lazy_scan_prune().
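
The comparator has exactly the shape qsort() expects, which is presumably why lazy_scan_prune() uses it to keep collected dead-item offsets in the ascending order that heap_page_would_be_all_visible() asserts. A minimal standalone sketch of the same pattern, with OffsetNumber re-declared locally as an assumption:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef uint16_t OffsetNumber;  /* stand-in for the server typedef */

/* Same shape as cmpOffsetNumbers(): a qsort()-compatible comparator */
static int
cmp_offset_numbers(const void *a, const void *b)
{
    OffsetNumber lhs = *(const OffsetNumber *) a;
    OffsetNumber rhs = *(const OffsetNumber *) b;

    return (lhs > rhs) - (lhs < rhs);   /* what pg_cmp_u16() computes */
}

int
main(void)
{
    OffsetNumber deadoffsets[] = {7, 2, 42, 5};

    qsort(deadoffsets, 4, sizeof(OffsetNumber), cmp_offset_numbers);
    for (int i = 0; i < 4; i++)
        printf("%u\n", (unsigned) deadoffsets[i]);  /* prints 2 5 7 42 */
    return 0;
}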

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages ( LVRelState * vacrel,
bool * lock_waiter_detected 
)
static

Definition at line 3383 of file vacuumlazy.c.

3384{
3385 StaticAssertDecl((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
3386 "prefetch size must be power of 2");
3387
3388 BlockNumber blkno;
3389 BlockNumber prefetchedUntil;
3390 instr_time starttime;
3391
3392 /* Initialize the starttime if we check for conflicting lock requests */
3393 INSTR_TIME_SET_CURRENT(starttime);
3394
3395 /*
3396 * Start checking blocks at what we believe relation end to be and move
3397 * backwards. (Strange coding of loop control is needed because blkno is
3398 * unsigned.) To make the scan faster, we prefetch a few blocks at a time
3399 * in forward direction, so that OS-level readahead can kick in.
3400 */
3401 blkno = vacrel->rel_pages;
3402 prefetchedUntil = InvalidBlockNumber;
3403 while (blkno > vacrel->nonempty_pages)
3404 {
3405 Buffer buf;
3406 Page page;
3407 OffsetNumber offnum,
3408 maxoff;
3409 bool hastup;
3410
3411 /*
3412 * Check if another process requests a lock on our relation. We are
3413 * holding an AccessExclusiveLock here, so they will be waiting. We
3414 * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
3415 * only check if that interval has elapsed once every 32 blocks to
3416 * keep the number of system calls and actual shared lock table
3417 * lookups to a minimum.
3418 */
3419 if ((blkno % 32) == 0)
3420 {
3421 instr_time currenttime;
3422 instr_time elapsed;
3423
3424 INSTR_TIME_SET_CURRENT(currenttime);
3425 elapsed = currenttime;
3426 INSTR_TIME_SUBTRACT(elapsed, starttime);
3427 if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
3428 >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
3429 {
3430 if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
3431 {
3432 ereport(vacrel->verbose ? INFO : DEBUG2,
3433 (errmsg("table \"%s\": suspending truncate due to conflicting lock request",
3434 vacrel->relname)));
3435
3436 *lock_waiter_detected = true;
3437 return blkno;
3438 }
3439 starttime = currenttime;
3440 }
3441 }
3442
3443 /*
3444 * We don't insert a vacuum delay point here, because we have an
3445 * exclusive lock on the table which we want to hold for as short a
3446 * time as possible. We still need to check for interrupts however.
3447 */
3448 CHECK_FOR_INTERRUPTS();
3449
3450 blkno--;
3451
3452 /* If we haven't prefetched this lot yet, do so now. */
3453 if (prefetchedUntil > blkno)
3454 {
3455 BlockNumber prefetchStart;
3456 BlockNumber pblkno;
3457
3458 prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
3459 for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
3460 {
3461 PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
3462 CHECK_FOR_INTERRUPTS();
3463 }
3464 prefetchedUntil = prefetchStart;
3465 }
3466
3467 buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
3468 vacrel->bstrategy);
3469
3470 /* In this phase we only need shared access to the buffer */
3471 LockBuffer(buf, BUFFER_LOCK_SHARE);
3472
3473 page = BufferGetPage(buf);
3474
3475 if (PageIsNew(page) || PageIsEmpty(page))
3476 {
3477 UnlockReleaseBuffer(buf);
3478 continue;
3479 }
3480
3481 hastup = false;
3482 maxoff = PageGetMaxOffsetNumber(page);
3483 for (offnum = FirstOffsetNumber;
3484 offnum <= maxoff;
3485 offnum = OffsetNumberNext(offnum))
3486 {
3487 ItemId itemid;
3488
3489 itemid = PageGetItemId(page, offnum);
3490
3491 /*
3492 * Note: any non-unused item should be taken as a reason to keep
3493 * this page. Even an LP_DEAD item makes truncation unsafe, since
3494 * we must not have cleaned out its index entries.
3495 */
3496 if (ItemIdIsUsed(itemid))
3497 {
3498 hastup = true;
3499 break; /* can stop scanning */
3500 }
3501 } /* scan along page */
3502
3503 UnlockReleaseBuffer(buf);
3504
3505 /* Done scanning if we found a tuple here */
3506 if (hastup)
3507 return blkno + 1;
3508 }
3509
3510 /*
3511 * If we fall out of the loop, all the previously-thought-to-be-empty
3512 * pages still are; we need not bother to look at the last known-nonempty
3513 * page.
3514 */
3515 return vacrel->nonempty_pages;
3516}

References AccessExclusiveLock, buf, BUFFER_LOCK_SHARE, BufferGetPage(), CHECK_FOR_INTERRUPTS, DEBUG2, ereport, errmsg(), FirstOffsetNumber, INFO, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, OffsetNumberNext, PageGetItemId(), PageGetMaxOffsetNumber(), PageIsEmpty(), PageIsNew(), PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), StaticAssertDecl, UnlockReleaseBuffer(), and VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL.

Referenced by lazy_truncate_heap().
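
Two details of the loop are easy to miss. The scan runs backwards with an unsigned BlockNumber, which forces the "strange coding of loop control", and the prefetch window start is computed by masking with PREFETCH_SIZE - 1, which requires PREFETCH_SIZE to be a power of two (hence the StaticAssertDecl). A minimal standalone sketch of both tricks, with block values chosen arbitrarily:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t BlockNumber;           /* stand-in for the server typedef */

#define PREFETCH_SIZE        ((BlockNumber) 32)  /* must be a power of two */
#define InvalidBlockNumber   ((BlockNumber) 0xFFFFFFFF)

int
main(void)
{
    BlockNumber prefetchedUntil = InvalidBlockNumber;

    /* Walk backwards from block 70 (exclusive) down to 60, as an example */
    for (BlockNumber blkno = 70; blkno-- > 60;)
    {
        if (prefetchedUntil > blkno)
        {
            /* Round down to the start of the 32-block prefetch window */
            BlockNumber prefetchStart = blkno & ~(PREFETCH_SIZE - 1);

            printf("prefetch blocks %u..%u\n",
                   (unsigned) prefetchStart, (unsigned) blkno);
            prefetchedUntil = prefetchStart;
        }
    }
    return 0;
}

Each window is prefetched once: the run above requests blocks 64..69 and then 32..63, even though the loop itself visits blocks one at a time in descending order.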

◆ dead_items_add()

static void dead_items_add ( LVRelState * vacrel,
BlockNumber  blkno,
OffsetNumber * offsets,
int  num_offsets 
)
static

Definition at line 3591 of file vacuumlazy.c.

3593{
3594 const int prog_index[2] = {
3595 PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS,
3596 PROGRESS_VACUUM_DEAD_TUPLE_BYTES
3597 };
3598 int64 prog_val[2];
3599
3600 TidStoreSetBlockOffsets(vacrel->dead_items, blkno, offsets, num_offsets);
3601 vacrel->dead_items_info->num_items += num_offsets;
3602
3603 /* update the progress information */
3604 prog_val[0] = vacrel->dead_items_info->num_items;
3605 prog_val[1] = TidStoreMemoryUsage(vacrel->dead_items);
3606 pgstat_progress_update_multi_param(2, prog_index, prog_val);
3607}

References pgstat_progress_update_multi_param(), PROGRESS_VACUUM_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS, TidStoreMemoryUsage(), and TidStoreSetBlockOffsets().

Referenced by lazy_scan_noprune(), and lazy_scan_prune().
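
A hedged, non-compilable sketch of the calling pattern: the caller batches all LP_DEAD offsets found on one heap page and hands them over in a single call, so progress counters move once per page rather than once per item. Here page, maxoff, blkno and vacrel are assumed to come from the surrounding heap-scan code:

    OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
    int          ndead = 0;

    for (OffsetNumber off = FirstOffsetNumber; off <= maxoff;
         off = OffsetNumberNext(off))
    {
        ItemId itemid = PageGetItemId(page, off);

        if (ItemIdIsDead(itemid))
            deadoffsets[ndead++] = off;   /* collect, page order preserved */
    }

    if (ndead > 0)
        dead_items_add(vacrel, blkno, deadoffsets, ndead);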

◆ dead_items_alloc()

static void dead_items_alloc ( LVRelState * vacrel,
int  nworkers 
)
static

Definition at line 3526 of file vacuumlazy.c.

3527{
3528 VacDeadItemsInfo *dead_items_info;
3529 int vac_work_mem = AmAutoVacuumWorkerProcess() &&
3530 autovacuum_work_mem != -1 ?
3531 autovacuum_work_mem : maintenance_work_mem;
3532
3533 /*
3534 * Initialize state for a parallel vacuum. As of now, only one worker can
3535 * be used for an index, so we invoke parallelism only if there are at
3536 * least two indexes on a table.
3537 */
3538 if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)
3539 {
3540 /*
3541 * Since parallel workers cannot access data in temporary tables, we
3542 * can't perform parallel vacuum on them.
3543 */
3544 if (RelationUsesLocalBuffers(vacrel->rel))
3545 {
3546 /*
3547 * Give warning only if the user explicitly tries to perform a
3548 * parallel vacuum on the temporary table.
3549 */
3550 if (nworkers > 0)
3551 ereport(WARNING,
3552 (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
3553 vacrel->relname)));
3554 }
3555 else
3556 vacrel->pvs = parallel_vacuum_init(vacrel->rel, vacrel->indrels,
3557 vacrel->nindexes, nworkers,
3558 vac_work_mem,
3559 vacrel->verbose ? INFO : DEBUG2,
3560 vacrel->bstrategy);
3561
3562 /*
3563 * If parallel mode started, dead_items and dead_items_info spaces are
3564 * allocated in DSM.
3565 */
3566 if (ParallelVacuumIsActive(vacrel))
3567 {
3568 vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs,
3569 &vacrel->dead_items_info);
3570 return;
3571 }
3572 }
3573
3574 /*
3575 * Serial VACUUM case. Allocate both dead_items and dead_items_info
3576 * locally.
3577 */
3578
3579 dead_items_info = palloc_object(VacDeadItemsInfo);
3580 dead_items_info->max_bytes = vac_work_mem * (Size) 1024;
3581 dead_items_info->num_items = 0;
3582 vacrel->dead_items_info = dead_items_info;
3583
3584 vacrel->dead_items = TidStoreCreateLocal(dead_items_info->max_bytes, true);
3585}

References AmAutoVacuumWorkerProcess, autovacuum_work_mem, ParallelVacuumState::dead_items, DEBUG2, ereport, errmsg(), INFO, maintenance_work_mem, VacDeadItemsInfo::max_bytes, VacDeadItemsInfo::num_items, palloc_object, parallel_vacuum_get_dead_items(), parallel_vacuum_init(), ParallelVacuumIsActive, RelationUsesLocalBuffers, TidStoreCreateLocal(), and WARNING.

Referenced by heap_vacuum_rel().
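
The vac_work_mem selection above means an autovacuum worker honors autovacuum_work_mem when it is set (not -1) and otherwise falls back to maintenance_work_mem; a manual VACUUM always uses maintenance_work_mem. A minimal standalone restatement, with the settings below chosen purely for illustration:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Illustrative settings; both GUCs are sized in kilobytes */
static int autovacuum_work_mem = -1;      /* -1: use maintenance_work_mem */
static int maintenance_work_mem = 65536;  /* 64 MB */

static size_t
dead_items_budget(bool is_autovacuum_worker)
{
    int vac_work_mem = (is_autovacuum_worker && autovacuum_work_mem != -1)
        ? autovacuum_work_mem
        : maintenance_work_mem;

    /* Same scaling as dead_items_info->max_bytes = vac_work_mem * 1024 */
    return (size_t) vac_work_mem * 1024;
}

int
main(void)
{
    printf("manual VACUUM budget: %zu bytes\n", dead_items_budget(false));
    autovacuum_work_mem = 131072;         /* pretend the GUC is set: 128 MB */
    printf("autovacuum budget:    %zu bytes\n", dead_items_budget(true));
    return 0;
}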

◆ dead_items_cleanup()

static void dead_items_cleanup ( LVRelState * vacrel)
static

Definition at line 3639 of file vacuumlazy.c.

3640{
3641 if (!ParallelVacuumIsActive(vacrel))
3642 {
3643 /* Don't bother with pfree here */
3644 return;
3645 }
3646
3647 /* End parallel mode */
3648 parallel_vacuum_end(vacrel->pvs, vacrel->indstats);
3649 vacrel->pvs = NULL;
3650}

References parallel_vacuum_end(), and ParallelVacuumIsActive.

Referenced by heap_vacuum_rel().

◆ dead_items_reset()

static void dead_items_reset ( LVRelState * vacrel)
static

Definition at line 3613 of file vacuumlazy.c.

3614{
3615 /* Update statistics for dead items */
3616 vacrel->num_dead_items_resets++;
3617 vacrel->total_dead_items_bytes += TidStoreMemoryUsage(vacrel->dead_items);
3618
3619 if (ParallelVacuumIsActive(vacrel))
3620 {
3621 parallel_vacuum_reset_dead_items(vacrel->pvs);
3622 vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs,
3623 &vacrel->dead_items_info);
3624 return;
3625 }
3626
3627 /* Recreate the tidstore with the same max_bytes limitation */
3628 TidStoreDestroy(vacrel->dead_items);
3629 vacrel->dead_items = TidStoreCreateLocal(vacrel->dead_items_info->max_bytes, true);
3630
3631 /* Reset the counter */
3632 vacrel->dead_items_info->num_items = 0;
3633}

References parallel_vacuum_get_dead_items(), parallel_vacuum_reset_dead_items(), ParallelVacuumIsActive, TidStoreCreateLocal(), TidStoreDestroy(), and TidStoreMemoryUsage().

Referenced by lazy_vacuum().
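
A hedged sketch of where this fits in lazy_vacuum()'s cycle: once the TID store fills up (the trigger condition below is hypothetical shorthand, not the real test), the indexes and heap are vacuumed using the accumulated TIDs, and only then is the store emptied for the next batch while keeping the same max_bytes ceiling:

    /* Illustrative fragment, not verbatim source */
    if (dead_items_store_is_full)         /* hypothetical trigger */
    {
        lazy_vacuum_all_indexes(vacrel);  /* remove index entries for TIDs */
        lazy_vacuum_heap_rel(vacrel);     /* reap the heap line pointers   */
        dead_items_reset(vacrel);         /* start the next batch empty    */
    }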

◆ find_next_unskippable_block()

static void find_next_unskippable_block ( LVRelState * vacrel,
bool * skipsallvis 
)
static

Definition at line 1721 of file vacuumlazy.c.

1722{
1723 BlockNumber rel_pages = vacrel->rel_pages;
1724 BlockNumber next_unskippable_block = vacrel->next_unskippable_block + 1;
1725 Buffer next_unskippable_vmbuffer = vacrel->next_unskippable_vmbuffer;
1726 bool next_unskippable_eager_scanned = false;
1727
1728 *skipsallvis = false;
1729
1730 for (;; next_unskippable_block++)
1731 {
1732 uint8 mapbits = visibilitymap_get_status(vacrel->rel,
1733 next_unskippable_block,
1734 &next_unskippable_vmbuffer);
1735
1736
1737 /*
1738 * At the start of each eager scan region, normal vacuums with eager
1739 * scanning enabled reset the failure counter, allowing vacuum to
1740 * resume eager scanning if it had been suspended in the previous
1741 * region.
1742 */
1743 if (next_unskippable_block >= vacrel->next_eager_scan_region_start)
1744 {
1745 vacrel->eager_scan_remaining_fails =
1746 vacrel->eager_scan_max_fails_per_region;
1747 vacrel->next_eager_scan_region_start += EAGER_SCAN_REGION_SIZE;
1748 }
1749
1750 /*
1751 * A block is unskippable if it is not all visible according to the
1752 * visibility map.
1753 */
1754 if ((mapbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
1755 {
1756 Assert((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0);
1757 break;
1758 }
1759
1760 /*
1761 * Caller must scan the last page to determine whether it has tuples
1762 * (caller must have the opportunity to set vacrel->nonempty_pages).
1763 * This rule avoids having lazy_truncate_heap() take access-exclusive
1764 * lock on rel to attempt a truncation that fails anyway, just because
1765 * there are tuples on the last page (it is likely that there will be
1766 * tuples on other nearby pages as well, but those can be skipped).
1767 *
1768 * Implement this by always treating the last block as unsafe to skip.
1769 */
1770 if (next_unskippable_block == rel_pages - 1)
1771 break;
1772
1773 /* DISABLE_PAGE_SKIPPING makes all skipping unsafe */
1774 if (!vacrel->skipwithvm)
1775 break;
1776
1777 /*
1778 * All-frozen pages cannot contain XIDs < OldestXmin (XIDs that aren't
1779 * already frozen by now), so this page can be skipped.
1780 */
1781 if ((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0)
1782 continue;
1783
1784 /*
1785 * Aggressive vacuums cannot skip any all-visible pages that are not
1786 * also all-frozen.
1787 */
1788 if (vacrel->aggressive)
1789 break;
1790
1791 /*
1792 * Normal vacuums with eager scanning enabled only skip all-visible
1793 * but not all-frozen pages if they have hit the failure limit for the
1794 * current eager scan region.
1795 */
1796 if (vacrel->eager_scan_remaining_fails > 0)
1797 {
1798 next_unskippable_eager_scanned = true;
1799 break;
1800 }
1801
1802 /*
1803 * All-visible blocks are safe to skip in a normal vacuum. But
1804 * remember that the final range contains such a block for later.
1805 */
1806 *skipsallvis = true;
1807 }
1808
1809 /* write the local variables back to vacrel */
1810 vacrel->next_unskippable_block = next_unskippable_block;
1811 vacrel->next_unskippable_eager_scanned = next_unskippable_eager_scanned;
1812 vacrel->next_unskippable_vmbuffer = next_unskippable_vmbuffer;
1813}

References Assert, EAGER_SCAN_REGION_SIZE, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_get_status().

Referenced by heap_vac_scan_next_block().
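
Setting aside the last-block rule and the eager-scan bookkeeping, the skip decision reduces to a small predicate over the two visibility-map bits plus the vacuum mode. A minimal standalone condensation, assuming the bit values from visibilitymap.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit values as defined in visibilitymap.h */
#define VISIBILITYMAP_ALL_VISIBLE 0x01
#define VISIBILITYMAP_ALL_FROZEN  0x02

/*
 * Returns true if a block may be skipped.  Simplified: the real loop also
 * refuses to skip the last block and may eager-scan all-visible pages.
 */
static bool
block_is_skippable(uint8_t mapbits, bool aggressive, bool skipwithvm)
{
    if ((mapbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
        return false;           /* not all-visible: must scan */
    if (!skipwithvm)
        return false;           /* DISABLE_PAGE_SKIPPING */
    if (mapbits & VISIBILITYMAP_ALL_FROZEN)
        return true;            /* all-frozen: safe for any vacuum */
    return !aggressive;         /* all-visible only: normal vacuum may skip */
}

int
main(void)
{
    printf("%d\n", block_is_skippable(0x03, true, true));   /* 1 */
    printf("%d\n", block_is_skippable(0x01, true, true));   /* 0 */
    printf("%d\n", block_is_skippable(0x01, false, true));  /* 1 */
    return 0;
}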

◆ heap_page_would_be_all_visible()

static bool heap_page_would_be_all_visible ( Relation  rel,
Buffer  buf,
TransactionId  OldestXmin,
OffsetNumber * deadoffsets,
int  ndeadoffsets,
bool * all_frozen,
TransactionId * visibility_cutoff_xid,
OffsetNumber * logging_offnum 
)
static

Definition at line 3707 of file vacuumlazy.c.

3714{
3715 Page page = BufferGetPage(buf);
3716 BlockNumber blockno = BufferGetBlockNumber(buf);
3717 OffsetNumber offnum,
3718 maxoff;
3719 bool all_visible = true;
3720 int matched_dead_count = 0;
3721
3722 *visibility_cutoff_xid = InvalidTransactionId;
3723 *all_frozen = true;
3724
3725 Assert(ndeadoffsets == 0 || deadoffsets);
3726
3727#ifdef USE_ASSERT_CHECKING
3728 /* Confirm input deadoffsets[] is strictly sorted */
3729 if (ndeadoffsets > 1)
3730 {
3731 for (int i = 1; i < ndeadoffsets; i++)
3732 Assert(deadoffsets[i - 1] < deadoffsets[i]);
3733 }
3734#endif
3735
3736 maxoff = PageGetMaxOffsetNumber(page);
3737 for (offnum = FirstOffsetNumber;
3738 offnum <= maxoff && all_visible;
3739 offnum = OffsetNumberNext(offnum))
3740 {
3741 ItemId itemid;
3742 HeapTupleData tuple;
3743 TransactionId dead_after;
3744
3745 /*
3746 * Set the offset number so that we can display it along with any
3747 * error that occurred while processing this tuple.
3748 */
3749 *logging_offnum = offnum;
3750 itemid = PageGetItemId(page, offnum);
3751
3752 /* Unused or redirect line pointers are of no interest */
3753 if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
3754 continue;
3755
3756 ItemPointerSet(&(tuple.t_self), blockno, offnum);
3757
3758 /*
3759 * Dead line pointers can have index pointers pointing to them. So
3760 * they can't be treated as visible
3761 */
3762 if (ItemIdIsDead(itemid))
3763 {
3764 if (!deadoffsets ||
3765 matched_dead_count >= ndeadoffsets ||
3766 deadoffsets[matched_dead_count] != offnum)
3767 {
3768 *all_frozen = all_visible = false;
3769 break;
3770 }
3771 matched_dead_count++;
3772 continue;
3773 }
3774
3775 Assert(ItemIdIsNormal(itemid));
3776
3777 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
3778 tuple.t_len = ItemIdGetLength(itemid);
3779 tuple.t_tableOid = RelationGetRelid(rel);
3780
3781 /* Visibility checks may do IO or allocate memory */
3782 Assert(CritSectionCount == 0);
3783 switch (HeapTupleSatisfiesVacuumHorizon(&tuple, buf, &dead_after))
3784 {
3785 case HEAPTUPLE_LIVE:
3786 {
3787 TransactionId xmin;
3788
3789 /* Check comments in lazy_scan_prune. */
3790 if (!HeapTupleHeaderXminCommitted(tuple.t_data))
3791 {
3792 all_visible = false;
3793 *all_frozen = false;
3794 break;
3795 }
3796
3797 /*
3798 * The inserter definitely committed. But is it old enough
3799 * that everyone sees it as committed?
3800 */
3801 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
3802 if (!TransactionIdPrecedes(xmin, OldestXmin))
3803 {
3804 all_visible = false;
3805 *all_frozen = false;
3806 break;
3807 }
3808
3809 /* Track newest xmin on page. */
3810 if (TransactionIdFollows(xmin, *visibility_cutoff_xid) &&
3811 TransactionIdIsNormal(xmin))
3812 *visibility_cutoff_xid = xmin;
3813
3814 /* Check whether this tuple is already frozen or not */
3815 if (all_visible && *all_frozen &&
3816 heap_tuple_needs_eventual_freeze(tuple.t_data))
3817 *all_frozen = false;
3818 }
3819 break;
3820
3821 case HEAPTUPLE_DEAD:
3822 case HEAPTUPLE_RECENTLY_DEAD:
3823 case HEAPTUPLE_INSERT_IN_PROGRESS:
3824 case HEAPTUPLE_DELETE_IN_PROGRESS:
3825 {
3826 all_visible = false;
3827 *all_frozen = false;
3828 break;
3829 }
3830 default:
3831 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
3832 break;
3833 }
3834 } /* scan along page */
3835
3836 /* Clear the offset information once we have processed the given page. */
3837 *logging_offnum = InvalidOffsetNumber;
3838
3839 return all_visible;
3840}

References Assert, buf, BufferGetBlockNumber(), BufferGetPage(), CritSectionCount, elog, ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin(), HeapTupleHeaderXminCommitted(), HeapTupleSatisfiesVacuumHorizon(), i, InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), OffsetNumberNext, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdIsNormal, and TransactionIdPrecedes().

Referenced by lazy_vacuum_heap_page().
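
A hedged, non-compilable sketch of how a caller like lazy_vacuum_heap_page() can consult this function before upgrading the visibility map; buffer, deadoffsets, num_offsets and vacrel are assumed to come from the caller's context, and vacrel->offnum doubles as the logging_offnum output for the error callback:

    bool          all_frozen;
    TransactionId visibility_cutoff_xid;

    /*
     * Would the page be all-visible once the collected dead items are
     * reaped?  Must run outside a critical section, since the visibility
     * checks may do IO.
     */
    if (heap_page_would_be_all_visible(vacrel->rel, buffer,
                                       vacrel->cutoffs.OldestXmin,
                                       deadoffsets, num_offsets,
                                       &all_frozen, &visibility_cutoff_xid,
                                       &vacrel->offnum))
    {
        uint8 flags = VISIBILITYMAP_ALL_VISIBLE;

        if (all_frozen)
            flags |= VISIBILITYMAP_ALL_FROZEN;
        /* ... then set PD_ALL_VISIBLE and the VM bits under proper locks ... */
    }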

◆ heap_vac_scan_next_block()

static BlockNumber heap_vac_scan_next_block ( ReadStream * stream,
void * callback_private_data,
void * per_buffer_data 
)
static

Definition at line 1621 of file vacuumlazy.c.

1624{
1625 BlockNumber next_block;
1626 LVRelState *vacrel = callback_private_data;
1627
1628 /* relies on InvalidBlockNumber + 1 overflowing to 0 on first call */
1629 next_block = vacrel->current_block + 1;
1630
1631 /* Have we reached the end of the relation? */
1632 if (next_block >= vacrel->rel_pages)
1633 {
1634 if (BufferIsValid(vacrel->next_unskippable_vmbuffer))
1635 {
1636 ReleaseBuffer(vacrel->next_unskippable_vmbuffer);
1637 vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1638 }
1639 return InvalidBlockNumber;
1640 }
1641
1642 /*
1643 * We must be in one of the three following states:
1644 */
1645 if (next_block > vacrel->next_unskippable_block ||
1646 vacrel->next_unskippable_block == InvalidBlockNumber)
1647 {
1648 /*
1649 * 1. We have just processed an unskippable block (or we're at the
1650 * beginning of the scan). Find the next unskippable block using the
1651 * visibility map.
1652 */
1653 bool skipsallvis;
1654
1655 find_next_unskippable_block(vacrel, &skipsallvis);
1656
1657 /*
1658 * We now know the next block that we must process. It can be the
1659 * next block after the one we just processed, or something further
1660 * ahead. If it's further ahead, we can jump to it, but we choose to
1661 * do so only if we can skip at least SKIP_PAGES_THRESHOLD consecutive
1662 * pages. Since we're reading sequentially, the OS should be doing
1663 * readahead for us, so there's no gain in skipping a page now and
1664 * then. Skipping such a range might even discourage sequential
1665 * detection.
1666 *
1667 * This test also enables more frequent relfrozenxid advancement
1668 * during non-aggressive VACUUMs. If the range has any all-visible
1669 * pages then skipping makes updating relfrozenxid unsafe, which is a
1670 * real downside.
1671 */
1672 if (vacrel->next_unskippable_block - next_block >= SKIP_PAGES_THRESHOLD)
1673 {
1674 next_block = vacrel->next_unskippable_block;
1675 if (skipsallvis)
1676 vacrel->skippedallvis = true;
1677 }
1678 }
1679
1680 /* Now we must be in one of the two remaining states: */
1681 if (next_block < vacrel->next_unskippable_block)
1682 {
1683 /*
1684 * 2. We are processing a range of blocks that we could have skipped
1685 * but chose not to. We know that they are all-visible in the VM,
1686 * otherwise they would've been unskippable.
1687 */
1688 vacrel->current_block = next_block;
1689 /* Block was not eager scanned */
1690 *((bool *) per_buffer_data) = false;
1691 return vacrel->current_block;
1692 }
1693 else
1694 {
1695 /*
1696 * 3. We reached the next unskippable block. Process it. On next
1697 * iteration, we will be back in state 1.
1698 */
1699 Assert(next_block == vacrel->next_unskippable_block);
1700
1701 vacrel->current_block = next_block;
1702 *((bool *) per_buffer_data) = vacrel->next_unskippable_eager_scanned;
1703 return vacrel->current_block;
1704 }
1705}

References Assert, BufferIsValid(), LVRelState::current_block, find_next_unskippable_block(), InvalidBlockNumber, InvalidBuffer, ReleaseBuffer(), and SKIP_PAGES_THRESHOLD.

Referenced by lazy_scan_heap().
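
A hedged sketch of how lazy_scan_heap() can wire this callback into the read-stream machinery, reserving one bool of per-buffer data for the "eager scanned" flag the callback writes (the real caller may pass additional flags such as READ_STREAM_USE_BATCHING):

    ReadStream *stream;

    stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE,
                                        vacrel->bstrategy,
                                        vacrel->rel,
                                        MAIN_FORKNUM,
                                        heap_vac_scan_next_block,
                                        vacrel,        /* callback_private_data */
                                        sizeof(bool)); /* per-buffer eager flag */

    for (;;)
    {
        void   *per_buffer_data;
        Buffer  buf = read_stream_next_buffer(stream, &per_buffer_data);

        if (!BufferIsValid(buf))
            break;              /* callback returned InvalidBlockNumber */
        /* ... process the page; *(bool *) per_buffer_data marks eager scans ... */
    }
    read_stream_end(stream);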

◆ heap_vacuum_eager_scan_setup()

static void heap_vacuum_eager_scan_setup ( LVRelState * vacrel,
const VacuumParams  params 
)
static

Definition at line 499 of file vacuumlazy.c.

500{
501 uint32 randseed;
502 BlockNumber allvisible;
503 BlockNumber allfrozen;
504 float first_region_ratio;
505 bool oldest_unfrozen_before_cutoff = false;
506
507 /*
508 * Initialize eager scan management fields to their disabled values.
509 * Aggressive vacuums, normal vacuums of small tables, and normal vacuums
510 * of tables without sufficiently old tuples disable eager scanning.
511 */
512 vacrel->next_eager_scan_region_start = InvalidBlockNumber;
513 vacrel->eager_scan_max_fails_per_region = 0;
514 vacrel->eager_scan_remaining_fails = 0;
515 vacrel->eager_scan_remaining_successes = 0;
516
517 /* If eager scanning is explicitly disabled, just return. */
518 if (params.max_eager_freeze_failure_rate == 0)
519 return;
520
521 /*
522 * The caller will have determined whether or not an aggressive vacuum is
523 * required by either the vacuum parameters or the relative age of the
524 * oldest unfrozen transaction IDs. An aggressive vacuum must scan every
525 * all-visible page to safely advance the relfrozenxid and/or relminmxid,
526 * so scans of all-visible pages are not considered eager.
527 */
528 if (vacrel->aggressive)
529 return;
530
531 /*
532 * Aggressively vacuuming a small relation shouldn't take long, so it
533 * isn't worth amortizing. We use two times the region size as the size
534 * cutoff because the eager scan start block is a random spot somewhere in
535 * the first region, making the second region the first to be eager
536 * scanned normally.
537 */
538 if (vacrel->rel_pages < 2 * EAGER_SCAN_REGION_SIZE)
539 return;
540
541 /*
542 * We only want to enable eager scanning if we are likely to be able to
543 * freeze some of the pages in the relation.
544 *
545 * Tuples with XIDs older than OldestXmin or MXIDs older than OldestMxact
546 * are technically freezable, but we won't freeze them unless the criteria
547 * for opportunistic freezing is met. Only tuples with XIDs/MXIDs older
548 * than the FreezeLimit/MultiXactCutoff are frozen in the common case.
549 *
550 * So, as a heuristic, we wait until the FreezeLimit has advanced past the
551 * relfrozenxid or the MultiXactCutoff has advanced past the relminmxid to
552 * enable eager scanning.
553 */
554 if (TransactionIdIsNormal(vacrel->cutoffs.relfrozenxid) &&
555 TransactionIdPrecedes(vacrel->cutoffs.relfrozenxid,
556 vacrel->cutoffs.FreezeLimit))
557 oldest_unfrozen_before_cutoff = true;
558
559 if (!oldest_unfrozen_before_cutoff &&
560 MultiXactIdIsValid(vacrel->cutoffs.relminmxid) &&
561 MultiXactIdPrecedes(vacrel->cutoffs.relminmxid,
562 vacrel->cutoffs.MultiXactCutoff))
563 oldest_unfrozen_before_cutoff = true;
564
565 if (!oldest_unfrozen_before_cutoff)
566 return;
567
568 /* We have met the criteria to eagerly scan some pages. */
569
570 /*
571 * Our success cap is MAX_EAGER_FREEZE_SUCCESS_RATE of the number of
572 * all-visible but not all-frozen blocks in the relation.
573 */
574 visibilitymap_count(vacrel->rel, &allvisible, &allfrozen);
575
576 vacrel->eager_scan_remaining_successes =
577 (BlockNumber) (MAX_EAGER_FREEZE_SUCCESS_RATE *
578 (allvisible - allfrozen));
579
580 /* If every all-visible page is frozen, eager scanning is disabled. */
581 if (vacrel->eager_scan_remaining_successes == 0)
582 return;
583
584 /*
585 * Now calculate the bounds of the first eager scan region. Its end block
586 * will be a random spot somewhere in the first EAGER_SCAN_REGION_SIZE
587 * blocks. This affects the bounds of all subsequent regions and avoids
588 * eager scanning and failing to freeze the same blocks each vacuum of the
589 * relation.
590 */
591 randseed = pg_prng_uint32(&pg_global_prng_state);
592
593 vacrel->next_eager_scan_region_start = randseed % EAGER_SCAN_REGION_SIZE;
594
595 Assert(params.max_eager_freeze_failure_rate > 0 &&
596 params.max_eager_freeze_failure_rate <= 1);
597
598 vacrel->eager_scan_max_fails_per_region =
599 params.max_eager_freeze_failure_rate *
600 EAGER_SCAN_REGION_SIZE;
601
602 /*
603 * The first region will be smaller than subsequent regions. As such,
604 * adjust the eager freeze failures tolerated for this region.
605 */
606 first_region_ratio = 1 - (float) vacrel->next_eager_scan_region_start /
607 EAGER_SCAN_REGION_SIZE;
608
609 vacrel->eager_scan_remaining_fails =
610 vacrel->eager_scan_max_fails_per_region *
611 first_region_ratio;
612}

References Assert, EAGER_SCAN_REGION_SIZE, InvalidBlockNumber, VacuumParams::max_eager_freeze_failure_rate, MAX_EAGER_FREEZE_SUCCESS_RATE, MultiXactIdIsValid, MultiXactIdPrecedes(), pg_global_prng_state, pg_prng_uint32(), TransactionIdIsNormal, TransactionIdPrecedes(), and visibilitymap_count().

Referenced by heap_vacuum_rel().
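
To make the region arithmetic concrete, a minimal standalone sketch with illustrative inputs (the failure rate and seed below are assumptions, not values taken from this file): a full region tolerates failure_rate * EAGER_SCAN_REGION_SIZE eager-freeze failures, and the randomly shortened first region gets a proportionally smaller budget:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t BlockNumber;

#define EAGER_SCAN_REGION_SIZE 4096

int
main(void)
{
    double      failure_rate = 0.03;    /* assumed setting for illustration */
    uint32_t    randseed = 123456789;   /* stand-in for pg_prng_uint32() */
    BlockNumber region_start = randseed % EAGER_SCAN_REGION_SIZE;
    BlockNumber max_fails = failure_rate * EAGER_SCAN_REGION_SIZE;
    float       first_region_ratio = 1 - (float) region_start /
        EAGER_SCAN_REGION_SIZE;

    printf("first region starts at block %u\n", (unsigned) region_start);
    printf("failures allowed per full region: %u\n", (unsigned) max_fails);
    printf("failures allowed in the short first region: %u\n",
           (unsigned) (BlockNumber) (max_fails * first_region_ratio));
    return 0;
}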

◆ heap_vacuum_rel()

void heap_vacuum_rel ( Relation  rel,
const VacuumParams  params,
BufferAccessStrategy  bstrategy 
)

Definition at line 626 of file vacuumlazy.c.

628{
629 LVRelState *vacrel;
630 bool verbose,
631 instrument,
632 skipwithvm,
640 TimestampTz starttime = 0;
641 PgStat_Counter startreadtime = 0,
642 startwritetime = 0;
643 WalUsage startwalusage = pgWalUsage;
644 BufferUsage startbufferusage = pgBufferUsage;
645 ErrorContextCallback errcallback;
646 char **indnames = NULL;
648
649 verbose = (params.options & VACOPT_VERBOSE) != 0;
650 instrument = (verbose || (AmAutoVacuumWorkerProcess() &&
651 params.log_vacuum_min_duration >= 0));
652 if (instrument)
653 {
654 pg_rusage_init(&ru0);
655 if (track_io_timing)
656 {
657 startreadtime = pgStatBlockReadTime;
658 startwritetime = pgStatBlockWriteTime;
659 }
660 }
661
662 /* Used for instrumentation and stats report */
663 starttime = GetCurrentTimestamp();
664
665 pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
666 RelationGetRelid(rel));
667 if (AmAutoVacuumWorkerProcess())
668 pgstat_progress_update_param(PROGRESS_VACUUM_STARTED_BY,
669 params.is_wraparound
670 ? PROGRESS_VACUUM_STARTED_BY_AUTOVACUUM_WRAPAROUND
671 : PROGRESS_VACUUM_STARTED_BY_AUTOVACUUM);
672 else
673 pgstat_progress_update_param(PROGRESS_VACUUM_STARTED_BY,
674 PROGRESS_VACUUM_STARTED_BY_MANUAL);
675
676 /*
677 * Setup error traceback support for ereport() first. The idea is to set
678 * up an error context callback to display additional information on any
679 * error during a vacuum. During different phases of vacuum, we update
680 * the state so that the error context callback always displays current
681 * information.
682 *
683 * Copy the names of heap rel into local memory for error reporting
684 * purposes, too. It isn't always safe to assume that we can get the name
685 * of each rel. It's convenient for code in lazy_scan_heap to always use
686 * these temp copies.
687 */
688 vacrel = palloc0_object(LVRelState);
689 vacrel->dbname = get_database_name(MyDatabaseId);
690 vacrel->relnamespace = get_namespace_name(RelationGetNamespace(rel));
691 vacrel->relname = pstrdup(RelationGetRelationName(rel));
692 vacrel->indname = NULL;
693 vacrel->phase = VACUUM_ERRCB_PHASE_UNKNOWN;
694 vacrel->verbose = verbose;
695 errcallback.callback = vacuum_error_callback;
696 errcallback.arg = vacrel;
697 errcallback.previous = error_context_stack;
698 error_context_stack = &errcallback;
699
700 /* Set up high level stuff about rel and its indexes */
701 vacrel->rel = rel;
702 vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes,
703 &vacrel->indrels);
704 vacrel->bstrategy = bstrategy;
705 if (instrument && vacrel->nindexes > 0)
706 {
707 /* Copy index names used by instrumentation (not error reporting) */
708 indnames = palloc_array(char *, vacrel->nindexes);
709 for (int i = 0; i < vacrel->nindexes; i++)
710 indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
711 }
712
713 /*
714 * The index_cleanup param either disables index vacuuming and cleanup or
715 * forces it to go ahead when we would otherwise apply the index bypass
716 * optimization. The default is 'auto', which leaves the final decision
717 * up to lazy_vacuum().
718 *
719 * The truncate param allows the user to avoid attempting relation truncation,
720 * though it can't force truncation to happen.
721 */
722 Assert(params.index_cleanup != VACOPTVALUE_UNSPECIFIED);
723 Assert(params.truncate != VACOPTVALUE_UNSPECIFIED &&
724 params.truncate != VACOPTVALUE_AUTO);
725
726 /*
727 * While VacuumFailsafeActive is reset to false before calling this, we
728 * still need to reset it here due to recursive calls.
729 */
730 VacuumFailsafeActive = false;
731 vacrel->consider_bypass_optimization = true;
732 vacrel->do_index_vacuuming = true;
733 vacrel->do_index_cleanup = true;
734 vacrel->do_rel_truncate = (params.truncate != VACOPTVALUE_DISABLED);
735 if (params.index_cleanup == VACOPTVALUE_DISABLED)
736 {
737 /* Force disable index vacuuming up-front */
738 vacrel->do_index_vacuuming = false;
739 vacrel->do_index_cleanup = false;
740 }
741 else if (params.index_cleanup == VACOPTVALUE_ENABLED)
742 {
743 /* Force index vacuuming. Note that failsafe can still bypass. */
744 vacrel->consider_bypass_optimization = false;
745 }
746 else
747 {
748 /* Default/auto, make all decisions dynamically */
749 Assert(params.index_cleanup == VACOPTVALUE_AUTO);
750 }
751
752 /* Initialize page counters explicitly (be tidy) */
753 vacrel->scanned_pages = 0;
754 vacrel->eager_scanned_pages = 0;
755 vacrel->removed_pages = 0;
756 vacrel->new_frozen_tuple_pages = 0;
757 vacrel->lpdead_item_pages = 0;
758 vacrel->missed_dead_pages = 0;
759 vacrel->nonempty_pages = 0;
760 /* dead_items_alloc allocates vacrel->dead_items later on */
761
762 /* Allocate/initialize output statistics state */
763 vacrel->new_rel_tuples = 0;
764 vacrel->new_live_tuples = 0;
765 vacrel->indstats = (IndexBulkDeleteResult **)
766 palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
767
768 /* Initialize remaining counters (be tidy) */
769 vacrel->num_index_scans = 0;
770 vacrel->num_dead_items_resets = 0;
771 vacrel->total_dead_items_bytes = 0;
772 vacrel->tuples_deleted = 0;
773 vacrel->tuples_frozen = 0;
774 vacrel->lpdead_items = 0;
775 vacrel->live_tuples = 0;
776 vacrel->recently_dead_tuples = 0;
777 vacrel->missed_dead_tuples = 0;
778
779 vacrel->new_all_visible_pages = 0;
780 vacrel->new_all_visible_all_frozen_pages = 0;
781 vacrel->new_all_frozen_pages = 0;
782
783 /*
784 * Get cutoffs that determine which deleted tuples are considered DEAD,
785 * not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze. Then determine
786 * the extent of the blocks that we'll scan in lazy_scan_heap. It has to
787 * happen in this order to ensure that the OldestXmin cutoff field works
788 * as an upper bound on the XIDs stored in the pages we'll actually scan
789 * (NewRelfrozenXid tracking must never be allowed to miss unfrozen XIDs).
790 *
791 * Next acquire vistest, a related cutoff that's used in pruning. We use
792 * vistest in combination with OldestXmin to ensure that
793 * heap_page_prune_and_freeze() always removes any deleted tuple whose
794 * xmax is < OldestXmin. lazy_scan_prune must never become confused about
795 * whether a tuple should be frozen or removed. (In the future we might
796 * want to teach lazy_scan_prune to recompute vistest from time to time,
797 * to increase the number of dead tuples it can prune away.)
798 */
799 vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
800 vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
801 vacrel->vistest = GlobalVisTestFor(rel);
802
803 /* Initialize state used to track oldest extant XID/MXID */
804 vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin;
805 vacrel->NewRelminMxid = vacrel->cutoffs.OldestMxact;
806
807 /*
808 * Initialize state related to tracking all-visible page skipping. This is
809 * very important to determine whether or not it is safe to advance the
810 * relfrozenxid/relminmxid.
811 */
812 vacrel->skippedallvis = false;
813 skipwithvm = true;
814 if (params.options & VACOPT_DISABLE_PAGE_SKIPPING)
815 {
816 /*
817 * Force aggressive mode, and disable skipping blocks using the
818 * visibility map (even those set all-frozen)
819 */
820 vacrel->aggressive = true;
821 skipwithvm = false;
822 }
823
824 vacrel->skipwithvm = skipwithvm;
825
826 /*
827 * Set up eager scan tracking state. This must happen after determining
828 * whether or not the vacuum must be aggressive, because only normal
829 * vacuums use the eager scan algorithm.
830 */
831 heap_vacuum_eager_scan_setup(vacrel, params);
832
833 /* Report the vacuum mode: 'normal' or 'aggressive' */
834 pgstat_progress_update_param(PROGRESS_VACUUM_MODE,
835 vacrel->aggressive
836 ? PROGRESS_VACUUM_MODE_AGGRESSIVE
837 : PROGRESS_VACUUM_MODE_NORMAL);
838
839 if (verbose)
840 {
841 if (vacrel->aggressive)
842 ereport(INFO,
843 (errmsg("aggressively vacuuming \"%s.%s.%s\"",
844 vacrel->dbname, vacrel->relnamespace,
845 vacrel->relname)));
846 else
847 ereport(INFO,
848 (errmsg("vacuuming \"%s.%s.%s\"",
849 vacrel->dbname, vacrel->relnamespace,
850 vacrel->relname)));
851 }
852
853 /*
854 * Allocate dead_items memory using dead_items_alloc. This handles
855 * parallel VACUUM initialization as part of allocating shared memory
856 * space used for dead_items. (But do a failsafe precheck first, to
857 * ensure that parallel VACUUM won't be attempted at all when relfrozenxid
858 * is already dangerously old.)
859 */
860 lazy_check_wraparound_failsafe(vacrel);
861 dead_items_alloc(vacrel, params.nworkers);
862
863 /*
864 * Call lazy_scan_heap to perform all required heap pruning, index
865 * vacuuming, and heap vacuuming (plus related processing)
866 */
867 lazy_scan_heap(vacrel);
868
869 /*
870 * Save dead items max_bytes and update the memory usage statistics before
871 * cleanup, they are freed in parallel vacuum cases during
872 * dead_items_cleanup().
873 */
874 dead_items_max_bytes = vacrel->dead_items_info->max_bytes;
875 vacrel->total_dead_items_bytes += TidStoreMemoryUsage(vacrel->dead_items);
876
877 /*
878 * Free resources managed by dead_items_alloc. This ends parallel mode in
879 * passing when necessary.
880 */
881 dead_items_cleanup(vacrel);
882 Assert(!IsInParallelMode());
883
884 /*
885 * Update pg_class entries for each of rel's indexes where appropriate.
886 *
887 * Unlike the later update to rel's pg_class entry, this is not critical.
888 * Maintains relpages/reltuples statistics used by the planner only.
889 */
890 if (vacrel->do_index_cleanup)
891 update_relstats_all_indexes(vacrel);
892
893 /* Done with rel's indexes */
894 vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
895
896 /* Optionally truncate rel */
897 if (should_attempt_truncation(vacrel))
898 lazy_truncate_heap(vacrel);
899
900 /* Pop the error context stack */
901 error_context_stack = errcallback.previous;
902
903 /* Report that we are now doing final cleanup */
904 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
905 PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
906
907 /*
908 * Prepare to update rel's pg_class entry.
909 *
910 * Aggressive VACUUMs must always be able to advance relfrozenxid to a
911 * value >= FreezeLimit, and relminmxid to a value >= MultiXactCutoff.
912 * Non-aggressive VACUUMs may advance them by any amount, or not at all.
913 */
914 Assert(vacrel->NewRelfrozenXid == vacrel->cutoffs.OldestXmin ||
915 TransactionIdPrecedesOrEquals(vacrel->aggressive ? vacrel->cutoffs.FreezeLimit :
916 vacrel->cutoffs.relfrozenxid,
917 vacrel->NewRelfrozenXid));
918 Assert(vacrel->NewRelminMxid == vacrel->cutoffs.OldestMxact ||
919 MultiXactIdPrecedesOrEquals(vacrel->aggressive ? vacrel->cutoffs.MultiXactCutoff :
920 vacrel->cutoffs.relminmxid,
921 vacrel->NewRelminMxid));
922 if (vacrel->skippedallvis)
923 {
924 /*
925 * Must keep original relfrozenxid in a non-aggressive VACUUM that
926 * chose to skip an all-visible page range. The state that tracks new
927 * values will have missed unfrozen XIDs from the pages we skipped.
928 */
929 Assert(!vacrel->aggressive);
930 vacrel->NewRelfrozenXid = InvalidTransactionId;
931 vacrel->NewRelminMxid = InvalidMultiXactId;
932 }
933
934 /*
935 * For safety, clamp relallvisible to be not more than what we're setting
936 * pg_class.relpages to
937 */
938 new_rel_pages = vacrel->rel_pages; /* After possible rel truncation */
939 visibilitymap_count(rel, &new_rel_allvisible, &new_rel_allfrozen);
940 if (new_rel_allvisible > new_rel_pages)
941 new_rel_allvisible = new_rel_pages;
942
943 /*
944 * An all-frozen block _must_ be all-visible. As such, clamp the count of
945 * all-frozen blocks to the count of all-visible blocks. This matches the
946 * clamping of relallvisible above.
947 */
948 if (new_rel_allfrozen > new_rel_allvisible)
949 new_rel_allfrozen = new_rel_allvisible;
950
951 /*
952 * Now actually update rel's pg_class entry.
953 *
954 * In principle new_live_tuples could be -1 indicating that we (still)
955 * don't know the tuple count. In practice that can't happen, since we
956 * scan every page that isn't skipped using the visibility map.
957 */
958 vac_update_relstats(rel, new_rel_pages, vacrel->new_live_tuples,
959 new_rel_allvisible, new_rel_allfrozen,
960 vacrel->nindexes > 0,
961 vacrel->NewRelfrozenXid, vacrel->NewRelminMxid,
962 &frozenxid_updated, &minmulti_updated, false);
963
964 /*
965 * Report results to the cumulative stats system, too.
966 *
967 * Deliberately avoid telling the stats system about LP_DEAD items that
968 * remain in the table due to VACUUM bypassing index and heap vacuuming.
969 * ANALYZE will consider the remaining LP_DEAD items to be dead "tuples".
970 * It seems like a good idea to err on the side of not vacuuming again too
971 * soon in cases where the failsafe prevented significant amounts of heap
972 * vacuuming.
973 */
974 pgstat_report_vacuum(RelationGetRelid(rel),
975 Max(vacrel->new_live_tuples, 0),
976 vacrel->recently_dead_tuples +
977 vacrel->missed_dead_tuples,
978 starttime);
979 pgstat_progress_end_command();
980
981 if (instrument)
982 {
983 TimestampTz endtime = GetCurrentTimestamp();
984
985 if (verbose || params.log_vacuum_min_duration == 0 ||
986 TimestampDifferenceExceeds(starttime, endtime,
987 params.log_vacuum_min_duration))
988 {
989 long secs_dur;
990 int usecs_dur;
991 WalUsage walusage;
992 BufferUsage bufferusage;
993 StringInfoData buf;
994 char *msgfmt;
995 int32 diff;
996 double read_rate = 0,
997 write_rate = 0;
998 int64 total_blks_hit;
999 int64 total_blks_read;
1000 int64 total_blks_dirtied;
1001
1002 TimestampDifference(starttime, endtime, &secs_dur, &usecs_dur);
1003 memset(&walusage, 0, sizeof(WalUsage));
1004 WalUsageAccumDiff(&walusage, &pgWalUsage, &startwalusage);
1005 memset(&bufferusage, 0, sizeof(BufferUsage));
1006 BufferUsageAccumDiff(&bufferusage, &pgBufferUsage, &startbufferusage);
1007
1008 total_blks_hit = bufferusage.shared_blks_hit +
1009 bufferusage.local_blks_hit;
1010 total_blks_read = bufferusage.shared_blks_read +
1011 bufferusage.local_blks_read;
1012 total_blks_dirtied = bufferusage.shared_blks_dirtied +
1013 bufferusage.local_blks_dirtied;
1014
1015 initStringInfo(&buf);
1016 if (verbose)
1017 {
1018 /*
1019 * Aggressiveness already reported earlier, in dedicated
1020 * VACUUM VERBOSE ereport
1021 */
1022 Assert(!params.is_wraparound);
1023 msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
1024 }
1025 else if (params.is_wraparound)
1026 {
1027 /*
1028 * While it's possible for a VACUUM to be both is_wraparound
1029 * and !aggressive, that's just a corner-case -- is_wraparound
1030 * implies aggressive. Produce distinct output for the corner
1031 * case all the same, just in case.
1032 */
1033 if (vacrel->aggressive)
1034 msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
1035 else
1036 msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
1037 }
1038 else
1039 {
1040 if (vacrel->aggressive)
1041 msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
1042 else
1043 msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
1044 }
1045 appendStringInfo(&buf, msgfmt,
1046 vacrel->dbname,
1047 vacrel->relnamespace,
1048 vacrel->relname,
1049 vacrel->num_index_scans);
1050 appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total), %u eagerly scanned\n"),
1051 vacrel->removed_pages,
1052 orig_rel_pages,
1053 vacrel->scanned_pages,
1054 orig_rel_pages == 0 ? 100.0 :
1055 100.0 * vacrel->scanned_pages /
1056 orig_rel_pages,
1057 vacrel->eager_scanned_pages);
1058 appendStringInfo(&buf,
1059 _("tuples: %" PRId64 " removed, %" PRId64 " remain, %" PRId64 " are dead but not yet removable\n"),
1060 vacrel->tuples_deleted,
1061 (int64) vacrel->new_rel_tuples,
1062 vacrel->recently_dead_tuples);
1063 if (vacrel->missed_dead_tuples > 0)
1064 appendStringInfo(&buf,
1065 _("tuples missed: %" PRId64 " dead from %u pages not removed due to cleanup lock contention\n"),
1066 vacrel->missed_dead_tuples,
1067 vacrel->missed_dead_pages);
1068 diff = (int32) (ReadNextTransactionId() -
1069 vacrel->cutoffs.OldestXmin);
1070 appendStringInfo(&buf,
1071 _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
1072 vacrel->cutoffs.OldestXmin, diff);
1073 if (frozenxid_updated)
1074 {
1075 diff = (int32) (vacrel->NewRelfrozenXid -
1076 vacrel->cutoffs.relfrozenxid);
1077 appendStringInfo(&buf,
1078 _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
1079 vacrel->NewRelfrozenXid, diff);
1080 }
1081 if (minmulti_updated)
1082 {
1083 diff = (int32) (vacrel->NewRelminMxid -
1084 vacrel->cutoffs.relminmxid);
1085 appendStringInfo(&buf,
1086 _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
1087 vacrel->NewRelminMxid, diff);
1088 }
1089 appendStringInfo(&buf, _("frozen: %u pages from table (%.2f%% of total) had %" PRId64 " tuples frozen\n"),
1090 vacrel->new_frozen_tuple_pages,
1091 orig_rel_pages == 0 ? 100.0 :
1092 100.0 * vacrel->new_frozen_tuple_pages /
1093 orig_rel_pages,
1094 vacrel->tuples_frozen);
1095
1096 appendStringInfo(&buf,
1097 _("visibility map: %u pages set all-visible, %u pages set all-frozen (%u were all-visible)\n"),
1098 vacrel->new_all_visible_pages,
1099 vacrel->new_all_visible_all_frozen_pages +
1100 vacrel->new_all_frozen_pages,
1101 vacrel->new_all_frozen_pages);
1102 if (vacrel->do_index_vacuuming)
1103 {
1104 if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
1105 appendStringInfoString(&buf, _("index scan not needed: "));
1106 else
1107 appendStringInfoString(&buf, _("index scan needed: "));
1108
1109 msgfmt = _("%u pages from table (%.2f%% of total) had %" PRId64 " dead item identifiers removed\n");
1110 }
1111 else
1112 {
1113 if (!VacuumFailsafeActive)
1114 appendStringInfoString(&buf, _("index scan bypassed: "));
1115 else
1116 appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
1117
1118 msgfmt = _("%u pages from table (%.2f%% of total) have %" PRId64 " dead item identifiers\n");
1119 }
1120 appendStringInfo(&buf, msgfmt,
1121 vacrel->lpdead_item_pages,
1122 orig_rel_pages == 0 ? 100.0 :
1123 100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
1124 vacrel->lpdead_items);
1125 for (int i = 0; i < vacrel->nindexes; i++)
1126 {
1127 IndexBulkDeleteResult *istat = vacrel->indstats[i];
1128
1129 if (!istat)
1130 continue;
1131
1132 appendStringInfo(&buf,
1133 _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
1134 indnames[i],
1135 istat->num_pages,
1136 istat->pages_newly_deleted,
1137 istat->pages_deleted,
1138 istat->pages_free);
1139 }
1140 if (track_cost_delay_timing)
1141 {
1142 /*
1143 * We bypass the changecount mechanism because this value is
1144 * only updated by the calling process. We also rely on the
1145 * above call to pgstat_progress_end_command() to not clear
1146 * the st_progress_param array.
1147 */
1148 appendStringInfo(&buf, _("delay time: %.3f ms\n"),
1149 (double) MyBEEntry->st_progress_param[PROGRESS_VACUUM_DELAY_TIME] / 1000000.0);
1150 }
1151 if (track_io_timing)
1152 {
1153 double read_ms = (double) (pgStatBlockReadTime - startreadtime) / 1000;
1154 double write_ms = (double) (pgStatBlockWriteTime - startwritetime) / 1000;
1155
1156 appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
1157 read_ms, write_ms);
1158 }
1159 if (secs_dur > 0 || usecs_dur > 0)
1160 {
1161 read_rate = (double) BLCKSZ * total_blks_read /
1162 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
1163 write_rate = (double) BLCKSZ * total_blks_dirtied /
1164 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
1165 }
1166 appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
1167 read_rate, write_rate);
1168 appendStringInfo(&buf,
1169 _("buffer usage: %" PRId64 " hits, %" PRId64 " reads, %" PRId64 " dirtied\n"),
1170 total_blks_hit,
1171 total_blks_read,
1172 total_blks_dirtied);
1173 appendStringInfo(&buf,
1174 _("WAL usage: %" PRId64 " records, %" PRId64 " full page images, %" PRIu64 " bytes, %" PRIu64 " full page image bytes, %" PRId64 " buffers full\n"),
1175 walusage.wal_records,
1176 walusage.wal_fpi,
1177 walusage.wal_bytes,
1178 walusage.wal_fpi_bytes,
1179 walusage.wal_buffers_full);
1180
1181 /*
1182 * Report the dead items memory usage.
1183 *
1184 * The num_dead_items_resets counter increases when we reset the
1185 * collected dead items, so the counter is non-zero if at least
1186 * one dead item has been collected, even if index vacuuming is
1187 * disabled.
1188 */
1189 appendStringInfo(&buf,
1190 ngettext("memory usage: dead item storage %.2f MB accumulated across %d reset (limit %.2f MB each)\n",
1191 "memory usage: dead item storage %.2f MB accumulated across %d resets (limit %.2f MB each)\n",
1192 vacrel->num_dead_items_resets),
1193 (double) vacrel->total_dead_items_bytes / (1024 * 1024),
1194 vacrel->num_dead_items_resets,
1195 (double) dead_items_max_bytes / (1024 * 1024));
1196 appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
1197
1198 ereport(verbose ? INFO : LOG,
1199 (errmsg_internal("%s", buf.data)));
1200 pfree(buf.data);
1201 }
1202 }
1203
1204 /* Cleanup index statistics and index names */
1205 for (int i = 0; i < vacrel->nindexes; i++)
1206 {
1207 if (vacrel->indstats[i])
1208 pfree(vacrel->indstats[i]);
1209
1210 if (instrument)
1211 pfree(indnames[i]);
1212 }
1213}

References _, AmAutoVacuumWorkerProcess, appendStringInfo(), appendStringInfoString(), ErrorContextCallback::arg, Assert, buf, BufferUsageAccumDiff(), ErrorContextCallback::callback, dead_items_alloc(), dead_items_cleanup(), ereport, errmsg(), errmsg_internal(), error_context_stack, fb(), get_database_name(), get_namespace_name(), GetCurrentTimestamp(), GlobalVisTestFor(), heap_vacuum_eager_scan_setup(), i, VacuumParams::index_cleanup, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, VacuumParams::is_wraparound, IsInParallelMode(), lazy_check_wraparound_failsafe(), lazy_scan_heap(), lazy_truncate_heap(), BufferUsage::local_blks_dirtied, BufferUsage::local_blks_hit, BufferUsage::local_blks_read, LOG, VacuumParams::log_vacuum_min_duration, Max, MultiXactIdPrecedesOrEquals(), MyBEEntry, MyDatabaseId, ngettext, NoLock, IndexBulkDeleteResult::num_pages, VacuumParams::nworkers, VacuumParams::options, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, IndexBulkDeleteResult::pages_newly_deleted, palloc0(), palloc0_object, palloc_array, pfree(), pg_rusage_init(), pg_rusage_show(), pgBufferUsage, pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), pgStatBlockReadTime, pgStatBlockWriteTime, pgWalUsage, ErrorContextCallback::previous, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_DELAY_TIME, PROGRESS_VACUUM_MODE, PROGRESS_VACUUM_MODE_AGGRESSIVE, PROGRESS_VACUUM_MODE_NORMAL, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, PROGRESS_VACUUM_STARTED_BY, PROGRESS_VACUUM_STARTED_BY_AUTOVACUUM, PROGRESS_VACUUM_STARTED_BY_AUTOVACUUM_WRAPAROUND, PROGRESS_VACUUM_STARTED_BY_MANUAL, pstrdup(), ReadNextTransactionId(), RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, RowExclusiveLock, BufferUsage::shared_blks_dirtied, BufferUsage::shared_blks_hit, BufferUsage::shared_blks_read, should_attempt_truncation(), PgBackendStatus::st_progress_param, TidStoreMemoryUsage(), TimestampDifference(), TimestampDifferenceExceeds(), track_cost_delay_timing, track_io_timing, TransactionIdPrecedesOrEquals(), VacuumParams::truncate, update_relstats_all_indexes(), vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_VERBOSE, VACOPTVALUE_AUTO, VACOPTVALUE_DISABLED, VACOPTVALUE_ENABLED, VACOPTVALUE_UNSPECIFIED, VACUUM_ERRCB_PHASE_UNKNOWN, vacuum_error_callback(), vacuum_get_cutoffs(), VacuumFailsafeActive, verbose, visibilitymap_count(), WalUsage::wal_buffers_full, WalUsage::wal_bytes, WalUsage::wal_fpi, WalUsage::wal_fpi_bytes, WalUsage::wal_records, and WalUsageAccumDiff().
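The read and write rates in the log output above are derived purely from buffer-usage deltas and wall-clock time. A minimal standalone sketch of that arithmetic, assuming the default 8 kB block size and invented counter values:

    #include <stdio.h>

    int
    main(void)
    {
        const double block_size = 8192.0;  /* assumes the default BLCKSZ */
        long   secs_dur = 12;              /* invented elapsed time */
        int    usecs_dur = 500000;
        long   total_blks_read = 40000;    /* invented buffer-usage deltas */
        long   total_blks_dirtied = 15000;
        double elapsed = secs_dur + usecs_dur / 1000000.0;
        double read_rate = 0, write_rate = 0;

        if (elapsed > 0)
        {
            /* bytes -> MB, divided by elapsed seconds */
            read_rate = block_size * total_blks_read / (1024 * 1024) / elapsed;
            write_rate = block_size * total_blks_dirtied / (1024 * 1024) / elapsed;
        }
        printf("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n",
               read_rate, write_rate);
        return 0;
    }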

◆ identify_and_fix_vm_corruption()

static void identify_and_fix_vm_corruption ( Relation  rel,
Buffer  heap_buffer,
BlockNumber  heap_blk,
Page  heap_page,
int  nlpdead_items,
Buffer  vmbuffer,
uint8 vmbits 
)
static

Definition at line 1982 of file vacuumlazy.c.

1987{
1988 Assert(visibilitymap_get_status(rel, heap_blk, &vmbuffer) == *vmbits);
1989
1990 Assert(BufferIsLockedByMeInMode(heap_buffer, BUFFER_LOCK_EXCLUSIVE));
1991
1992 /*
1993 * As of PostgreSQL 9.2, the visibility map bit should never be set if the
1994 * page-level bit is clear. However, it's possible that the bit got
1995 * cleared after heap_vac_scan_next_block() was called, so we must recheck
1996 * with buffer lock before concluding that the VM is corrupt.
1997 */
1998 if (!PageIsAllVisible(heap_page) &&
1999 ((*vmbits & VISIBILITYMAP_VALID_BITS) != 0))
2000 {
2001 ereport(WARNING,
2002 (errcode(ERRCODE_DATA_CORRUPTED),
2003 errmsg("page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
2004 RelationGetRelationName(rel), heap_blk)));
2005
2006 visibilitymap_clear(rel, heap_blk, vmbuffer,
2007 VISIBILITYMAP_VALID_BITS);
2008 *vmbits = 0;
2009 }
2010
2011 /*
2012 * It's possible for the value returned by
2013 * GetOldestNonRemovableTransactionId() to move backwards, so it's not
2014 * wrong for us to see tuples that appear to not be visible to everyone
2015 * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value
2016 * never moves backwards, but GetOldestNonRemovableTransactionId() is
2017 * conservative and sometimes returns a value that's unnecessarily small,
2018 * so if we see that contradiction it just means that the tuples that we
2019 * think are not visible to everyone yet actually are, and the
2020 * PD_ALL_VISIBLE flag is correct.
2021 *
2022 * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
2023 * however.
2024 */
2025 else if (PageIsAllVisible(heap_page) && nlpdead_items > 0)
2026 {
2027 ereport(WARNING,
2028 (errcode(ERRCODE_DATA_CORRUPTED),
2029 errmsg("page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
2030 RelationGetRelationName(rel), heap_blk)));
2031
2032 PageClearAllVisible(heap_page);
2033 MarkBufferDirty(heap_buffer);
2034 visibilitymap_clear(rel, heap_blk, vmbuffer,
2035 VISIBILITYMAP_VALID_BITS);
2036 *vmbits = 0;
2037 }
2038}

References Assert, BUFFER_LOCK_EXCLUSIVE, BufferIsLockedByMeInMode(), ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), fb(), MarkBufferDirty(), PageClearAllVisible(), PageIsAllVisible(), RelationGetRelationName, visibilitymap_clear(), visibilitymap_get_status(), VISIBILITYMAP_VALID_BITS, and WARNING.

Referenced by lazy_scan_prune().
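Stripped of buffer management and WAL concerns, the two corruption tests reduce to predicates over the page-level flag, the VM bits, and the LP_DEAD count. A standalone sketch of just that decision logic (the VM_* constants are stand-ins for the visibilitymap flags):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VM_ALL_VISIBLE 0x01     /* mirrors VISIBILITYMAP_ALL_VISIBLE */
    #define VM_ALL_FROZEN  0x02     /* mirrors VISIBILITYMAP_ALL_FROZEN */
    #define VM_VALID_BITS  (VM_ALL_VISIBLE | VM_ALL_FROZEN)

    /* Returns the corrected VM bits for one heap page. */
    static uint8_t
    check_vm_sanity(bool pd_all_visible, uint8_t vmbits, int nlpdead_items)
    {
        if (!pd_all_visible && (vmbits & VM_VALID_BITS) != 0)
        {
            /* VM bit set while the page-level bit is clear: clear the VM. */
            fprintf(stderr, "WARNING: VM bit set but page not all-visible\n");
            return 0;
        }
        if (pd_all_visible && nlpdead_items > 0)
        {
            /* LP_DEAD items are never legal on an all-visible page. */
            fprintf(stderr, "WARNING: all-visible page has LP_DEAD items\n");
            return 0;
        }
        return vmbits;              /* consistent: keep the bits as they are */
    }

    int
    main(void)
    {
        uint8_t vmbits = check_vm_sanity(false, VM_ALL_VISIBLE, 0);
        printf("corrected vmbits: %u\n", (unsigned) vmbits);
        return 0;
    }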

◆ lazy_check_wraparound_failsafe()

static bool lazy_check_wraparound_failsafe ( LVRelState vacrel)
static

Definition at line 3001 of file vacuumlazy.c.

3002{
3003 /* Don't warn more than once per VACUUM */
3004 if (VacuumFailsafeActive)
3005 return true;
3006
3007 if (unlikely(vacuum_xid_failsafe_check(&vacrel->cutoffs)))
3008 {
3009 const int progress_index[] = {
3010 PROGRESS_VACUUM_INDEXES_TOTAL,
3011 PROGRESS_VACUUM_INDEXES_PROCESSED,
3012 PROGRESS_VACUUM_MODE
3013 };
3014 int64 progress_val[3] = {0, 0, PROGRESS_VACUUM_MODE_FAILSAFE};
3015
3016 VacuumFailsafeActive = true;
3017
3018 /*
3019 * Abandon use of a buffer access strategy to allow use of all of
3020 * shared buffers. We assume the caller who allocated the memory for
3021 * the BufferAccessStrategy will free it.
3022 */
3023 vacrel->bstrategy = NULL;
3024
3025 /* Disable index vacuuming, index cleanup, and heap rel truncation */
3026 vacrel->do_index_vacuuming = false;
3027 vacrel->do_index_cleanup = false;
3028 vacrel->do_rel_truncate = false;
3029
3030 /* Reset the progress counters and set the failsafe mode */
3031 pgstat_progress_update_multi_param(3, progress_index, progress_val);
3032
3033 ereport(WARNING,
3034 (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
3035 vacrel->dbname, vacrel->relnamespace, vacrel->relname,
3036 vacrel->num_index_scans),
3037 errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
3038 errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
3039 "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
3040
3041 /* Stop applying cost limits from this point on */
3042 VacuumCostActive = false;
3043 VacuumCostBalance = 0;
3044
3045 return true;
3046 }
3047
3048 return false;
3049}

References ereport, errdetail(), errhint(), errmsg(), fb(), pgstat_progress_update_multi_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_MODE, PROGRESS_VACUUM_MODE_FAILSAFE, unlikely, vacuum_xid_failsafe_check(), VacuumCostActive, VacuumCostBalance, VacuumFailsafeActive, and WARNING.

Referenced by heap_vacuum_rel(), lazy_scan_heap(), and lazy_vacuum_all_indexes().
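The callers listed above invoke this check once up front and then at a fixed page cadence during the scan. A self-contained sketch of that pattern, with an invented age threshold standing in for vacuum_xid_failsafe_check():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* 4 GB worth of 8 kB blocks, as in FAILSAFE_EVERY_PAGES */
    #define FAILSAFE_EVERY_PAGES \
        ((uint32_t) (((uint64_t) 4 * 1024 * 1024 * 1024) / 8192))

    static bool failsafe_active = false;

    /* Stand-in for vacuum_xid_failsafe_check(): age-based trigger. */
    static bool
    xid_age_dangerous(uint64_t consumed_xids)
    {
        return consumed_xids > 1600000000ULL;   /* invented threshold */
    }

    static void
    maybe_trigger_failsafe(uint64_t consumed_xids)
    {
        if (failsafe_active)
            return;                 /* warn at most once per vacuum */
        if (xid_age_dangerous(consumed_xids))
        {
            failsafe_active = true;
            fprintf(stderr, "WARNING: bypassing nonessential maintenance\n");
        }
    }

    int
    main(void)
    {
        for (uint32_t blkno = 1; blkno <= 3 * FAILSAFE_EVERY_PAGES; blkno++)
            if (blkno % FAILSAFE_EVERY_PAGES == 0)
                maybe_trigger_failsafe((uint64_t) blkno * 4000);
        printf("failsafe active: %d\n", (int) failsafe_active);
        return 0;
    }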

◆ lazy_cleanup_all_indexes()

static void lazy_cleanup_all_indexes ( LVRelState vacrel)
static

Definition at line 3055 of file vacuumlazy.c.

3056{
3057 double reltuples = vacrel->new_rel_tuples;
3058 bool estimated_count = vacrel->scanned_pages < vacrel->rel_pages;
3059 const int progress_start_index[] = {
3060 PROGRESS_VACUUM_PHASE,
3061 PROGRESS_VACUUM_INDEXES_TOTAL
3062 };
3063 const int progress_end_index[] = {
3064 PROGRESS_VACUUM_INDEXES_TOTAL,
3065 PROGRESS_VACUUM_INDEXES_PROCESSED
3066 };
3067 int64 progress_start_val[2];
3068 int64 progress_end_val[2] = {0, 0};
3069
3070 Assert(vacrel->do_index_cleanup);
3071 Assert(vacrel->nindexes > 0);
3072
3073 /*
3074 * Report that we are now cleaning up indexes and the number of indexes to
3075 * cleanup.
3076 */
3077 progress_start_val[0] = PROGRESS_VACUUM_PHASE_INDEX_CLEANUP;
3078 progress_start_val[1] = vacrel->nindexes;
3079 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
3080
3081 if (!ParallelVacuumIsActive(vacrel))
3082 {
3083 for (int idx = 0; idx < vacrel->nindexes; idx++)
3084 {
3085 Relation indrel = vacrel->indrels[idx];
3086 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
3087
3088 vacrel->indstats[idx] =
3089 lazy_cleanup_one_index(indrel, istat, reltuples,
3090 estimated_count, vacrel);
3091
3092 /* Report the number of indexes cleaned up */
3093 pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
3094 idx + 1);
3095 }
3096 }
3097 else
3098 {
3099 /* Outsource everything to parallel variant */
3100 parallel_vacuum_cleanup_all_indexes(vacrel->rel, vacrel->pvs,
3101 vacrel->num_index_scans,
3102 estimated_count);
3103 }
3104
3105 /* Reset the progress counters */
3106 pgstat_progress_update_multi_param(2, progress_end_index, progress_end_val);
3107}

References Assert, fb(), idx(), lazy_cleanup_one_index(), parallel_vacuum_cleanup_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_PHASE, and PROGRESS_VACUUM_PHASE_INDEX_CLEANUP.

Referenced by lazy_scan_heap().
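Note how the reltuples figure handed to each index is flagged: it is only an exact count when no pages were skipped via the visibility map. A trivial sketch of that decision, with invented page counts:

    #include <stdbool.h>
    #include <stdio.h>

    int
    main(void)
    {
        unsigned scanned_pages = 900, rel_pages = 1000;  /* invented counts */
        double   new_rel_tuples = 51234.0;

        /* Skipped pages mean reltuples was extrapolated, not counted. */
        bool estimated_count = scanned_pages < rel_pages;

        printf("reltuples %.0f (%s)\n", new_rel_tuples,
               estimated_count ? "estimated" : "exact");
        return 0;
    }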

◆ lazy_cleanup_one_index()

static IndexBulkDeleteResult * lazy_cleanup_one_index ( Relation  indrel,
IndexBulkDeleteResult istat,
double  reltuples,
bool  estimated_count,
LVRelState vacrel 
)
static

Definition at line 3172 of file vacuumlazy.c.

3175{
3176 IndexVacuumInfo ivinfo;
3177 LVSavedErrInfo saved_err_info;
3178
3179 ivinfo.index = indrel;
3180 ivinfo.heaprel = vacrel->rel;
3181 ivinfo.analyze_only = false;
3182 ivinfo.report_progress = false;
3183 ivinfo.estimated_count = estimated_count;
3184 ivinfo.message_level = DEBUG2;
3185
3186 ivinfo.num_heap_tuples = reltuples;
3187 ivinfo.strategy = vacrel->bstrategy;
3188
3189 /*
3190 * Update error traceback information.
3191 *
3192 * The index name is saved during this phase and restored immediately
3193 * after this phase. See vacuum_error_callback.
3194 */
3195 Assert(vacrel->indname == NULL);
3196 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
3197 update_vacuum_error_info(vacrel, &saved_err_info,
3198 VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
3199 InvalidBlockNumber, InvalidOffsetNumber);
3200
3201 istat = vac_cleanup_one_index(&ivinfo, istat);
3202
3203 /* Revert to the previous phase information for error traceback */
3204 restore_vacuum_error_info(vacrel, &saved_err_info);
3205 pfree(vacrel->indname);
3206 vacrel->indname = NULL;
3207
3208 return istat;
3209}

References Assert, DEBUG2, fb(), InvalidBlockNumber, InvalidOffsetNumber, pfree(), pstrdup(), RelationGetRelationName, restore_vacuum_error_info(), update_vacuum_error_info(), vac_cleanup_one_index(), and VACUUM_ERRCB_PHASE_INDEX_CLEANUP.

Referenced by lazy_cleanup_all_indexes().

◆ lazy_scan_heap()

static void lazy_scan_heap ( LVRelState vacrel)
static

Definition at line 1252 of file vacuumlazy.c.

1253{
1254 ReadStream *stream;
1255 BlockNumber rel_pages = vacrel->rel_pages,
1256 blkno = 0,
1257 next_fsm_block_to_vacuum = 0;
1258 BlockNumber orig_eager_scan_success_limit =
1259 vacrel->eager_scan_remaining_successes; /* for logging */
1260 Buffer vmbuffer = InvalidBuffer;
1261 const int initprog_index[] = {
1262 PROGRESS_VACUUM_PHASE,
1263 PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
1264 PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES
1265 };
1266 int64 initprog_val[3];
1267
1268 /* Report that we're scanning the heap, advertising total # of blocks */
1269 initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
1270 initprog_val[1] = rel_pages;
1271 initprog_val[2] = vacrel->dead_items_info->max_bytes;
1272 pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
1273
1274 /* Initialize for the first heap_vac_scan_next_block() call */
1275 vacrel->current_block = InvalidBlockNumber;
1276 vacrel->next_unskippable_block = InvalidBlockNumber;
1277 vacrel->next_unskippable_eager_scanned = false;
1278 vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1279
1280 /*
1281 * Set up the read stream for vacuum's first pass through the heap.
1282 *
1283 * This could be made safe for READ_STREAM_USE_BATCHING, but only with
1284 * explicit work in heap_vac_scan_next_block.
1285 */
1286 stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE,
1287 vacrel->bstrategy,
1288 vacrel->rel,
1289 MAIN_FORKNUM,
1290 heap_vac_scan_next_block,
1291 vacrel,
1292 sizeof(bool));
1293
1294 while (true)
1295 {
1296 Buffer buf;
1297 Page page;
1298 bool was_eager_scanned = false;
1299 int ndeleted = 0;
1300 bool has_lpdead_items;
1301 void *per_buffer_data = NULL;
1302 bool vm_page_frozen = false;
1303 bool got_cleanup_lock = false;
1304
1305 vacuum_delay_point(false);
1306
1307 /*
1308 * Regularly check if wraparound failsafe should trigger.
1309 *
1310 * There is a similar check inside lazy_vacuum_all_indexes(), but
1311 * relfrozenxid might start to look dangerously old before we reach
1312 * that point. This check also provides failsafe coverage for the
1313 * one-pass strategy, and the two-pass strategy with the index_cleanup
1314 * param set to 'off'.
1315 */
1316 if (vacrel->scanned_pages > 0 &&
1317 vacrel->scanned_pages % FAILSAFE_EVERY_PAGES == 0)
1318 lazy_check_wraparound_failsafe(vacrel);
1319
1320 /*
1321 * Consider if we definitely have enough space to process TIDs on page
1322 * already. If we are close to overrunning the available space for
1323 * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
1324 * this page. However, let's force at least one page-worth of tuples
1325 * to be stored as to ensure we do at least some work when the memory
1326 * configured is so low that we run out before storing anything.
1327 */
1328 if (vacrel->dead_items_info->num_items > 0 &&
1329 TidStoreMemoryUsage(vacrel->dead_items) > vacrel->dead_items_info->max_bytes)
1330 {
1331 /*
1332 * Before beginning index vacuuming, we release any pin we may
1333 * hold on the visibility map page. This isn't necessary for
1334 * correctness, but we do it anyway to avoid holding the pin
1335 * across a lengthy, unrelated operation.
1336 */
1337 if (BufferIsValid(vmbuffer))
1338 {
1339 ReleaseBuffer(vmbuffer);
1340 vmbuffer = InvalidBuffer;
1341 }
1342
1343 /* Perform a round of index and heap vacuuming */
1344 vacrel->consider_bypass_optimization = false;
1345 lazy_vacuum(vacrel);
1346
1347 /*
1348 * Vacuum the Free Space Map to make newly-freed space visible on
1349 * upper-level FSM pages. Note that blkno is the previously
1350 * processed block.
1351 */
1352 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1353 blkno + 1);
1354 next_fsm_block_to_vacuum = blkno;
1355
1356 /* Report that we are once again scanning the heap */
1357 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1358 PROGRESS_VACUUM_PHASE_SCAN_HEAP);
1359 }
1360
1361 buf = read_stream_next_buffer(stream, &per_buffer_data);
1362
1363 /* The relation is exhausted. */
1364 if (!BufferIsValid(buf))
1365 break;
1366
1367 was_eager_scanned = *((bool *) per_buffer_data);
1368 CheckBufferIsPinnedOnce(buf);
1369 page = BufferGetPage(buf);
1370 blkno = BufferGetBlockNumber(buf);
1371
1372 vacrel->scanned_pages++;
1373 if (was_eager_scanned)
1374 vacrel->eager_scanned_pages++;
1375
1376 /* Report as block scanned, update error traceback information */
1377 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1378 update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_SCAN_HEAP,
1379 blkno, InvalidOffsetNumber);
1380
1381 /*
1382 * Pin the visibility map page in case we need to mark the page
1383 * all-visible. In most cases this will be very cheap, because we'll
1384 * already have the correct page pinned anyway.
1385 */
1386 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
1387
1388 /*
1389 * We need a buffer cleanup lock to prune HOT chains and defragment
1390 * the page in lazy_scan_prune. But when it's not possible to acquire
1391 * a cleanup lock right away, we may be able to settle for reduced
1392 * processing using lazy_scan_noprune.
1393 */
1394 got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
1395
1396 if (!got_cleanup_lock)
1397 LockBuffer(buf, BUFFER_LOCK_SHARE);
1398
1399 /* Check for new or empty pages before lazy_scan_[no]prune call */
1400 if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock,
1401 vmbuffer))
1402 {
1403 /* Processed as new/empty page (lock and pin released) */
1404 continue;
1405 }
1406
1407 /*
1408 * If we didn't get the cleanup lock, we can still collect LP_DEAD
1409 * items in the dead_items area for later vacuuming, count live and
1410 * recently dead tuples for vacuum logging, and determine if this
1411 * block could later be truncated. If we encounter any xid/mxids that
1412 * require advancing the relfrozenxid/relminxid, we'll have to wait
1413 * for a cleanup lock and call lazy_scan_prune().
1414 */
1415 if (!got_cleanup_lock &&
1416 !lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
1417 {
1418 /*
1419 * lazy_scan_noprune could not do all required processing. Wait
1420 * for a cleanup lock, and call lazy_scan_prune in the usual way.
1421 */
1422 Assert(vacrel->aggressive);
1423 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1424 LockBufferForCleanup(buf);
1425 got_cleanup_lock = true;
1426 }
1427
1428 /*
1429 * If we have a cleanup lock, we must now prune, freeze, and count
1430 * tuples. We may have acquired the cleanup lock originally, or we may
1431 * have gone back and acquired it after lazy_scan_noprune() returned
1432 * false. Either way, the page hasn't been processed yet.
1433 *
1434 * Like lazy_scan_noprune(), lazy_scan_prune() will count
1435 * recently_dead_tuples and live tuples for vacuum logging, determine
1436 * if the block can later be truncated, and accumulate the details of
1437 * remaining LP_DEAD line pointers on the page into dead_items. These
1438 * dead items include those pruned by lazy_scan_prune() as well as
1439 * line pointers previously marked LP_DEAD.
1440 */
1441 if (got_cleanup_lock)
1442 ndeleted = lazy_scan_prune(vacrel, buf, blkno, page,
1443 vmbuffer,
1444 &has_lpdead_items, &vm_page_frozen);
1445
1446 /*
1447 * Count an eagerly scanned page as a failure or a success.
1448 *
1449 * Only lazy_scan_prune() freezes pages, so if we didn't get the
1450 * cleanup lock, we won't have frozen the page. However, we only count
1451 * pages that were too new to require freezing as eager freeze
1452 * failures.
1453 *
1454 * We could gather more information from lazy_scan_noprune() about
1455 * whether or not there were tuples with XIDs or MXIDs older than the
1456 * FreezeLimit or MultiXactCutoff. However, for simplicity, we simply
1457 * exclude pages skipped due to cleanup lock contention from eager
1458 * freeze algorithm caps.
1459 */
1460 if (was_eager_scanned)
1461 {
1462 /* Aggressive vacuums do not eager scan. */
1463 Assert(!vacrel->aggressive);
1464
1465 if (vm_page_frozen)
1466 {
1467 if (vacrel->eager_scan_remaining_successes > 0)
1468 vacrel->eager_scan_remaining_successes--;
1469
1470 if (vacrel->eager_scan_remaining_successes == 0)
1471 {
1472 /*
1473 * Report only once that we disabled eager scanning. We
1474 * may eagerly read ahead blocks in excess of the success
1475 * or failure caps before attempting to freeze them, so we
1476 * could reach here even after disabling additional eager
1477 * scanning.
1478 */
1479 if (vacrel->eager_scan_max_fails_per_region > 0)
1480 ereport(vacrel->verbose ? INFO : DEBUG2,
1481 (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of relation \"%s.%s.%s\"",
1482 orig_eager_scan_success_limit,
1483 vacrel->dbname, vacrel->relnamespace,
1484 vacrel->relname)));
1485
1486 /*
1487 * If we hit our success cap, permanently disable eager
1488 * scanning by setting the other eager scan management
1489 * fields to their disabled values.
1490 */
1491 vacrel->eager_scan_remaining_fails = 0;
1492 vacrel->next_eager_scan_region_start = InvalidBlockNumber;
1493 vacrel->eager_scan_max_fails_per_region = 0;
1494 }
1495 }
1496 else if (vacrel->eager_scan_remaining_fails > 0)
1497 vacrel->eager_scan_remaining_fails--;
1498 }
1499
1500 /*
1501 * Now drop the buffer lock and, potentially, update the FSM.
1502 *
1503 * Our goal is to update the freespace map the last time we touch the
1504 * page. If we'll process a block in the second pass, we may free up
1505 * additional space on the page, so it is better to update the FSM
1506 * after the second pass. If the relation has no indexes, or if index
1507 * vacuuming is disabled, there will be no second heap pass; if this
1508 * particular page has no dead items, the second heap pass will not
1509 * touch this page. So, in those cases, update the FSM now.
1510 *
1511 * Note: In corner cases, it's possible to miss updating the FSM
1512 * entirely. If index vacuuming is currently enabled, we'll skip the
1513 * FSM update now. But if failsafe mode is later activated, or there
1514 * are so few dead tuples that index vacuuming is bypassed, there will
1515 * also be no opportunity to update the FSM later, because we'll never
1516 * revisit this page. Since updating the FSM is desirable but not
1517 * absolutely required, that's OK.
1518 */
1519 if (vacrel->nindexes == 0
1520 || !vacrel->do_index_vacuuming
1521 || !has_lpdead_items)
1522 {
1523 Size freespace = PageGetHeapFreeSpace(page);
1524
1525 UnlockReleaseBuffer(buf);
1526 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1527
1528 /*
1529 * Periodically perform FSM vacuuming to make newly-freed space
1530 * visible on upper FSM pages. This is done after vacuuming if the
1531 * table has indexes. There will only be newly-freed space if we
1532 * held the cleanup lock and lazy_scan_prune() was called.
1533 */
1534 if (got_cleanup_lock && vacrel->nindexes == 0 && ndeleted > 0 &&
1535 blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1536 {
1537 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1538 blkno);
1539 next_fsm_block_to_vacuum = blkno;
1540 }
1541 }
1542 else
1543 UnlockReleaseBuffer(buf);
1544 }
1545
1546 vacrel->blkno = InvalidBlockNumber;
1547 if (BufferIsValid(vmbuffer))
1548 ReleaseBuffer(vmbuffer);
1549
1550 /*
1551 * Report that everything is now scanned. We never skip scanning the last
1552 * block in the relation, so we can pass rel_pages here.
1553 */
1554 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED,
1555 rel_pages);
1556
1557 /* now we can compute the new value for pg_class.reltuples */
1558 vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, rel_pages,
1559 vacrel->scanned_pages,
1560 vacrel->live_tuples);
1561
1562 /*
1563 * Also compute the total number of surviving heap entries. In the
1564 * (unlikely) scenario that new_live_tuples is -1, take it as zero.
1565 */
1566 vacrel->new_rel_tuples =
1567 Max(vacrel->new_live_tuples, 0) + vacrel->recently_dead_tuples +
1568 vacrel->missed_dead_tuples;
1569
1570 read_stream_end(stream);
1571
1572 /*
1573 * Do index vacuuming (call each index's ambulkdelete routine), then do
1574 * related heap vacuuming
1575 */
1576 if (vacrel->dead_items_info->num_items > 0)
1577 lazy_vacuum(vacrel);
1578
1579 /*
1580 * Vacuum the remainder of the Free Space Map. We must do this whether or
1581 * not there were indexes, and whether or not we bypassed index vacuuming.
1582 * We can pass rel_pages here because we never skip scanning the last
1583 * block of the relation.
1584 */
1585 if (rel_pages > next_fsm_block_to_vacuum)
1586 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, rel_pages);
1587
1588 /* report all blocks vacuumed */
1589 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, rel_pages);
1590
1591 /* Do final index cleanup (call each index's amvacuumcleanup routine) */
1592 if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
1593 lazy_cleanup_all_indexes(vacrel);
1594}

References Assert, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), CheckBufferIsPinnedOnce(), ConditionalLockBufferForCleanup(), DEBUG2, ereport, errmsg(), FAILSAFE_EVERY_PAGES, fb(), FreeSpaceMapVacuumRange(), heap_vac_scan_next_block(), INFO, InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_check_wraparound_failsafe(), lazy_cleanup_all_indexes(), lazy_scan_new_or_empty(), lazy_scan_noprune(), lazy_scan_prune(), lazy_vacuum(), LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, Max, PageGetHeapFreeSpace(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, read_stream_begin_relation(), read_stream_end(), READ_STREAM_MAINTENANCE, read_stream_next_buffer(), RecordPageWithFreeSpace(), ReleaseBuffer(), TidStoreMemoryUsage(), UnlockReleaseBuffer(), update_vacuum_error_info(), vac_estimate_reltuples(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_FSM_EVERY_PAGES, and visibilitymap_pin().

Referenced by heap_vacuum_rel().
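The memory-bounded structure of this first pass can be sketched independently of the TID store: collect dead items page by page, and pause for a full index/heap vacuum cycle whenever the accumulated store exceeds its budget. A schematic, self-contained version (sizes and per-page counts invented):

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for the TID store and its memory accounting. */
    static size_t dead_items_bytes = 0;
    static long   dead_items_count = 0;

    static void
    vacuum_indexes_and_heap(void)
    {
        /* ... ambulkdelete for every index, then mark items LP_UNUSED ... */
        dead_items_bytes = 0;
        dead_items_count = 0;
    }

    int
    main(void)
    {
        const size_t max_bytes = 64 * 1024;   /* invented maintenance budget */

        for (unsigned blkno = 0; blkno < 10000; blkno++)
        {
            /* Pause before this page if the store is already over budget. */
            if (dead_items_count > 0 && dead_items_bytes > max_bytes)
                vacuum_indexes_and_heap();

            /* Pretend each page contributes a few dead items. */
            dead_items_count += 3;
            dead_items_bytes += 3 * 2 * sizeof(unsigned);
        }
        if (dead_items_count > 0)
            vacuum_indexes_and_heap();        /* final round after the scan */
        printf("done\n");
        return 0;
    }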

◆ lazy_scan_new_or_empty()

static bool lazy_scan_new_or_empty ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool  sharelock,
Buffer  vmbuffer 
)
static

Definition at line 1850 of file vacuumlazy.c.

1852{
1853 Size freespace;
1854
1855 if (PageIsNew(page))
1856 {
1857 /*
1858 * All-zeroes pages can be left over if either a backend extends the
1859 * relation by a single page, but crashes before the newly initialized
1860 * page has been written out, or when bulk-extending the relation
1861 * (which creates a number of empty pages at the tail end of the
1862 * relation), and then enters them into the FSM.
1863 *
1864 * Note we do not enter the page into the visibilitymap. That has the
1865 * downside that we repeatedly visit this page in subsequent vacuums,
1866 * but otherwise we'll never discover the space on a promoted standby.
1867 * The harm of repeated checking ought to normally not be too bad. The
1868 * space usually should be used at some point, otherwise there
1869 * wouldn't be any regular vacuums.
1870 *
1871 * Make sure these pages are in the FSM, to ensure they can be reused.
1872 * Do that by testing if there's any space recorded for the page. If
1873 * not, enter it. We do so after releasing the lock on the heap page,
1874 * the FSM is approximate, after all.
1875 */
1876 UnlockReleaseBuffer(buf);
1877
1878 if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
1879 {
1880 freespace = BLCKSZ - SizeOfPageHeaderData;
1881
1882 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1883 }
1884
1885 return true;
1886 }
1887
1888 if (PageIsEmpty(page))
1889 {
1890 /*
1891 * It seems likely that caller will always be able to get a cleanup
1892 * lock on an empty page. But don't take any chances -- escalate to
1893 * an exclusive lock (still don't need a cleanup lock, though).
1894 */
1895 if (sharelock)
1896 {
1897 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1898 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
1899
1900 if (!PageIsEmpty(page))
1901 {
1902 /* page isn't new or empty -- keep lock and pin for now */
1903 return false;
1904 }
1905 }
1906 else
1907 {
1908 /* Already have a full cleanup lock (which is more than enough) */
1909 }
1910
1911 /*
1912 * Unlike new pages, empty pages are always set all-visible and
1913 * all-frozen.
1914 */
1915 if (!PageIsAllVisible(page))
1916 {
1917 START_CRIT_SECTION();
1918
1919 /* mark buffer dirty before writing a WAL record */
1920 MarkBufferDirty(buf);
1921
1922 /*
1923 * It's possible that another backend has extended the heap,
1924 * initialized the page, and then failed to WAL-log the page due
1925 * to an ERROR. Since heap extension is not WAL-logged, recovery
1926 * might try to replay our record setting the page all-visible and
1927 * find that the page isn't initialized, which will cause a PANIC.
1928 * To prevent that, check whether the page has been previously
1929 * WAL-logged, and if not, do that now.
1930 */
1931 if (RelationNeedsWAL(vacrel->rel) &&
1932 !XLogRecPtrIsValid(PageGetLSN(page)))
1933 log_newpage_buffer(buf, true);
1934
1935 PageSetAllVisible(page);
1936 visibilitymap_set(vacrel->rel, blkno, buf,
1937 InvalidXLogRecPtr,
1938 vmbuffer, InvalidTransactionId,
1939 VISIBILITYMAP_ALL_VISIBLE |
1940 VISIBILITYMAP_ALL_FROZEN);
1941 END_CRIT_SECTION();
1942
1943 /* Count the newly all-frozen pages for logging */
1944 vacrel->new_all_visible_pages++;
1945 vacrel->new_all_visible_all_frozen_pages++;
1946 }
1947
1948 freespace = PageGetHeapFreeSpace(page);
1949 UnlockReleaseBuffer(buf);
1950 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1951 return true;
1952 }
1953
1954 /* page isn't new or empty -- keep lock and pin */
1955 return false;
1956}

References buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, END_CRIT_SECTION, fb(), GetRecordedFreeSpace(), InvalidTransactionId, InvalidXLogRecPtr, LockBuffer(), log_newpage_buffer(), MarkBufferDirty(), PageGetHeapFreeSpace(), PageGetLSN(), PageIsAllVisible(), PageIsEmpty(), PageIsNew(), PageSetAllVisible(), RecordPageWithFreeSpace(), RelationNeedsWAL, SizeOfPageHeaderData, START_CRIT_SECTION, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), and XLogRecPtrIsValid.

Referenced by lazy_scan_heap().
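For the PageIsNew() case, the free space recorded in the FSM is simply the whole block minus the page header. A one-line sketch of that computation with assumed sizes (8 kB block; a 24-byte stand-in for SizeOfPageHeaderData):

    #include <stdio.h>

    int
    main(void)
    {
        const unsigned block_size = 8192;   /* assumes the default BLCKSZ */
        const unsigned page_header = 24;    /* stand-in for SizeOfPageHeaderData */

        /* An uninitialized (all-zero) page is recorded as entirely free. */
        unsigned freespace = block_size - page_header;
        printf("recording %u bytes of free space for the new page\n", freespace);
        return 0;
    }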

◆ lazy_scan_noprune()

static bool lazy_scan_noprune ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool has_lpdead_items 
)
static

Definition at line 2271 of file vacuumlazy.c.

2276{
2277 OffsetNumber offnum,
2278 maxoff;
2279 int lpdead_items,
2280 live_tuples,
2281 recently_dead_tuples,
2282 missed_dead_tuples;
2283 bool hastup;
2284 HeapTupleHeader tupleheader;
2285 TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
2286 MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid;
2287 OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
2288
2289 Assert(BufferGetBlockNumber(buf) == blkno);
2290
2291 hastup = false; /* for now */
2292
2293 lpdead_items = 0;
2294 live_tuples = 0;
2295 recently_dead_tuples = 0;
2296 missed_dead_tuples = 0;
2297
2298 maxoff = PageGetMaxOffsetNumber(page);
2299 for (offnum = FirstOffsetNumber;
2300 offnum <= maxoff;
2301 offnum = OffsetNumberNext(offnum))
2302 {
2303 ItemId itemid;
2304 HeapTupleData tuple;
2305
2306 vacrel->offnum = offnum;
2307 itemid = PageGetItemId(page, offnum);
2308
2309 if (!ItemIdIsUsed(itemid))
2310 continue;
2311
2312 if (ItemIdIsRedirected(itemid))
2313 {
2314 hastup = true;
2315 continue;
2316 }
2317
2318 if (ItemIdIsDead(itemid))
2319 {
2320 /*
2321 * Deliberately don't set hastup=true here. See same point in
2322 * lazy_scan_prune for an explanation.
2323 */
2324 deadoffsets[lpdead_items++] = offnum;
2325 continue;
2326 }
2327
2328 hastup = true; /* page prevents rel truncation */
2329 tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
2330 if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
2331 &NoFreezePageRelfrozenXid,
2332 &NoFreezePageRelminMxid))
2333 {
2334 /* Tuple with XID < FreezeLimit (or MXID < MultiXactCutoff) */
2335 if (vacrel->aggressive)
2336 {
2337 /*
2338 * Aggressive VACUUMs must always be able to advance rel's
2339 * relfrozenxid to a value >= FreezeLimit (and be able to
2340 * advance rel's relminmxid to a value >= MultiXactCutoff).
2341 * The ongoing aggressive VACUUM won't be able to do that
2342 * unless it can freeze an XID (or MXID) from this tuple now.
2343 *
2344 * The only safe option is to have caller perform processing
2345 * of this page using lazy_scan_prune. Caller might have to
2346 * wait a while for a cleanup lock, but it can't be helped.
2347 */
2348 vacrel->offnum = InvalidOffsetNumber;
2349 return false;
2350 }
2351
2352 /*
2353 * Non-aggressive VACUUMs are under no obligation to advance
2354 * relfrozenxid (even by one XID). We can be much laxer here.
2355 *
2356 * Currently we always just accept an older final relfrozenxid
2357 * and/or relminmxid value. We never make caller wait or work a
2358 * little harder, even when it likely makes sense to do so.
2359 */
2360 }
2361
2362 ItemPointerSet(&(tuple.t_self), blkno, offnum);
2363 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2364 tuple.t_len = ItemIdGetLength(itemid);
2365 tuple.t_tableOid = RelationGetRelid(vacrel->rel);
2366
2367 switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
2368 buf))
2369 {
2370 case HEAPTUPLE_DELETE_IN_PROGRESS:
2371 case HEAPTUPLE_LIVE:
2372
2373 /*
2374 * Count both cases as live, just like lazy_scan_prune
2375 */
2376 live_tuples++;
2377
2378 break;
2379 case HEAPTUPLE_DEAD:
2380
2381 /*
2382 * There is some useful work for pruning to do, that won't be
2383 * done due to failure to get a cleanup lock.
2384 */
2385 missed_dead_tuples++;
2386 break;
2387 case HEAPTUPLE_RECENTLY_DEAD:
2388
2389 /*
2390 * Count in recently_dead_tuples, just like lazy_scan_prune
2391 */
2392 recently_dead_tuples++;
2393 break;
2394 case HEAPTUPLE_INSERT_IN_PROGRESS:
2395
2396 /*
2397 * Do not count these rows as live, just like lazy_scan_prune
2398 */
2399 break;
2400 default:
2401 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2402 break;
2403 }
2404 }
2405
2406 vacrel->offnum = InvalidOffsetNumber;
2407
2408 /*
2409 * By here we know for sure that caller can put off freezing and pruning
2410 * this particular page until the next VACUUM. Remember its details now.
2411 * (lazy_scan_prune expects a clean slate, so we have to do this last.)
2412 */
2413 vacrel->NewRelfrozenXid = NoFreezePageRelfrozenXid;
2414 vacrel->NewRelminMxid = NoFreezePageRelminMxid;
2415
2416 /* Save any LP_DEAD items found on the page in dead_items */
2417 if (vacrel->nindexes == 0)
2418 {
2419 /* Using one-pass strategy (since table has no indexes) */
2420 if (lpdead_items > 0)
2421 {
2422 /*
2423 * Perfunctory handling for the corner case where a single pass
2424 * strategy VACUUM cannot get a cleanup lock, and it turns out
2425 * that there is one or more LP_DEAD items: just count the LP_DEAD
2426 * items as missed_dead_tuples instead. (This is a bit dishonest,
2427 * but it beats having to maintain specialized heap vacuuming code
2428 * forever, for vanishingly little benefit.)
2429 */
2430 hastup = true;
2431 missed_dead_tuples += lpdead_items;
2432 }
2433 }
2434 else if (lpdead_items > 0)
2435 {
2436 /*
2437 * Page has LP_DEAD items, and so any references/TIDs that remain in
2438 * indexes will be deleted during index vacuuming (and then marked
2439 * LP_UNUSED in the heap)
2440 */
2441 vacrel->lpdead_item_pages++;
2442
2443 dead_items_add(vacrel, blkno, deadoffsets, lpdead_items);
2444
2445 vacrel->lpdead_items += lpdead_items;
2446 }
2447
2448 /*
2449 * Finally, add relevant page-local counts to whole-VACUUM counts
2450 */
2451 vacrel->live_tuples += live_tuples;
2452 vacrel->recently_dead_tuples += recently_dead_tuples;
2453 vacrel->missed_dead_tuples += missed_dead_tuples;
2454 if (missed_dead_tuples > 0)
2455 vacrel->missed_dead_pages++;
2456
2457 /* Can't truncate this page */
2458 if (hastup)
2459 vacrel->nonempty_pages = blkno + 1;
2460
2461 /* Did we find LP_DEAD items? */
2462 *has_lpdead_items = (lpdead_items > 0);
2463
2464 /* Caller won't need to call lazy_scan_prune with same page */
2465 return true;
2466}

References Assert, buf, BufferGetBlockNumber(), dead_items_add(), elog, ERROR, fb(), FirstOffsetNumber, heap_tuple_should_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, ItemIdGetLength, ItemIdIsDead, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), MaxHeapTuplesPerPage, OffsetNumberNext, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by lazy_scan_heap().
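The heart of this no-cleanup-lock path is a triage over the page's line pointers: redirects and normal tuples block truncation, while LP_DEAD stubs are remembered for index vacuuming but deliberately do not. A standalone sketch with a toy page:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { LP_UNUSED, LP_NORMAL, LP_REDIRECT, LP_DEAD } LpFlag;

    int
    main(void)
    {
        /* Hypothetical page: a mix of line pointer states. */
        LpFlag items[] = {LP_NORMAL, LP_REDIRECT, LP_DEAD, LP_UNUSED, LP_NORMAL};
        int  lpdead_items = 0;
        bool hastup = false;

        for (unsigned i = 0; i < sizeof(items) / sizeof(items[0]); i++)
        {
            if (items[i] == LP_UNUSED)
                continue;                    /* nothing there */
            if (items[i] == LP_REDIRECT)
            {
                hastup = true;               /* page blocks rel truncation */
                continue;
            }
            if (items[i] == LP_DEAD)
            {
                lpdead_items++;              /* remember for index vacuuming */
                continue;                    /* deliberately not hastup */
            }
            hastup = true;                   /* LP_NORMAL: a real tuple */
        }
        printf("hastup=%d, lpdead_items=%d\n", (int) hastup, lpdead_items);
        return 0;
    }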

◆ lazy_scan_prune()

static int lazy_scan_prune ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
Buffer  vmbuffer,
bool has_lpdead_items,
bool vm_page_frozen 
)
static

Definition at line 2058 of file vacuumlazy.c.

2065{
2066 Relation rel = vacrel->rel;
2067 PruneFreezeResult presult;
2068 PruneFreezeParams params = {
2069 .relation = rel,
2070 .buffer = buf,
2071 .reason = PRUNE_VACUUM_SCAN,
2072 .options = HEAP_PAGE_PRUNE_FREEZE,
2073 .vistest = vacrel->vistest,
2074 .cutoffs = &vacrel->cutoffs,
2075 };
2076 uint8 old_vmbits = 0;
2077 uint8 new_vmbits = 0;
2078
2079 Assert(BufferGetBlockNumber(buf) == blkno);
2080
2081 /*
2082 * Prune all HOT-update chains and potentially freeze tuples on this page.
2083 *
2084 * If the relation has no indexes, we can immediately mark would-be dead
2085 * items LP_UNUSED.
2086 *
2087 * The number of tuples removed from the page is returned in
2088 * presult.ndeleted. It should not be confused with presult.lpdead_items;
2089 * presult.lpdead_items's final value can be thought of as the number of
2090 * tuples that were deleted from indexes.
2091 *
2092 * We will update the VM after collecting LP_DEAD items and freezing
2093 * tuples. Pruning will have determined whether or not the page is
2094 * all-visible.
2095 */
2096 if (vacrel->nindexes == 0)
2097 params.options |= HEAP_PAGE_PRUNE_MARK_UNUSED_NOW;
2098
2099 heap_page_prune_and_freeze(&params,
2100 &presult,
2101 &vacrel->offnum,
2102 &vacrel->NewRelfrozenXid, &vacrel->NewRelminMxid);
2103
2104 Assert(MultiXactIdIsValid(vacrel->NewRelminMxid));
2105 Assert(TransactionIdIsValid(vacrel->NewRelfrozenXid));
2106
2107 if (presult.nfrozen > 0)
2108 {
2109 /*
2110 * We don't increment the new_frozen_tuple_pages instrumentation
2111 * counter when nfrozen == 0, since it only counts pages with newly
2112 * frozen tuples (don't confuse that with pages newly set all-frozen
2113 * in VM).
2114 */
2115 vacrel->new_frozen_tuple_pages++;
2116 }
2117
2118 /*
2119 * VACUUM will call heap_page_is_all_visible() during the second pass over
2120 * the heap to determine all_visible and all_frozen for the page -- this
2121 * is a specialized version of the logic from this function. Now that
2122 * we've finished pruning and freezing, make sure that we're in total
2123 * agreement with heap_page_is_all_visible() using an assertion.
2124 */
2125#ifdef USE_ASSERT_CHECKING
2126 if (presult.all_visible)
2127 {
2128 TransactionId debug_cutoff;
2129 bool debug_all_frozen;
2130
2131 Assert(presult.lpdead_items == 0);
2132
2133 Assert(heap_page_is_all_visible(vacrel->rel, buf,
2134 vacrel->cutoffs.OldestXmin, &debug_all_frozen,
2135 &debug_cutoff, &vacrel->offnum));
2136
2137 Assert(presult.all_frozen == debug_all_frozen);
2138
2139 Assert(!TransactionIdIsValid(debug_cutoff) ||
2140 debug_cutoff == presult.vm_conflict_horizon);
2141 }
2142#endif
2143
2144 /*
2145 * Now save details of the LP_DEAD items from the page in vacrel
2146 */
2147 if (presult.lpdead_items > 0)
2148 {
2149 vacrel->lpdead_item_pages++;
2150
2151 /*
2152 * deadoffsets are collected incrementally in
2153 * heap_page_prune_and_freeze() as each dead line pointer is recorded,
2154 * with an indeterminate order, but dead_items_add requires them to be
2155 * sorted.
2156 */
2157 qsort(presult.deadoffsets, presult.lpdead_items, sizeof(OffsetNumber),
2158 cmpOffsetNumbers);
2159
2160 dead_items_add(vacrel, blkno, presult.deadoffsets, presult.lpdead_items);
2161 }
2162
2163 /* Finally, add page-local counts to whole-VACUUM counts */
2164 vacrel->tuples_deleted += presult.ndeleted;
2165 vacrel->tuples_frozen += presult.nfrozen;
2166 vacrel->lpdead_items += presult.lpdead_items;
2167 vacrel->live_tuples += presult.live_tuples;
2168 vacrel->recently_dead_tuples += presult.recently_dead_tuples;
2169
2170 /* Can't truncate this page */
2171 if (presult.hastup)
2172 vacrel->nonempty_pages = blkno + 1;
2173
2174 /* Did we find LP_DEAD items? */
2175 *has_lpdead_items = (presult.lpdead_items > 0);
2176
2177 Assert(!presult.all_visible || !(*has_lpdead_items));
2178 Assert(!presult.all_frozen || presult.all_visible);
2179
2180 old_vmbits = visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer);
2181
2182 identify_and_fix_vm_corruption(vacrel->rel, buf, blkno, page,
2183 presult.lpdead_items, vmbuffer,
2184 &old_vmbits);
2185
2186 if (!presult.all_visible)
2187 return presult.ndeleted;
2188
2189 /* Set the visibility map and page visibility hint */
2190 new_vmbits |= VISIBILITYMAP_ALL_VISIBLE;
2191
2192 if (presult.all_frozen)
2193 new_vmbits |= VISIBILITYMAP_ALL_FROZEN;
2194
2195 /* Nothing to do */
2196 if (old_vmbits == new_vmbits)
2197 return presult.ndeleted;
2198
2199 /*
2200 * It should never be the case that the visibility map page is set while
2201 * the page-level bit is clear (and if so, we cleared it above), but the
2202 * reverse is allowed (if checksums are not enabled). Regardless, set both
2203 * bits so that we get back in sync.
2204 *
2205 * The heap buffer must be marked dirty before adding it to the WAL chain
2206 * when setting the VM. We don't worry about unnecessarily dirtying the
2207 * heap buffer if PD_ALL_VISIBLE is already set, though. It is extremely
2208 * rare to have a clean heap buffer with PD_ALL_VISIBLE already set and
2209 * the VM bits clear, so there is no point in optimizing it.
2210 */
2211 PageSetAllVisible(page);
2212 MarkBufferDirty(buf);
2213
2214 /*
2215 * If the page is being set all-frozen, we pass InvalidTransactionId as
2216 * the cutoff_xid, since a snapshot conflict horizon sufficient to make
2217 * everything safe for REDO was logged when the page's tuples were frozen.
2218 */
2219 Assert(!presult.all_frozen ||
2220 !TransactionIdIsValid(presult.vm_conflict_horizon));
2221
2222 visibilitymap_set(vacrel->rel, blkno, buf,
2223 InvalidXLogRecPtr,
2224 vmbuffer, presult.vm_conflict_horizon,
2225 new_vmbits);
2226
2227 /*
2228 * If the page wasn't already set all-visible and/or all-frozen in the VM,
2229 * count it as newly set for logging.
2230 */
2231 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2232 {
2233 vacrel->new_all_visible_pages++;
2234 if (presult.all_frozen)
2235 {
2236 vacrel->new_all_visible_all_frozen_pages++;
2237 *vm_page_frozen = true;
2238 }
2239 }
2240 else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
2241 presult.all_frozen)
2242 {
2243 vacrel->new_all_frozen_pages++;
2244 *vm_page_frozen = true;
2245 }
2246
2247 return presult.ndeleted;
2248}

References Assert, buf, BufferGetBlockNumber(), cmpOffsetNumbers(), dead_items_add(), fb(), heap_page_prune_and_freeze(), HEAP_PAGE_PRUNE_FREEZE, HEAP_PAGE_PRUNE_MARK_UNUSED_NOW, identify_and_fix_vm_corruption(), InvalidXLogRecPtr, MarkBufferDirty(), MultiXactIdIsValid, PruneFreezeParams::options, PageSetAllVisible(), PRUNE_VACUUM_SCAN, qsort, PruneFreezeParams::relation, TransactionIdIsValid, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_get_status(), and visibilitymap_set().

Referenced by lazy_scan_heap().
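The visibility-map bookkeeping at the end of this function boils down to comparing old and new bit sets and bumping the matching counters. A self-contained sketch of that counter logic (the bit values are stand-ins for the visibilitymap flags):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VM_ALL_VISIBLE 0x01
    #define VM_ALL_FROZEN  0x02

    int
    main(void)
    {
        uint8_t old_vmbits = 0;                  /* invented: no bits set before */
        bool    all_visible = true, all_frozen = true;
        unsigned new_av = 0, new_av_af = 0, new_af = 0;

        uint8_t new_vmbits = 0;
        if (all_visible)
            new_vmbits |= VM_ALL_VISIBLE;
        if (all_frozen)
            new_vmbits |= VM_ALL_FROZEN;

        if (new_vmbits != old_vmbits)
        {
            if ((old_vmbits & VM_ALL_VISIBLE) == 0)
            {
                new_av++;                        /* newly all-visible */
                if (all_frozen)
                    new_av_af++;                 /* ... and all-frozen at once */
            }
            else if ((old_vmbits & VM_ALL_FROZEN) == 0 && all_frozen)
                new_af++;                        /* was visible, now frozen too */
        }
        printf("all-visible %u, both %u, frozen-only %u\n",
               new_av, new_av_af, new_af);
        return 0;
    }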

◆ lazy_truncate_heap()

static void lazy_truncate_heap ( LVRelState vacrel)
static

Definition at line 3252 of file vacuumlazy.c.

3253{
3254 BlockNumber orig_rel_pages = vacrel->rel_pages;
3255 BlockNumber new_rel_pages;
3256 bool lock_waiter_detected;
3257 int lock_retry;
3258
3259 /* Report that we are now truncating */
3260 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
3261 PROGRESS_VACUUM_PHASE_TRUNCATE);
3262
3263 /* Update error traceback information one last time */
3264 update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
3265 vacrel->nonempty_pages, InvalidOffsetNumber);
3266
3267 /*
3268 * Loop until no more truncating can be done.
3269 */
3270 do
3271 {
3272 /*
3273 * We need full exclusive lock on the relation in order to do
3274 * truncation. If we can't get it, give up rather than waiting --- we
3275 * don't want to block other backends, and we don't want to deadlock
3276 * (which is quite possible considering we already hold a lower-grade
3277 * lock).
3278 */
3279 lock_waiter_detected = false;
3280 lock_retry = 0;
3281 while (true)
3282 {
3283 if (ConditionalLockRelation(vacrel->rel, AccessExclusiveLock))
3284 break;
3285
3286 /*
3287 * Check for interrupts while trying to (re-)acquire the exclusive
3288 * lock.
3289 */
3290 CHECK_FOR_INTERRUPTS();
3291
3292 if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
3293 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
3294 {
3295 /*
3296 * We failed to establish the lock in the specified number of
3297 * retries. This means we give up truncating.
3298 */
3299 ereport(vacrel->verbose ? INFO : DEBUG2,
3300 (errmsg("\"%s\": stopping truncate due to conflicting lock request",
3301 vacrel->relname)));
3302 return;
3303 }
3304
3305 (void) WaitLatch(MyLatch,
3306 WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
3307 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL,
3308 WAIT_EVENT_VACUUM_TRUNCATE);
3309 ResetLatch(MyLatch);
3310 }
3311
3312 /*
3313 * Now that we have exclusive lock, look to see if the rel has grown
3314 * whilst we were vacuuming with non-exclusive lock. If so, give up;
3315 * the newly added pages presumably contain non-deletable tuples.
3316 */
3317 new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
3318 if (new_rel_pages != orig_rel_pages)
3319 {
3320 /*
3321 * Note: we intentionally don't update vacrel->rel_pages with the
3322 * new rel size here. If we did, it would amount to assuming that
3323 * the new pages are empty, which is unlikely. Leaving the numbers
3324 * alone amounts to assuming that the new pages have the same
3325 * tuple density as existing ones, which is less unlikely.
3326 */
3327 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3328 return;
3329 }
3330
3331 /*
3332 * Scan backwards from the end to verify that the end pages actually
3333 * contain no tuples. This is *necessary*, not optional, because
3334 * other backends could have added tuples to these pages whilst we
3335 * were vacuuming.
3336 */
3337 new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
3338 vacrel->blkno = new_rel_pages;
3339
3340 if (new_rel_pages >= orig_rel_pages)
3341 {
3342 /* can't do anything after all */
3343 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3344 return;
3345 }
3346
3347 /*
3348 * Okay to truncate.
3349 */
3350 RelationTruncate(vacrel->rel, new_rel_pages);
3351
3352 /*
3353 * We can release the exclusive lock as soon as we have truncated.
3354 * Other backends can't safely access the relation until they have
3355 * processed the smgr invalidation that smgrtruncate sent out ... but
3356 * that should happen as part of standard invalidation processing once
3357 * they acquire lock on the relation.
3358 */
3359 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3360
3361 /*
3362 * Update statistics. Here, it *is* correct to adjust rel_pages
3363 * without also touching reltuples, since the tuple count wasn't
3364 * changed by the truncation.
3365 */
3366 vacrel->removed_pages += orig_rel_pages - new_rel_pages;
3367 vacrel->rel_pages = new_rel_pages;
3368
3369 ereport(vacrel->verbose ? INFO : DEBUG2,
3370 (errmsg("table \"%s\": truncated %u to %u pages",
3371 vacrel->relname,
3372 orig_rel_pages, new_rel_pages)));
3373
3374 } while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
3375}

References AccessExclusiveLock, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), DEBUG2, ereport, errmsg(), INFO, InvalidOffsetNumber, MyLatch, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, RelationGetNumberOfBlocks, RelationTruncate(), ResetLatch(), UnlockRelation(), update_vacuum_error_info(), VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_TRUNCATE_LOCK_TIMEOUT, VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

Referenced by heap_vacuum_rel().
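The giving-up behaviour above is driven entirely by the timing macros. A self-contained sketch of the retry arithmetic (try_lock() and sleep_ms() are hypothetical stand-ins; the real code uses ConditionalLockRelation() and waits on MyLatch):

#include <stdbool.h>

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50 /* ms, as defined in vacuumlazy.c */
#define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000 /* ms */

extern bool try_lock(void);   /* hypothetical stand-in for ConditionalLockRelation() */
extern void sleep_ms(int ms); /* hypothetical stand-in for the WaitLatch() sleep */

/* Returns true once the lock is held; false after roughly five seconds. */
static bool
acquire_with_timeout(void)
{
    int lock_retry = 0;

    while (!try_lock())
    {
        /* 5000 / 50 = at most 100 polls before giving up */
        if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
                            VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
            return false;
        sleep_ms(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL);
    }
    return true;
}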

◆ lazy_vacuum()

static void lazy_vacuum ( LVRelState vacrel)
static

Definition at line 2482 of file vacuumlazy.c.

2483{
2484 bool bypass;
2485
2486 /* Should not end up here with no indexes */
2487 Assert(vacrel->nindexes > 0);
2488 Assert(vacrel->lpdead_item_pages > 0);
2489
2490 if (!vacrel->do_index_vacuuming)
2491 {
2492 Assert(!vacrel->do_index_cleanup);
2493 dead_items_reset(vacrel);
2494 return;
2495 }
2496
2497 /*
2498 * Consider bypassing index vacuuming (and heap vacuuming) entirely.
2499 *
2500 * We currently only do this in cases where the number of LP_DEAD items
2501 * for the entire VACUUM operation is close to zero. This avoids sharp
2502 * discontinuities in the duration and overhead of successive VACUUM
2503 * operations that run against the same table with a fixed workload.
2504 * Ideally, successive VACUUM operations will behave as if there are
2505 * exactly zero LP_DEAD items in cases where there are close to zero.
2506 *
2507 * This is likely to be helpful with a table that is continually affected
2508 * by UPDATEs that can mostly apply the HOT optimization, but occasionally
2509 * have small aberrations that lead to just a few heap pages retaining
2510 * only one or two LP_DEAD items. This is pretty common; even when the
2511 * DBA goes out of their way to make UPDATEs use HOT, it is practically
2512 * impossible to predict whether HOT will be applied in 100% of cases.
2513 * It's far easier to ensure that 99%+ of all UPDATEs against a table use
2514 * HOT through careful tuning.
2515 */
2516 bypass = false;
2517 if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
2518 {
2519 BlockNumber threshold;
2520
2521 Assert(vacrel->num_index_scans == 0);
2522 Assert(vacrel->lpdead_items == vacrel->dead_items_info->num_items);
2523 Assert(vacrel->do_index_vacuuming);
2524 Assert(vacrel->do_index_cleanup);
2525
2526 /*
2527 * This crossover point at which we'll start to do index vacuuming is
2528 * expressed as a percentage of the total number of heap pages in the
2529 * table that are known to have at least one LP_DEAD item. This is
2530 * much more important than the total number of LP_DEAD items, since
2531 * it's a proxy for the number of heap pages whose visibility map bits
2532 * cannot be set on account of bypassing index and heap vacuuming.
2533 *
2534 * We apply one further precautionary test: the space currently used
2535 * to store the TIDs (TIDs that now all point to LP_DEAD items) must
2536 * not exceed 32MB. This limits the risk that we will bypass index
2537 * vacuuming again and again until eventually there is a VACUUM whose
2538 * dead_items space is not CPU cache resident.
2539 *
2540 * We don't take any special steps to remember the LP_DEAD items (such
2541 * as counting them in our final update to the stats system) when the
2542 * optimization is applied. Though the accounting used in analyze.c's
2543 * acquire_sample_rows() will recognize the same LP_DEAD items as dead
2544 * rows in its own stats report, that's okay. The discrepancy should
2545 * be negligible. If this optimization is ever expanded to cover more
2546 * cases then this may need to be reconsidered.
2547 */
2548 threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
2549 bypass = (vacrel->lpdead_item_pages < threshold &&
2550 TidStoreMemoryUsage(vacrel->dead_items) < 32 * 1024 * 1024);
2551 }
2552
2553 if (bypass)
2554 {
2555 /*
2556 * There are almost zero TIDs. Behave as if there were precisely
2557 * zero: bypass index vacuuming, but do index cleanup.
2558 *
2559 * We expect that the ongoing VACUUM operation will finish very
2560 * quickly, so there is no point in considering speeding up as a
2561 * failsafe against wraparound failure. (Index cleanup is expected to
2562 * finish very quickly in cases where there were no ambulkdelete()
2563 * calls.)
2564 */
2565 vacrel->do_index_vacuuming = false;
2566 }
2567 else if (lazy_vacuum_all_indexes(vacrel))
2568 {
2569 /*
2570 * We successfully completed a round of index vacuuming. Do related
2571 * heap vacuuming now.
2572 */
2573 lazy_vacuum_heap_rel(vacrel);
2574 }
2575 else
2576 {
2577 /*
2578 * Failsafe case.
2579 *
2580 * We attempted index vacuuming, but didn't finish a full round/full
2581 * index scan. This happens when relfrozenxid or relminmxid is too
2582 * far in the past.
2583 *
2584 * From this point on the VACUUM operation will do no further index
2585 * vacuuming or heap vacuuming. This VACUUM operation won't end up
2586 * back here again.
2587 */
2588 Assert(VacuumFailsafeActive);
2589 }
2590
2591 /*
2592 * Forget the LP_DEAD items that we just vacuumed (or just decided to not
2593 * vacuum)
2594 */
2595 dead_items_reset(vacrel);
2596}

References Assert, BYPASS_THRESHOLD_PAGES, dead_items_reset(), lazy_vacuum_all_indexes(), lazy_vacuum_heap_rel(), TidStoreMemoryUsage(), and VacuumFailsafeActive.

Referenced by lazy_scan_heap().
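A standalone sketch of the crossover arithmetic described in the comments above (dead_items_mem stands in for TidStoreMemoryUsage(vacrel->dead_items); this is an illustration, not the in-tree code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define BYPASS_THRESHOLD_PAGES 0.02 /* i.e. 2% of rel_pages, as in vacuumlazy.c */

static bool
bypass_index_vacuuming(uint32_t rel_pages, uint32_t lpdead_item_pages,
                       size_t dead_items_mem)
{
    uint32_t threshold = (uint32_t) (rel_pages * BYPASS_THRESHOLD_PAGES);

    /* e.g. rel_pages = 100000 gives a threshold of 2000 LP_DEAD pages */
    return lpdead_item_pages < threshold &&
           dead_items_mem < (size_t) 32 * 1024 * 1024; /* 32MB TID-store cap */
}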

◆ lazy_vacuum_all_indexes()

static bool lazy_vacuum_all_indexes ( LVRelState vacrel)
static

Definition at line 2607 of file vacuumlazy.c.

2608{
2609 bool allindexes = true;
2610 double old_live_tuples = vacrel->rel->rd_rel->reltuples;
2611 const int progress_start_index[] = {
2612 PROGRESS_VACUUM_PHASE,
2613 PROGRESS_VACUUM_INDEXES_TOTAL
2614 };
2615 const int progress_end_index[] = {
2616 PROGRESS_VACUUM_INDEXES_TOTAL,
2617 PROGRESS_VACUUM_INDEXES_PROCESSED,
2618 PROGRESS_VACUUM_NUM_INDEX_VACUUMS
2619 };
2620 int64 progress_start_val[2];
2621 int64 progress_end_val[3];
2622
2623 Assert(vacrel->nindexes > 0);
2624 Assert(vacrel->do_index_vacuuming);
2625 Assert(vacrel->do_index_cleanup);
2626
2627 /* Precheck for XID wraparound emergencies */
2628 if (lazy_check_wraparound_failsafe(vacrel))
2629 {
2630 /* Wraparound emergency -- don't even start an index scan */
2631 return false;
2632 }
2633
2634 /*
2635 * Report that we are now vacuuming indexes and the number of indexes to
2636 * vacuum.
2637 */
2638 progress_start_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_INDEX;
2639 progress_start_val[1] = vacrel->nindexes;
2640 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
2641
2642 if (!ParallelVacuumIsActive(vacrel))
2643 {
2644 for (int idx = 0; idx < vacrel->nindexes; idx++)
2645 {
2646 Relation indrel = vacrel->indrels[idx];
2647 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2648
2649 vacrel->indstats[idx] = lazy_vacuum_one_index(indrel, istat,
2650 old_live_tuples,
2651 vacrel);
2652
2653 /* Report the number of indexes vacuumed */
2654 pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
2655 idx + 1);
2656
2657 if (lazy_check_wraparound_failsafe(vacrel))
2658 {
2659 /* Wraparound emergency -- end current index scan */
2660 allindexes = false;
2661 break;
2662 }
2663 }
2664 }
2665 else
2666 {
2667 /* Outsource everything to parallel variant */
2668 parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples,
2669 vacrel->num_index_scans);
2670
2671 /*
2672 * Do a postcheck to consider applying wraparound failsafe now. Note
2673 * that parallel VACUUM only gets the precheck and this postcheck.
2674 */
2675 if (lazy_check_wraparound_failsafe(vacrel))
2676 allindexes = false;
2677 }
2678
2679 /*
2680 * We delete all LP_DEAD items from the first heap pass in all indexes on
2681 * each call here (except calls where we choose to do the failsafe). This
2682 * makes the next call to lazy_vacuum_heap_rel() safe (except in the event
2683 * of the failsafe triggering, which prevents the next call from taking
2684 * place).
2685 */
2686 Assert(vacrel->num_index_scans > 0 ||
2687 vacrel->dead_items_info->num_items == vacrel->lpdead_items);
2689
2690 /*
2691 * Increase and report the number of index scans. Also, we reset
2692 * PROGRESS_VACUUM_INDEXES_TOTAL and PROGRESS_VACUUM_INDEXES_PROCESSED.
2693 *
2694 * We deliberately include the case where we started a round of bulk
2695 * deletes that we weren't able to finish due to the failsafe triggering.
2696 */
2697 vacrel->num_index_scans++;
2698 progress_end_val[0] = 0;
2699 progress_end_val[1] = 0;
2700 progress_end_val[2] = vacrel->num_index_scans;
2701 pgstat_progress_update_multi_param(3, progress_end_index, progress_end_val);
2702
2703 return allindexes;
2704}

References Assert, idx(), lazy_check_wraparound_failsafe(), lazy_vacuum_one_index(), parallel_vacuum_bulkdel_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, and VacuumFailsafeActive.

Referenced by lazy_vacuum().
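Stripped of progress reporting and parallelism, the control flow follows a precheck/postcheck shape. A sketch with hypothetical stand-ins (failsafe_triggered() for lazy_check_wraparound_failsafe(), bulkdel_index() for lazy_vacuum_one_index()):

#include <stdbool.h>

extern bool failsafe_triggered(void); /* hypothetical stand-in */
extern void bulkdel_index(int idx);   /* hypothetical stand-in */

/* Returns true only if every index completed a full bulk-delete pass. */
static bool
vacuum_all_indexes(int nindexes)
{
    if (failsafe_triggered())
        return false;           /* precheck: don't start an index scan at all */

    for (int idx = 0; idx < nindexes; idx++)
    {
        bulkdel_index(idx);
        if (failsafe_triggered())
            return false;       /* postcheck: abandon the remaining indexes */
    }
    return true;
}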

◆ lazy_vacuum_heap_page()

static void lazy_vacuum_heap_page ( LVRelState vacrel,
BlockNumber  blkno,
Buffer  buffer,
OffsetNumber deadoffsets,
int  num_offsets,
Buffer  vmbuffer 
)
static

Definition at line 2870 of file vacuumlazy.c.

2873{
2874 Page page = BufferGetPage(buffer);
2875 OffsetNumber unused[MaxHeapTuplesPerPage];
2876 int nunused = 0;
2877 TransactionId visibility_cutoff_xid;
2878 TransactionId conflict_xid = InvalidTransactionId;
2879 bool all_frozen;
2880 LVSavedErrInfo saved_err_info;
2881 uint8 vmflags = 0;
2882
2883 Assert(vacrel->do_index_vacuuming);
2884 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED,
2885 blkno);
2884
2886
2887 /* Update error traceback information */
2891
2892 /*
2893 * Before marking dead items unused, check whether the page will become
2894 * all-visible once that change is applied. This lets us reap the tuples
2895 * and mark the page all-visible within the same critical section,
2896 * enabling both changes to be emitted in a single WAL record. Since the
2897 * visibility checks may perform I/O and allocate memory, they must be
2898 * done outside the critical section.
2899 */
2900 if (heap_page_would_be_all_visible(vacrel->rel, buffer,
2901 vacrel->cutoffs.OldestXmin,
2902 deadoffsets, num_offsets,
2903 &all_frozen, &visibility_cutoff_xid,
2904 &vacrel->offnum))
2905 {
2906 vmflags |= VISIBILITYMAP_ALL_VISIBLE;
2907 if (all_frozen)
2908 {
2909 vmflags |= VISIBILITYMAP_ALL_FROZEN;
2910 Assert(!TransactionIdIsValid(visibility_cutoff_xid));
2911 }
2912
2913 /*
2914 * Take the lock on the vmbuffer before entering a critical section.
2915 * The heap page lock must also be held while updating the VM to
2916 * ensure consistency.
2917 */
2918 LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
2919 }
2920
2921 START_CRIT_SECTION();
2922
2923 for (int i = 0; i < num_offsets; i++)
2924 {
2925 ItemId itemid;
2926 OffsetNumber toff = deadoffsets[i];
2927
2928 itemid = PageGetItemId(page, toff);
2929
2930 Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
2931 ItemIdSetUnused(itemid);
2932 unused[nunused++] = toff;
2933 }
2934
2935 Assert(nunused > 0);
2936
2937 /* Attempt to truncate line pointer array now */
2938 PageTruncateLinePointerArray(page);
2939
2940 if ((vmflags & VISIBILITYMAP_VALID_BITS) != 0)
2941 {
2942 /*
2943 * The page is guaranteed to have had dead line pointers, so we always
2944 * set PD_ALL_VISIBLE.
2945 */
2946 PageSetAllVisible(page);
2947 visibilitymap_set_vmbits(blkno,
2948 vmbuffer, vmflags,
2949 vacrel->rel->rd_locator);
2950 conflict_xid = visibility_cutoff_xid;
2951 }
2952
2953 /*
2954 * Mark buffer dirty before we write WAL.
2955 */
2956 MarkBufferDirty(buffer);
2957
2958 /* XLOG stuff */
2959 if (RelationNeedsWAL(vacrel->rel))
2960 {
2961 log_heap_prune_and_freeze(vacrel->rel, buffer,
2962 vmflags != 0 ? vmbuffer : InvalidBuffer,
2963 vmflags,
2964 conflict_xid,
2965 false, /* no cleanup lock required */
2966 PRUNE_VACUUM_CLEANUP,
2967 NULL, 0, /* frozen */
2968 NULL, 0, /* redirected */
2969 NULL, 0, /* dead */
2970 unused, nunused);
2971 }
2972
2973 END_CRIT_SECTION();
2974
2975 if ((vmflags & VISIBILITYMAP_VALID_BITS) != 0)
2976 {
2977 /* Count the newly set VM page for logging */
2978 LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
2979 vacrel->new_all_visible_pages++;
2980 if (all_frozen)
2981 vacrel->new_all_visible_all_frozen_pages++;
2982 }
2983
2984 /* Revert to the previous phase information for error traceback */
2985 restore_vacuum_error_info(vacrel, &saved_err_info);
2986}

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage(), END_CRIT_SECTION, heap_page_would_be_all_visible(), i, InvalidBuffer, InvalidOffsetNumber, InvalidTransactionId, ItemIdHasStorage, ItemIdIsDead, ItemIdSetUnused, LockBuffer(), log_heap_prune_and_freeze(), MarkBufferDirty(), MaxHeapTuplesPerPage, PageGetItemId(), PageSetAllVisible(), PageTruncateLinePointerArray(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PRUNE_VACUUM_CLEANUP, RelationNeedsWAL, restore_vacuum_error_info(), START_CRIT_SECTION, TransactionIdIsValid, update_vacuum_error_info(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set_vmbits(), and VISIBILITYMAP_VALID_BITS.

Referenced by lazy_vacuum_heap_rel().
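The heart of the critical section is the loop that flips each LP_DEAD line pointer to LP_UNUSED while collecting offsets for the WAL record. A standalone sketch using a simplified line-pointer layout (the bit-field shape and LP_UNUSED value are assumed to match storage/itemid.h):

#include <stdint.h>

/* Simplified line pointer; layout assumed from storage/itemid.h */
typedef struct
{
    unsigned lp_off:15,  /* offset to tuple (from start of page) */
             lp_flags:2, /* state of line pointer */
             lp_len:15;  /* byte length of tuple */
} ItemIdData;

#define LP_UNUSED 0 /* unused; should always have lp_len == 0 */

/* Flip each dead line pointer to unused, collecting offsets for the WAL record. */
static int
reap_dead_offsets(ItemIdData *lp_array, const uint16_t *deadoffsets,
                  int num_offsets, uint16_t *unused)
{
    int nunused = 0;

    for (int i = 0; i < num_offsets; i++)
    {
        ItemIdData *itemid = &lp_array[deadoffsets[i] - 1]; /* offsets are 1-based */

        itemid->lp_flags = LP_UNUSED;
        itemid->lp_len = 0;
        unused[nunused++] = deadoffsets[i];
    }
    return nunused;
}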

◆ lazy_vacuum_heap_rel()

static void lazy_vacuum_heap_rel ( LVRelState vacrel)
static

Definition at line 2752 of file vacuumlazy.c.

2753{
2754 ReadStream *stream;
2755 BlockNumber vacuumed_pages = 0;
2756 Buffer vmbuffer = InvalidBuffer;
2757 LVSavedErrInfo saved_err_info;
2758 TidStoreIter *iter;
2759
2760 Assert(vacrel->do_index_vacuuming);
2761 Assert(vacrel->do_index_cleanup);
2762 Assert(vacrel->num_index_scans > 0);
2763
2764 /* Report that we are now vacuuming the heap */
2765 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2766 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
2767
2768 /* Update error traceback information */
2769 update_vacuum_error_info(vacrel, &saved_err_info,
2770 VACUUM_ERRCB_PHASE_VACUUM_HEAP,
2771 InvalidBlockNumber, InvalidOffsetNumber);
2772
2773 iter = TidStoreBeginIterate(vacrel->dead_items);
2774
2775 /*
2776 * Set up the read stream for vacuum's second pass through the heap.
2777 *
2778 * It is safe to use batchmode, as vacuum_reap_lp_read_stream_next() does
2779 * not need to wait for IO and does not perform locking. Once we support
2780 * parallelism it should still be fine, as presumably the holder of locks
2781 * would never be blocked by IO while holding the lock.
2782 */
2783 stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE |
2784 READ_STREAM_USE_BATCHING,
2785 vacrel->bstrategy,
2786 vacrel->rel,
2787 MAIN_FORKNUM,
2788 vacuum_reap_lp_read_stream_next,
2789 iter,
2790 sizeof(TidStoreIterResult));
2791
2792 while (true)
2793 {
2794 BlockNumber blkno;
2795 Buffer buf;
2796 Page page;
2797 TidStoreIterResult *iter_result;
2798 Size freespace;
2799 OffsetNumber offsets[MaxOffsetNumber];
2800 int num_offsets;
2801
2802 vacuum_delay_point(false);
2803
2804 buf = read_stream_next_buffer(stream, (void **) &iter_result);
2805
2806 /* The relation is exhausted */
2807 if (!BufferIsValid(buf))
2808 break;
2809
2810 vacrel->blkno = blkno = BufferGetBlockNumber(buf);
2811
2812 num_offsets = TidStoreGetBlockOffsets(iter_result, offsets,
2813 lengthof(offsets));
2814 Assert(num_offsets <= lengthof(offsets));
2815
2816 /*
2817 * Pin the visibility map page in case we need to mark the page
2818 * all-visible. In most cases this will be very cheap, because we'll
2819 * already have the correct page pinned anyway.
2820 */
2821 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
2822
2823 /* We need a non-cleanup exclusive lock to mark dead_items unused */
2824 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2825 lazy_vacuum_heap_page(vacrel, blkno, buf, offsets,
2826 num_offsets, vmbuffer);
2827
2828 /* Now that we've vacuumed the page, record its available space */
2829 page = BufferGetPage(buf);
2830 freespace = PageGetHeapFreeSpace(page);
2831
2832 UnlockReleaseBuffer(buf);
2833 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
2834 vacuumed_pages++;
2835 }
2836
2837 read_stream_end(stream);
2838 TidStoreEndIterate(iter);
2839
2840 vacrel->blkno = InvalidBlockNumber;
2841 if (BufferIsValid(vmbuffer))
2842 ReleaseBuffer(vmbuffer);
2843
2844 /*
2845 * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
2846 * the second heap pass. No more, no less.
2847 */
2848 Assert(vacrel->num_index_scans > 1 ||
2849 (vacrel->dead_items_info->num_items == vacrel->lpdead_items &&
2850 vacuumed_pages == vacrel->lpdead_item_pages));
2851
2852 ereport(DEBUG2,
2853 (errmsg("table \"%s\": removed %" PRId64 " dead item identifiers in %u pages",
2854 vacrel->relname, vacrel->dead_items_info->num_items,
2855 vacuumed_pages)));
2856
2857 /* Revert to the previous phase information for error traceback */
2858 restore_vacuum_error_info(vacrel, &saved_err_info);
2859}

References Assert, buf, BUFFER_LOCK_EXCLUSIVE, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), DEBUG2, ereport, errmsg(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_vacuum_heap_page(), lengthof, LockBuffer(), MAIN_FORKNUM, MaxOffsetNumber, PageGetHeapFreeSpace(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, read_stream_begin_relation(), read_stream_end(), READ_STREAM_MAINTENANCE, read_stream_next_buffer(), READ_STREAM_USE_BATCHING, RecordPageWithFreeSpace(), ReleaseBuffer(), restore_vacuum_error_info(), TidStoreBeginIterate(), TidStoreEndIterate(), TidStoreGetBlockOffsets(), UnlockReleaseBuffer(), update_vacuum_error_info(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, vacuum_reap_lp_read_stream_next(), and visibilitymap_pin().

Referenced by lazy_vacuum().
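The consumer side of a read stream pairs each returned buffer with the per-buffer data that the callback stashed. Schematically (process_block() is a hypothetical placeholder; see the real loop above for locking, delay points, and error handling):

TidStoreIterResult *iter_result;
Buffer buf;

/* The stream returns InvalidBuffer once the callback has run out of blocks */
while (BufferIsValid(buf = read_stream_next_buffer(stream,
                                                   (void **) &iter_result)))
{
    BlockNumber blkno = BufferGetBlockNumber(buf);

    /* iter_result describes the dead items of exactly this block */
    process_block(blkno, buf, iter_result);
}
read_stream_end(stream);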

◆ lazy_vacuum_one_index()

static IndexBulkDeleteResult * lazy_vacuum_one_index ( Relation  indrel,
IndexBulkDeleteResult istat,
double  reltuples,
LVRelState vacrel 
)
static

Definition at line 3123 of file vacuumlazy.c.

3125{
3126 IndexVacuumInfo ivinfo;
3127 LVSavedErrInfo saved_err_info;
3128
3129 ivinfo.index = indrel;
3130 ivinfo.heaprel = vacrel->rel;
3131 ivinfo.analyze_only = false;
3132 ivinfo.report_progress = false;
3133 ivinfo.estimated_count = true;
3134 ivinfo.message_level = DEBUG2;
3135 ivinfo.num_heap_tuples = reltuples;
3136 ivinfo.strategy = vacrel->bstrategy;
3137
3138 /*
3139 * Update error traceback information.
3140 *
3141 * The index name is saved during this phase and restored immediately
3142 * after this phase. See vacuum_error_callback.
3143 */
3144 Assert(vacrel->indname == NULL);
3145 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
3146 update_vacuum_error_info(vacrel, &saved_err_info,
3147 VACUUM_ERRCB_PHASE_VACUUM_INDEX,
3148 InvalidBlockNumber, InvalidOffsetNumber);
3149
3150 /* Do bulk deletion */
3151 istat = vac_bulkdel_one_index(&ivinfo, istat, vacrel->dead_items,
3152 vacrel->dead_items_info);
3153
3154 /* Revert to the previous phase information for error traceback */
3155 restore_vacuum_error_info(vacrel, &saved_err_info);
3156 pfree(vacrel->indname);
3157 vacrel->indname = NULL;
3158
3159 return istat;
3160}

References Assert, DEBUG2, InvalidBlockNumber, InvalidOffsetNumber, pfree(), pstrdup(), RelationGetRelationName, restore_vacuum_error_info(), update_vacuum_error_info(), vac_bulkdel_one_index(), and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by lazy_vacuum_all_indexes().

◆ restore_vacuum_error_info()

static void restore_vacuum_error_info ( LVRelState vacrel,
const LVSavedErrInfo saved_vacrel 
)
static

Definition at line 3964 of file vacuumlazy.c.

3966{
3967 vacrel->blkno = saved_vacrel->blkno;
3968 vacrel->offnum = saved_vacrel->offnum;
3969 vacrel->phase = saved_vacrel->phase;
3970}


Referenced by lazy_cleanup_one_index(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().

◆ should_attempt_truncation()

static bool should_attempt_truncation ( LVRelState vacrel)
static

Definition at line 3232 of file vacuumlazy.c.

3233{
3234 BlockNumber possibly_freeable;
3235
3236 if (!vacrel->do_rel_truncate || VacuumFailsafeActive)
3237 return false;
3238
3239 possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
3240 if (possibly_freeable > 0 &&
3241 (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
3242 possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
3243 return true;
3244
3245 return false;
3246}

References REL_TRUNCATE_FRACTION, REL_TRUNCATE_MINIMUM, and VacuumFailsafeActive.

Referenced by heap_vacuum_rel().
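In numbers: with REL_TRUNCATE_MINIMUM at 1000 and REL_TRUNCATE_FRACTION at 16, an 8192-page table qualifies once 512 tail pages (8192 / 16) are empty, while a million-page table qualifies at the 1000-page floor. A standalone sketch of that test (an illustration, not the in-tree code):

#include <stdbool.h>
#include <stdint.h>

#define REL_TRUNCATE_MINIMUM 1000 /* as defined in vacuumlazy.c */
#define REL_TRUNCATE_FRACTION 16

static bool
truncation_worthwhile(uint32_t rel_pages, uint32_t nonempty_pages)
{
    uint32_t possibly_freeable = rel_pages - nonempty_pages;

    /* worthwhile when the empty tail hits the absolute or relative floor */
    return possibly_freeable > 0 &&
           (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
            possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);
}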

◆ update_relstats_all_indexes()

static void update_relstats_all_indexes ( LVRelState vacrel)
static

Definition at line 3846 of file vacuumlazy.c.

3847{
3848 Relation *indrels = vacrel->indrels;
3849 int nindexes = vacrel->nindexes;
3850 IndexBulkDeleteResult **indstats = vacrel->indstats;
3851
3852 Assert(vacrel->do_index_cleanup);
3853
3854 for (int idx = 0; idx < nindexes; idx++)
3855 {
3856 Relation indrel = indrels[idx];
3857 IndexBulkDeleteResult *istat = indstats[idx];
3858
3859 if (istat == NULL || istat->estimated_count)
3860 continue;
3861
3862 /* Update index statistics */
3863 vac_update_relstats(indrel,
3864 istat->num_pages,
3865 istat->num_index_tuples,
3866 0, 0,
3867 false,
3868 InvalidTransactionId,
3869 InvalidMultiXactId,
3870 NULL, NULL, false);
3871 }
3872}

References Assert, IndexBulkDeleteResult::estimated_count, idx(), InvalidMultiXactId, InvalidTransactionId, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, and vac_update_relstats().

Referenced by heap_vacuum_rel().

◆ update_vacuum_error_info()

static void update_vacuum_error_info ( LVRelState vacrel,
LVSavedErrInfo saved_vacrel,
int  phase,
BlockNumber  blkno,
OffsetNumber  offnum 
)
static

Definition at line 3945 of file vacuumlazy.c.

3947{
3948 if (saved_vacrel)
3949 {
3950 saved_vacrel->offnum = vacrel->offnum;
3951 saved_vacrel->blkno = vacrel->blkno;
3952 saved_vacrel->phase = vacrel->phase;
3953 }
3954
3955 vacrel->blkno = blkno;
3956 vacrel->offnum = offnum;
3957 vacrel->phase = phase;
3958}


Referenced by lazy_cleanup_one_index(), lazy_scan_heap(), lazy_truncate_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().
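Paired with restore_vacuum_error_info(), the intended usage is a save/override/restore bracket around each phase. Schematically (mirroring the calls made in lazy_vacuum_heap_page() above):

LVSavedErrInfo saved_err_info;

/* Save the current position and switch the reported phase */
update_vacuum_error_info(vacrel, &saved_err_info,
                         VACUUM_ERRCB_PHASE_VACUUM_HEAP,
                         blkno, InvalidOffsetNumber);

/* ... work whose errors should carry the new phase/block context ... */

/* Put back whatever phase the caller was reporting */
restore_vacuum_error_info(vacrel, &saved_err_info);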

◆ vacuum_error_callback()

static void vacuum_error_callback ( void arg)
static

Definition at line 3881 of file vacuumlazy.c.

3882{
3883 LVRelState *errinfo = arg;
3884
3885 switch (errinfo->phase)
3886 {
3887 case VACUUM_ERRCB_PHASE_SCAN_HEAP:
3888 if (BlockNumberIsValid(errinfo->blkno))
3889 {
3890 if (OffsetNumberIsValid(errinfo->offnum))
3891 errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
3892 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3893 else
3894 errcontext("while scanning block %u of relation \"%s.%s\"",
3895 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3896 }
3897 else
3898 errcontext("while scanning relation \"%s.%s\"",
3899 errinfo->relnamespace, errinfo->relname);
3900 break;
3901
3902 case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
3903 if (BlockNumberIsValid(errinfo->blkno))
3904 {
3905 if (OffsetNumberIsValid(errinfo->offnum))
3906 errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
3907 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3908 else
3909 errcontext("while vacuuming block %u of relation \"%s.%s\"",
3910 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3911 }
3912 else
3913 errcontext("while vacuuming relation \"%s.%s\"",
3914 errinfo->relnamespace, errinfo->relname);
3915 break;
3916
3917 case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
3918 errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
3919 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3920 break;
3921
3922 case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
3923 errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
3924 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3925 break;
3926
3927 case VACUUM_ERRCB_PHASE_TRUNCATE:
3928 if (BlockNumberIsValid(errinfo->blkno))
3929 errcontext("while truncating relation \"%s.%s\" to %u blocks",
3930 errinfo->relnamespace, errinfo->relname, errinfo->blkno);
3931 break;
3932
3933 case VACUUM_ERRCB_PHASE_UNKNOWN:
3934 default:
3935 return; /* do nothing; the errinfo may not be
3936 * initialized */
3937 }
3938}

References arg, BlockNumberIsValid(), errcontext, OffsetNumberIsValid, VACUUM_ERRCB_PHASE_INDEX_CLEANUP, VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_ERRCB_PHASE_UNKNOWN, VACUUM_ERRCB_PHASE_VACUUM_HEAP, and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by heap_vacuum_rel().
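The callback only fires once it has been pushed onto error_context_stack. Roughly how the caller wires it up, using the standard ErrorContextCallback pattern from elog.h (a schematic of what heap_vacuum_rel() does, not a verbatim excerpt):

ErrorContextCallback errcallback;

errcallback.callback = vacuum_error_callback;
errcallback.arg = vacrel;
errcallback.previous = error_context_stack;
error_context_stack = &errcallback;

/* ... all vacuum phases run here; any ereport() appends the context line ... */

error_context_stack = errcallback.previous; /* pop before returning */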

◆ vacuum_reap_lp_read_stream_next()

static BlockNumber vacuum_reap_lp_read_stream_next ( ReadStream stream,
void callback_private_data,
void per_buffer_data 
)
static

Definition at line 2714 of file vacuumlazy.c.

2717{
2718 TidStoreIter *iter = callback_private_data;
2719 TidStoreIterResult *iter_result;
2720
2721 iter_result = TidStoreIterateNext(iter);
2722 if (iter_result == NULL)
2723 return InvalidBlockNumber;
2724
2725 /*
2726 * Save the TidStoreIterResult for later, so we can extract the offsets.
2727 * It is safe to copy the result, according to TidStoreIterateNext().
2728 */
2729 memcpy(per_buffer_data, iter_result, sizeof(*iter_result));
2730
2731 return iter_result->blkno;
2732}

References InvalidBlockNumber and TidStoreIterateNext().

Referenced by lazy_vacuum_heap_rel().