/* Thresholds (pages, and fraction of the table) that make heap truncation worthwhile */
#define REL_TRUNCATE_MINIMUM	1000
#define REL_TRUNCATE_FRACTION	16

/* Timing parameters (milliseconds) for the truncation lock heuristics */
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL		20
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL		50
#define VACUUM_TRUNCATE_LOCK_TIMEOUT			5000

/* Fraction of heap pages with LP_DEAD items below which index vacuuming may be bypassed */
#define BYPASS_THRESHOLD_PAGES	0.02

/* Recheck the wraparound failsafe after every 4GB of heap scanned */
#define FAILSAFE_EVERY_PAGES \
	((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))

/* Vacuum the free space map at most once per 8GB of heap processed */
#define VACUUM_FSM_EVERY_PAGES \
	((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

/* Minimum run of skippable blocks, and prefetch distance used during truncation */
#define SKIP_PAGES_THRESHOLD	((BlockNumber) 32)
#define PREFETCH_SIZE			((BlockNumber) 32)

#define ParallelVacuumIsActive(vacrel) ((vacrel)->pvs != NULL)

/* Eager scanning: success cap (fraction of all-visible-but-unfrozen pages) and region size in blocks */
#define MAX_EAGER_FREEZE_SUCCESS_RATE	0.2
#define EAGER_SCAN_REGION_SIZE			4096
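/*
 * Illustrative, standalone sketch (not part of vacuumlazy.c): with the
 * default 8 kB block size, FAILSAFE_EVERY_PAGES and VACUUM_FSM_EVERY_PAGES
 * work out to the block counts printed below.  The block size here is an
 * assumed default, not read from the build.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint64_t blcksz = 8192;	/* assumed default BLCKSZ */
	uint64_t	failsafe_every = ((uint64_t) 4 * 1024 * 1024 * 1024) / blcksz;
	uint64_t	fsm_every = ((uint64_t) 8 * 1024 * 1024 * 1024) / blcksz;

	printf("failsafe recheck every %llu blocks (4 GB)\n",
		   (unsigned long long) failsafe_every);	/* 524288 */
	printf("FSM vacuum every %llu blocks (8 GB)\n",
		   (unsigned long long) fsm_every);			/* 1048576 */
	return 0;
}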
static BlockNumber heap_vac_scan_next_block(ReadStream *stream,
						void *callback_private_data, void *per_buffer_data);
static IndexBulkDeleteResult *lazy_cleanup_one_index(Relation indrel,
						IndexBulkDeleteResult *istat, double reltuples,
						bool estimated_count, LVRelState *vacrel);

#ifdef USE_ASSERT_CHECKING
	vacrel->eager_scan_max_fails_per_region = 0;
	vacrel->eager_scan_remaining_fails = 0;
	vacrel->eager_scan_remaining_successes = 0;

									  vacrel->cutoffs.FreezeLimit))
									   vacrel->cutoffs.MultiXactCutoff))

	vacrel->eager_scan_remaining_successes =

	if (vacrel->eager_scan_remaining_successes == 0)

	vacrel->eager_scan_max_fails_per_region =

	vacrel->eager_scan_remaining_fails =
		vacrel->eager_scan_max_fails_per_region *
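/*
 * Illustrative sketch (hypothetical helper, not the real
 * heap_vacuum_eager_scan_setup): one plausible way the two budgets assigned
 * above are derived.  The success cap is MAX_EAGER_FREEZE_SUCCESS_RATE of
 * the pages that are all-visible but not yet all-frozen, and the per-region
 * failure cap applies the configurable failure rate to
 * EAGER_SCAN_REGION_SIZE.  Function and parameter names are stand-ins.
 */
static void
sketch_eager_scan_budgets(BlockNumber allvisible, BlockNumber allfrozen,
						  double max_eager_freeze_failure_rate,
						  BlockNumber *remaining_successes,
						  BlockNumber *max_fails_per_region)
{
	/* cap on how many pages eager scanning may newly freeze this VACUUM */
	*remaining_successes =
		(BlockNumber) (MAX_EAGER_FREEZE_SUCCESS_RATE * (allvisible - allfrozen));

	/* failed freeze attempts tolerated per 4096-block region */
	*max_fails_per_region =
		(BlockNumber) (max_eager_freeze_failure_rate * EAGER_SCAN_REGION_SIZE);
}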
	vacrel->bstrategy = bstrategy;
	if (instrument && vacrel->nindexes > 0)

		for (int i = 0; i < vacrel->nindexes; i++)

	/* defaults, used when index_cleanup is AUTO */
	vacrel->consider_bypass_optimization = true;
	vacrel->do_index_vacuuming = true;
	vacrel->do_index_cleanup = true;

	/* index_cleanup explicitly disabled */
	vacrel->do_index_vacuuming = false;
	vacrel->do_index_cleanup = false;

	/* index_cleanup explicitly enabled: never take the bypass shortcut */
	vacrel->consider_bypass_optimization = false;
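/*
 * Illustrative sketch (hypothetical helper, not PostgreSQL code): how the
 * index_cleanup option plausibly maps onto the three flags initialized
 * above.  The enum, struct and function names here are simplified
 * stand-ins for the real VacOptValue machinery.
 */
#include <stdbool.h>

typedef enum { CLEANUP_AUTO, CLEANUP_ON, CLEANUP_OFF } index_cleanup_opt;

typedef struct
{
	bool		consider_bypass_optimization;
	bool		do_index_vacuuming;
	bool		do_index_cleanup;
} index_flags;

static index_flags
resolve_index_cleanup(index_cleanup_opt opt)
{
	index_flags f = {true, true, true};	/* defaults: AUTO */

	if (opt == CLEANUP_OFF)
	{
		/* skip index vacuuming and index cleanup entirely */
		f.do_index_vacuuming = false;
		f.do_index_cleanup = false;
	}
	else if (opt == CLEANUP_ON)
	{
		/* force index vacuuming; never take the bypass shortcut */
		f.consider_bypass_optimization = false;
	}
	return f;
}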
	vacrel->scanned_pages = 0;
	vacrel->eager_scanned_pages = 0;
	vacrel->removed_pages = 0;
	vacrel->new_frozen_tuple_pages = 0;
	vacrel->lpdead_item_pages = 0;
	vacrel->missed_dead_pages = 0;
	vacrel->nonempty_pages = 0;

	vacrel->new_rel_tuples = 0;
	vacrel->new_live_tuples = 0;

	vacrel->num_index_scans = 0;
	vacrel->num_dead_items_resets = 0;
	vacrel->total_dead_items_bytes = 0;
	vacrel->tuples_deleted = 0;
	vacrel->tuples_frozen = 0;

	vacrel->recently_dead_tuples = 0;
	vacrel->missed_dead_tuples = 0;

	vacrel->vm_new_visible_pages = 0;
	vacrel->vm_new_visible_frozen_pages = 0;
	vacrel->vm_new_frozen_pages = 0;

	vacrel->skippedallvis = false;

	vacrel->aggressive = true;

	vacrel->skipwithvm = skipwithvm;
				(errmsg("aggressively vacuuming \"%s.%s.%s\"",

				(errmsg("vacuuming \"%s.%s.%s\"",
	if (vacrel->do_index_cleanup)

										 vacrel->cutoffs.relfrozenxid,
										 vacrel->NewRelfrozenXid));
										 vacrel->cutoffs.relminmxid,

	if (vacrel->skippedallvis)

						 vacrel->recently_dead_tuples +
						 vacrel->missed_dead_tuples,
			msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");

				msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
				msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
				msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
				msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");

						 vacrel->num_index_scans);
		appendStringInfo(&buf,
						 _("pages: %u removed, %u remain, %u scanned (%.2f%% of total), %u eagerly scanned\n"),
						 100.0 * vacrel->scanned_pages /
						 vacrel->eager_scanned_pages);
						 _("tuples: %" PRId64 " removed, %" PRId64 " remain, %" PRId64 " are dead but not yet removable\n"),
						 vacrel->recently_dead_tuples);
		if (vacrel->missed_dead_tuples > 0)
							 _("tuples missed: %" PRId64 " dead from %u pages not removed due to cleanup lock contention\n"),
							 vacrel->missed_dead_tuples,
							 vacrel->missed_dead_pages);
						 vacrel->cutoffs.OldestXmin);
						 _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
							 vacrel->cutoffs.relfrozenxid);
							 _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
							 vacrel->cutoffs.relminmxid);
							 _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
						 vacrel->new_frozen_tuple_pages,
						 100.0 * vacrel->new_frozen_tuple_pages /
						 _("visibility map: %u pages set all-visible, %u pages set all-frozen (%u were all-visible)\n"),
						 vacrel->vm_new_visible_pages,
						 vacrel->vm_new_visible_frozen_pages +
						 vacrel->vm_new_frozen_pages,
						 vacrel->vm_new_frozen_pages);
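/*
 * Illustrative, standalone sketch (not part of vacuumlazy.c): how the
 * visibility-map log line above combines the three counters.  The counts
 * used here are made-up sample values; the "set all-frozen" figure is the
 * sum of pages newly set all-visible-and-frozen plus pages that were
 * already all-visible and only gained the all-frozen bit.
 */
#include <stdio.h>

int
main(void)
{
	unsigned	vm_new_visible_pages = 120;			/* newly all-visible */
	unsigned	vm_new_visible_frozen_pages = 80;	/* newly all-visible and all-frozen */
	unsigned	vm_new_frozen_pages = 15;			/* already all-visible, newly all-frozen */

	printf("visibility map: %u pages set all-visible, "
		   "%u pages set all-frozen (%u were all-visible)\n",
		   vm_new_visible_pages,
		   vm_new_visible_frozen_pages + vm_new_frozen_pages,
		   vm_new_frozen_pages);
	return 0;
}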
		if (vacrel->do_index_vacuuming)
			if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
				msgfmt = _("%u pages from table (%.2f%% of total) had %" PRId64 " dead item identifiers removed\n");
			msgfmt = _("%u pages from table (%.2f%% of total) have %" PRId64 " dead item identifiers\n");
						 vacrel->lpdead_item_pages,
		for (int i = 0; i < vacrel->nindexes; i++)
							 _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
						 ngettext("memory usage: dead item storage %.2f MB accumulated across %d reset (limit %.2f MB each)\n",
								  "memory usage: dead item storage %.2f MB accumulated across %d resets (limit %.2f MB each)\n",
								  vacrel->num_dead_items_resets),
						 (double) vacrel->total_dead_items_bytes / (1024 * 1024),
						 vacrel->num_dead_items_resets,
		for (int i = 0; i < vacrel->nindexes; i++)
		vacrel->eager_scan_remaining_successes;

	vacrel->next_unskippable_eager_scanned = false;

	void	   *per_buffer_data = NULL;

		if (vacrel->scanned_pages > 0 &&

		if (vacrel->dead_items_info->num_items > 0 &&

			vacrel->consider_bypass_optimization = false;
		vacrel->eager_scanned_pages++;

		if (vacrel->eager_scan_remaining_successes > 0)
			vacrel->eager_scan_remaining_successes--;

		if (vacrel->eager_scan_remaining_successes == 0)
			if (vacrel->eager_scan_max_fails_per_region > 0)
					(errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of relation \"%s.%s.%s\"",
			vacrel->eager_scan_remaining_fails = 0;
			vacrel->eager_scan_max_fails_per_region = 0;
	else if (vacrel->eager_scan_remaining_fails > 0)
		vacrel->eager_scan_remaining_fails--;
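/*
 * Illustrative sketch (hypothetical helper, not PostgreSQL code): per-block
 * bookkeeping for eager scanning as suggested by the fragment above.  An
 * eagerly scanned block that ends up all-frozen consumes one "success";
 * once the success budget is spent, eager scanning is switched off by
 * zeroing both failure counters.  A block that could not be frozen consumes
 * one "failure" from the current region's budget.
 */
#include <stdbool.h>

typedef struct
{
	unsigned	remaining_successes;
	unsigned	remaining_fails;
	unsigned	max_fails_per_region;
} EagerScanState;

static void
eager_scan_record_result(EagerScanState *es, bool froze_page)
{
	if (froze_page)
	{
		if (es->remaining_successes > 0)
			es->remaining_successes--;
		if (es->remaining_successes == 0)
		{
			/* success cap reached: disable eager scanning entirely */
			es->remaining_fails = 0;
			es->max_fails_per_region = 0;
		}
	}
	else if (es->remaining_fails > 0)
		es->remaining_fails--;
}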
		if (vacrel->nindexes == 0
			|| !vacrel->do_index_vacuuming

			vacrel->missed_dead_tuples;

	if (vacrel->dead_items_info->num_items > 0)

static BlockNumber
heap_vac_scan_next_block(ReadStream *stream,
						 void *callback_private_data,
						 void *per_buffer_data)

			vacrel->skippedallvis = true;

		*((bool *) per_buffer_data) = false;
		return vacrel->current_block;

		*((bool *) per_buffer_data) = vacrel->next_unskippable_eager_scanned;
		return vacrel->current_block;
	Buffer		next_unskippable_vmbuffer = vacrel->next_unskippable_vmbuffer;
	bool		next_unskippable_eager_scanned = false;

	for (;; next_unskippable_block++)
										   next_unskippable_block,
										   &next_unskippable_vmbuffer);

		if (next_unskippable_block >= vacrel->next_eager_scan_region_start)
			vacrel->eager_scan_remaining_fails =
				vacrel->eager_scan_max_fails_per_region;

		if (next_unskippable_block == rel_pages - 1)

		if (vacrel->eager_scan_remaining_fails > 0)
			next_unskippable_eager_scanned = true;

	vacrel->next_unskippable_block = next_unskippable_block;
	vacrel->next_unskippable_eager_scanned = next_unskippable_eager_scanned;
	vacrel->next_unskippable_vmbuffer = next_unskippable_vmbuffer;
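/*
 * Illustrative sketch (hypothetical helper, not PostgreSQL code): the
 * fragment above suggests that while choosing the next unskippable block,
 * the per-region eager-freeze failure budget is replenished whenever the
 * scan reaches the start of a new region, and that the final block of the
 * relation is treated specially (it is always scanned).  The advancing of
 * the region boundary below is an assumption of this sketch.
 */
static void
maybe_start_new_region(unsigned next_block, unsigned *region_start,
					   unsigned *remaining_fails, unsigned max_fails_per_region)
{
	if (next_block >= *region_start)
	{
		/* entering a new region: reset the failure budget */
		*remaining_fails = max_fails_per_region;

		/* advance the region boundary past the block about to be scanned */
		while (*region_start <= next_block)
			*region_start += EAGER_SCAN_REGION_SIZE;
	}
}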
		vacrel->vm_new_visible_pages++;
		vacrel->vm_new_visible_frozen_pages++;

				 errmsg("page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",

				 errmsg("page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",

		.vistest = vacrel->vistest,
		.cutoffs = &vacrel->cutoffs,

	if (vacrel->nindexes == 0)

		vacrel->new_frozen_tuple_pages++;

#ifdef USE_ASSERT_CHECKING

		vacrel->lpdead_item_pages++;

	vacrel->recently_dead_tuples += presult.recently_dead_tuples;

		vacrel->nonempty_pages = blkno + 1;

							presult.lpdead_items, vmbuffer,
									  vmbuffer, presult.vm_conflict_horizon,

			vacrel->vm_new_visible_pages++;
				vacrel->vm_new_visible_frozen_pages++;
			vacrel->vm_new_frozen_pages++;
				recently_dead_tuples,

	recently_dead_tuples = 0;
	missed_dead_tuples = 0;

			deadoffsets[lpdead_items++] = offnum;

									  &NoFreezePageRelfrozenXid,
									  &NoFreezePageRelminMxid))

				missed_dead_tuples++;
				recently_dead_tuples++;
				elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");

	vacrel->NewRelfrozenXid = NoFreezePageRelfrozenXid;
	vacrel->NewRelminMxid = NoFreezePageRelminMxid;

	if (vacrel->nindexes == 0)
		if (lpdead_items > 0)
			missed_dead_tuples += lpdead_items;
	else if (lpdead_items > 0)
		vacrel->lpdead_item_pages++;
	vacrel->lpdead_items += lpdead_items;

	vacrel->live_tuples += live_tuples;
	vacrel->recently_dead_tuples += recently_dead_tuples;
	vacrel->missed_dead_tuples += missed_dead_tuples;
	if (missed_dead_tuples > 0)
		vacrel->missed_dead_pages++;

	vacrel->nonempty_pages = blkno + 1;
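/*
 * Illustrative sketch (hypothetical helper, not the real lazy_scan_noprune):
 * the per-page tallies gathered above are folded into relation-level
 * counters roughly like this.  For a table with no indexes, LP_DEAD items
 * on a page we could not cleanup-lock count as "missed dead" tuples;
 * otherwise the page is remembered as having dead item identifiers.  Exact
 * placement of the lpdead_items accumulation is an assumption here.
 */
#include <stdbool.h>

typedef struct
{
	long		lpdead_items;
	long		live_tuples;
	long		recently_dead_tuples;
	long		missed_dead_tuples;
	unsigned	lpdead_item_pages;
	unsigned	missed_dead_pages;
} rel_counts;

static void
fold_noprune_page(rel_counts *rc, bool has_indexes,
				  long lpdead_items, long live_tuples,
				  long recently_dead_tuples, long missed_dead_tuples)
{
	if (!has_indexes)
	{
		/* one-pass strategy: these LP_DEAD items won't get a second chance */
		if (lpdead_items > 0)
			missed_dead_tuples += lpdead_items;
	}
	else if (lpdead_items > 0)
	{
		/* remember the page so index/heap vacuuming passes revisit it */
		rc->lpdead_item_pages++;
		rc->lpdead_items += lpdead_items;
	}

	rc->live_tuples += live_tuples;
	rc->recently_dead_tuples += recently_dead_tuples;
	rc->missed_dead_tuples += missed_dead_tuples;
	if (missed_dead_tuples > 0)
		rc->missed_dead_pages++;
}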
	if (!vacrel->do_index_vacuuming)

	if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)

		vacrel->do_index_vacuuming = false;

							vacrel->num_index_scans);

		   vacrel->dead_items_info->num_items == vacrel->lpdead_items);

	vacrel->num_index_scans++;
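/*
 * Illustrative sketch (hypothetical helper, not PostgreSQL code): the
 * bypass decision hinted at above.  Index vacuuming can be skipped when
 * only a tiny fraction of heap pages (BYPASS_THRESHOLD_PAGES, i.e. 2%)
 * contain LP_DEAD item identifiers.  The real rule also considers how much
 * dead-item memory has accumulated and whether an index scan has already
 * happened; those checks are omitted here.
 */
#include <stdbool.h>

static bool
should_bypass_index_vacuuming(bool consider_bypass,
							  unsigned rel_pages,
							  unsigned lpdead_item_pages)
{
	if (!consider_bypass || rel_pages == 0)
		return false;

	/* bypass only when dead item identifiers are confined to few pages */
	return lpdead_item_pages < rel_pages * BYPASS_THRESHOLD_PAGES;
}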
static BlockNumber
vacuum_reap_lp_read_stream_next(ReadStream *stream,
								void *callback_private_data,
								void *per_buffer_data)

		(vacrel->dead_items_info->num_items == vacrel->lpdead_items &&

			(errmsg("table \"%s\": removed %" PRId64 " dead item identifiers in %u pages",

										 vacrel->cutoffs.OldestXmin,
										 &all_frozen, &visibility_cutoff_xid,

			unused[nunused++] = toff;

									   vacrel->rel->rd_locator);

		vacrel->vm_new_visible_pages++;
			vacrel->vm_new_visible_frozen_pages++;
	vacrel->do_index_vacuuming = false;
	vacrel->do_index_cleanup = false;
	vacrel->do_rel_truncate = false;

			(errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
					vacrel->num_index_scans),
			 errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
			 errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
					 "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
	double		reltuples = vacrel->new_rel_tuples;
	bool		estimated_count = vacrel->scanned_pages < vacrel->rel_pages;

												  estimated_count, vacrel);

	ivinfo.analyze_only = false;
	ivinfo.report_progress = false;
	ivinfo.estimated_count = true;
	ivinfo.num_heap_tuples = reltuples;

								  vacrel->dead_items_info);

					  double reltuples, bool estimated_count,

	ivinfo.analyze_only = false;
	ivinfo.report_progress = false;
	ivinfo.estimated_count = estimated_count;
	ivinfo.num_heap_tuples = reltuples;
					(errmsg("\"%s\": stopping truncate due to conflicting lock request",

				(errmsg("table \"%s\": truncated %u to %u pages",

					 "prefetch size must be power of 2");

	blkno = vacrel->rel_pages;
	while (blkno > vacrel->nonempty_pages)

		if ((blkno % 32) == 0)

					(errmsg("table \"%s\": suspending truncate due to conflicting lock request",

	return vacrel->nonempty_pages;
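/*
 * Illustrative sketch (hypothetical helper, not the real
 * should_attempt_truncation): the truncation heuristic implied by
 * REL_TRUNCATE_MINIMUM and REL_TRUNCATE_FRACTION.  Truncation is only
 * attempted when the tail of empty pages is big enough to be worth taking
 * an AccessExclusiveLock: at least 1000 pages, or at least 1/16 of the
 * table.  The real check additionally requires that truncation was not
 * disabled and that the failsafe has not triggered.
 */
#include <stdbool.h>

static bool
worth_attempting_truncation(unsigned rel_pages, unsigned nonempty_pages)
{
	unsigned	possibly_freeable = rel_pages - nonempty_pages;

	return possibly_freeable > 0 &&
		(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
		 possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);
}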
	if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)

				(errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",

											   vacrel->nindexes, nworkers,
											   &vacrel->dead_items_info);

	vacrel->dead_items_info = dead_items_info;

	vacrel->num_dead_items_resets++;

										 &vacrel->dead_items_info);

	vacrel->dead_items_info->num_items = 0;
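/*
 * Illustrative sketch (not the actual dead_items_alloc/dead_items_reset
 * bodies): how a backend-local dead-items store is typically created,
 * filled and reset with the TidStore API, assuming the declarations in
 * tidstore.h.  A non-parallel VACUUM is assumed here; parallel_vacuum_init()
 * provides the shared-memory variant.
 */
static void
dead_items_sketch(size_t max_bytes, BlockNumber blkno,
				  OffsetNumber *offsets, int num_offsets)
{
	TidStore   *dead_items;

	/* create a backend-local, insert-only store capped at max_bytes */
	dead_items = TidStoreCreateLocal(max_bytes, true);

	/* record the LP_DEAD offsets collected for one heap block */
	TidStoreSetBlockOffsets(dead_items, blkno, offsets, num_offsets);

	/* index and heap vacuuming run once memory use approaches the cap */
	if (TidStoreMemoryUsage(dead_items) > max_bytes)
	{
		/* ... vacuum indexes and heap, then reset ... */
	}

	/* a "reset" destroys the store and starts over with an empty one */
	TidStoreDestroy(dead_items);
	dead_items = TidStoreCreateLocal(max_bytes, true);
	(void) dead_items;
}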
#ifdef USE_ASSERT_CHECKING

								 visibility_cutoff_xid,

	bool		all_visible = true;

#ifdef USE_ASSERT_CHECKING
		Assert(deadoffsets[i - 1] < deadoffsets[i]);

		 offnum <= maxoff && all_visible;

			*all_frozen = all_visible = false;

					all_visible = false;
					*all_frozen = false;

					all_visible = false;
					*all_frozen = false;

					*visibility_cutoff_xid = xmin;

				if (all_visible && *all_frozen &&
					*all_frozen = false;

				all_visible = false;
				*all_frozen = false;

				elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
	int			nindexes = vacrel->nindexes;

	for (int idx = 0; idx < nindexes; idx++)
				errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
				errcontext("while scanning block %u of relation \"%s.%s\"",
				errcontext("while scanning relation \"%s.%s\"",
				errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
				errcontext("while vacuuming block %u of relation \"%s.%s\"",
				errcontext("while vacuuming relation \"%s.%s\"",
			errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
			errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
			errcontext("while truncating relation \"%s.%s\" to %u blocks",