/*
 * Thresholds controlling whether VACUUM attempts to truncate empty pages
 * off the end of the heap: truncation is worthwhile only when a minimum
 * number of pages (or a minimum fraction of the table) could be released.
 * NOTE(review): the precise test lives in should_attempt_truncation(),
 * whose body is not visible here — confirm against that function.
 */
169#define REL_TRUNCATE_MINIMUM 1000
170#define REL_TRUNCATE_FRACTION 16
/*
 * Timing constants (milliseconds) used while holding/acquiring the
 * AccessExclusiveLock needed for heap truncation: how often to check for
 * conflicting lock requests, how long to sleep between lock attempts, and
 * the overall timeout before giving up on truncation.  NOTE(review): used
 * by lazy_truncate_heap()/count_nondeletable_pages(), not fully visible.
 */
179#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL 20
180#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50
181#define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000
/*
 * Fraction of rel_pages with LP_DEAD items below which index vacuuming
 * may be bypassed as not worth the cost (the "bypass optimization").
 */
187#define BYPASS_THRESHOLD_PAGES 0.02
/*
 * Re-check the wraparound failsafe after every 4GB worth of heap blocks
 * processed (expressed in blocks, hence the division by BLCKSZ).
 */
193#define FAILSAFE_EVERY_PAGES \
194 ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
/*
 * Vacuum the free space map after every 8GB worth of heap blocks, so FSM
 * updates become visible to other backends before the whole scan finishes.
 */
202#define VACUUM_FSM_EVERY_PAGES \
203 ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
/*
 * Minimum run length of consecutive skippable (all-visible) blocks before
 * the scan actually skips them; shorter runs are read anyway.
 * NOTE(review): applied in the next-unskippable-block logic — confirm.
 */
209#define SKIP_PAGES_THRESHOLD ((BlockNumber) 32)
/*
 * Prefetch distance used while counting nondeletable pages during
 * truncation.  Must be a power of 2 (enforced by a StaticAssert near its
 * use: "prefetch size must be power of 2").
 */
215#define PREFETCH_SIZE ((BlockNumber) 32)
/* True iff a parallel vacuum state (vacrel->pvs) has been initialized. */
221#define ParallelVacuumIsActive(vacrel) ((vacrel)->pvs != NULL)
/*
 * Cap used when budgeting eager-freeze work: successes are limited to this
 * fraction of the eligible blocks.  NOTE(review): feeds
 * eager_scan_remaining_successes in heap_vacuum_eager_scan_setup() —
 * confirm against that function's full body.
 */
241#define MAX_EAGER_FREEZE_SUCCESS_RATE 0.2
/*
 * Size, in heap blocks, of one eager-scan region; the per-region failure
 * counter (eager_scan_remaining_fails) is reset at each region boundary.
 */
250#define EAGER_SCAN_REGION_SIZE 4096
430 void *callback_private_data,
431 void *per_buffer_data);
458 bool estimated_count,
511 vacrel->eager_scan_max_fails_per_region = 0;
512 vacrel->eager_scan_remaining_fails = 0;
513 vacrel->eager_scan_remaining_successes = 0;
554 vacrel->cutoffs.FreezeLimit))
560 vacrel->cutoffs.MultiXactCutoff))
574 vacrel->eager_scan_remaining_successes =
579 if (
vacrel->eager_scan_remaining_successes == 0)
596 vacrel->eager_scan_max_fails_per_region =
607 vacrel->eager_scan_remaining_fails =
608 vacrel->eager_scan_max_fails_per_region *
702 vacrel->bstrategy = bstrategy;
703 if (instrument &&
vacrel->nindexes > 0)
707 for (
int i = 0;
i <
vacrel->nindexes;
i++)
729 vacrel->consider_bypass_optimization =
true;
730 vacrel->do_index_vacuuming =
true;
731 vacrel->do_index_cleanup =
true;
736 vacrel->do_index_vacuuming =
false;
737 vacrel->do_index_cleanup =
false;
742 vacrel->consider_bypass_optimization =
false;
751 vacrel->scanned_pages = 0;
752 vacrel->eager_scanned_pages = 0;
753 vacrel->removed_pages = 0;
754 vacrel->new_frozen_tuple_pages = 0;
755 vacrel->lpdead_item_pages = 0;
756 vacrel->missed_dead_pages = 0;
757 vacrel->nonempty_pages = 0;
761 vacrel->new_rel_tuples = 0;
762 vacrel->new_live_tuples = 0;
767 vacrel->num_index_scans = 0;
768 vacrel->num_dead_items_resets = 0;
769 vacrel->total_dead_items_bytes = 0;
770 vacrel->tuples_deleted = 0;
771 vacrel->tuples_frozen = 0;
774 vacrel->recently_dead_tuples = 0;
775 vacrel->missed_dead_tuples = 0;
777 vacrel->new_all_visible_pages = 0;
778 vacrel->new_all_visible_all_frozen_pages = 0;
779 vacrel->new_all_frozen_pages = 0;
781 vacrel->worker_usage.vacuum.nlaunched = 0;
782 vacrel->worker_usage.vacuum.nplanned = 0;
783 vacrel->worker_usage.cleanup.nlaunched = 0;
784 vacrel->worker_usage.cleanup.nplanned = 0;
815 vacrel->skippedallvis =
false;
823 vacrel->aggressive =
true;
827 vacrel->skipwithvm = skipwithvm;
846 (
errmsg(
"aggressively vacuuming \"%s.%s.%s\"",
851 (
errmsg(
"vacuuming \"%s.%s.%s\"",
866#ifdef USE_INJECTION_POINTS
904 if (
vacrel->do_index_cleanup)
930 vacrel->cutoffs.relfrozenxid,
931 vacrel->NewRelfrozenXid));
934 vacrel->cutoffs.relminmxid,
936 if (
vacrel->skippedallvis)
990 vacrel->recently_dead_tuples +
991 vacrel->missed_dead_tuples,
1037 msgfmt =
_(
"finished vacuuming \"%s.%s.%s\": index scans: %d\n");
1048 msgfmt =
_(
"automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
1050 msgfmt =
_(
"automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
1055 msgfmt =
_(
"automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
1057 msgfmt =
_(
"automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
1063 vacrel->num_index_scans);
1064 appendStringInfo(&
buf,
_(
"pages: %u removed, %u remain, %u scanned (%.2f%% of total), %u eagerly scanned\n"),
1069 100.0 *
vacrel->scanned_pages /
1071 vacrel->eager_scanned_pages);
1073 _(
"tuples: %" PRId64 " removed, %" PRId64 " remain, %" PRId64 " are dead but not yet removable\n"),
1076 vacrel->recently_dead_tuples);
1077 if (
vacrel->missed_dead_tuples > 0)
1079 _(
"tuples missed: %" PRId64 " dead from %u pages not removed due to cleanup lock contention\n"),
1080 vacrel->missed_dead_tuples,
1081 vacrel->missed_dead_pages);
1083 vacrel->cutoffs.OldestXmin);
1085 _(
"removable cutoff: %u, which was %d XIDs old when operation ended\n"),
1090 vacrel->cutoffs.relfrozenxid);
1092 _(
"new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
1098 vacrel->cutoffs.relminmxid);
1100 _(
"new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
1104 vacrel->new_frozen_tuple_pages,
1106 100.0 *
vacrel->new_frozen_tuple_pages /
1111 _(
"visibility map: %u pages set all-visible, %u pages set all-frozen (%u were all-visible)\n"),
1112 vacrel->new_all_visible_pages,
1113 vacrel->new_all_visible_all_frozen_pages +
1114 vacrel->new_all_frozen_pages,
1115 vacrel->new_all_frozen_pages);
1116 if (
vacrel->do_index_vacuuming)
1118 if (
vacrel->nindexes == 0 ||
vacrel->num_index_scans == 0)
1123 msgfmt =
_(
"%u pages from table (%.2f%% of total) had %" PRId64 " dead item identifiers removed\n");
1132 msgfmt =
_(
"%u pages from table (%.2f%% of total) have %" PRId64 " dead item identifiers\n");
1135 vacrel->lpdead_item_pages,
1140 if (
vacrel->worker_usage.vacuum.nplanned > 0)
1142 _(
"parallel workers: index vacuum: %d planned, %d launched in total\n"),
1143 vacrel->worker_usage.vacuum.nplanned,
1144 vacrel->worker_usage.vacuum.nlaunched);
1146 if (
vacrel->worker_usage.cleanup.nplanned > 0)
1148 _(
"parallel workers: index cleanup: %d planned, %d launched\n"),
1149 vacrel->worker_usage.cleanup.nplanned,
1150 vacrel->worker_usage.cleanup.nlaunched);
1152 for (
int i = 0;
i <
vacrel->nindexes;
i++)
1160 _(
"index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
1217 ngettext(
"memory usage: dead item storage %.2f MB accumulated across %d reset (limit %.2f MB each)\n",
1218 "memory usage: dead item storage %.2f MB accumulated across %d resets (limit %.2f MB each)\n",
1219 vacrel->num_dead_items_resets),
1220 (
double)
vacrel->total_dead_items_bytes / (1024 * 1024),
1221 vacrel->num_dead_items_resets,
1232 for (
int i = 0;
i <
vacrel->nindexes;
i++)
1286 vacrel->eager_scan_remaining_successes;
1304 vacrel->next_unskippable_eager_scanned =
false;
1328 void *per_buffer_data =
NULL;
1343 if (
vacrel->scanned_pages > 0 &&
1355 if (
vacrel->dead_items_info->num_items > 0 &&
1371 vacrel->consider_bypass_optimization =
false;
1401 vacrel->eager_scanned_pages++;
1494 if (
vacrel->eager_scan_remaining_successes > 0)
1495 vacrel->eager_scan_remaining_successes--;
1497 if (
vacrel->eager_scan_remaining_successes == 0)
1506 if (
vacrel->eager_scan_max_fails_per_region > 0)
1508 (
errmsg(
"disabling eager scanning after freezing %u eagerly scanned blocks of relation \"%s.%s.%s\"",
1518 vacrel->eager_scan_remaining_fails = 0;
1520 vacrel->eager_scan_max_fails_per_region = 0;
1523 else if (
vacrel->eager_scan_remaining_fails > 0)
1524 vacrel->eager_scan_remaining_fails--;
1546 if (
vacrel->nindexes == 0
1547 || !
vacrel->do_index_vacuuming
1595 vacrel->missed_dead_tuples;
1603 if (
vacrel->dead_items_info->num_items > 0)
1649 void *callback_private_data,
1650 void *per_buffer_data)
1703 vacrel->skippedallvis =
true;
1717 *((
bool *) per_buffer_data) =
false;
1718 return vacrel->current_block;
1729 *((
bool *) per_buffer_data) =
vacrel->next_unskippable_eager_scanned;
1730 return vacrel->current_block;
1752 Buffer next_unskippable_vmbuffer =
vacrel->next_unskippable_vmbuffer;
1753 bool next_unskippable_eager_scanned =
false;
1757 for (;; next_unskippable_block++)
1760 next_unskippable_block,
1761 &next_unskippable_vmbuffer);
1770 if (next_unskippable_block >=
vacrel->next_eager_scan_region_start)
1772 vacrel->eager_scan_remaining_fails =
1773 vacrel->eager_scan_max_fails_per_region;
1797 if (next_unskippable_block == rel_pages - 1)
1823 if (
vacrel->eager_scan_remaining_fails > 0)
1825 next_unskippable_eager_scanned =
true;
1837 vacrel->next_unskippable_block = next_unskippable_block;
1838 vacrel->next_unskippable_eager_scanned = next_unskippable_eager_scanned;
1839 vacrel->next_unskippable_vmbuffer = next_unskippable_vmbuffer;
1958 vacrel->rel->rd_locator);
1982 vacrel->new_all_visible_pages++;
1983 vacrel->new_all_visible_all_frozen_pages++;
2034 .vmbuffer = vmbuffer,
2037 .vistest =
vacrel->vistest,
2038 .cutoffs = &
vacrel->cutoffs,
2058 if (
vacrel->nindexes == 0)
2087 vacrel->new_frozen_tuple_pages++;
2095 vacrel->lpdead_item_pages++;
2110 if (
presult.newly_all_visible)
2111 vacrel->new_all_visible_pages++;
2112 if (
presult.newly_all_visible_frozen)
2113 vacrel->new_all_visible_all_frozen_pages++;
2115 vacrel->new_all_frozen_pages++;
2125 vacrel->recently_dead_tuples +=
presult.recently_dead_tuples;
2129 vacrel->nonempty_pages = blkno + 1;
2168 recently_dead_tuples,
2182 recently_dead_tuples = 0;
2183 missed_dead_tuples = 0;
2211 deadoffsets[lpdead_items++] = offnum;
2218 &NoFreezePageRelfrozenXid,
2219 &NoFreezePageRelminMxid))
2272 missed_dead_tuples++;
2279 recently_dead_tuples++;
2288 elog(
ERROR,
"unexpected HeapTupleSatisfiesVacuum result");
2300 vacrel->NewRelfrozenXid = NoFreezePageRelfrozenXid;
2301 vacrel->NewRelminMxid = NoFreezePageRelminMxid;
2304 if (
vacrel->nindexes == 0)
2307 if (lpdead_items > 0)
2318 missed_dead_tuples += lpdead_items;
2321 else if (lpdead_items > 0)
2328 vacrel->lpdead_item_pages++;
2332 vacrel->lpdead_items += lpdead_items;
2338 vacrel->live_tuples += live_tuples;
2339 vacrel->recently_dead_tuples += recently_dead_tuples;
2340 vacrel->missed_dead_tuples += missed_dead_tuples;
2341 if (missed_dead_tuples > 0)
2342 vacrel->missed_dead_pages++;
2346 vacrel->nonempty_pages = blkno + 1;
2377 if (!
vacrel->do_index_vacuuming)
2404 if (
vacrel->consider_bypass_optimization &&
vacrel->rel_pages > 0)
2452 vacrel->do_index_vacuuming =
false;
2557 &(
vacrel->worker_usage.vacuum));
2575 vacrel->dead_items_info->num_items ==
vacrel->lpdead_items);
2585 vacrel->num_index_scans++;
2603 void *callback_private_data,
2604 void *per_buffer_data)
2737 (
vacrel->dead_items_info->num_items ==
vacrel->lpdead_items &&
2741 (
errmsg(
"table \"%s\": removed %" PRId64 " dead item identifiers in %u pages",
2791 &all_frozen, &newest_live_xid,
2820 unused[nunused++] =
toff;
2838 vacrel->rel->rd_locator);
2868 vacrel->new_all_visible_pages++;
2870 vacrel->new_all_visible_all_frozen_pages++;
2915 vacrel->do_index_vacuuming =
false;
2916 vacrel->do_index_cleanup =
false;
2917 vacrel->do_rel_truncate =
false;
2923 (
errmsg(
"bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
2925 vacrel->num_index_scans),
2926 errdetail(
"The table's relfrozenxid or relminmxid is too far in the past."),
2927 errhint(
"Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
2928 "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
2946 double reltuples =
vacrel->new_rel_tuples;
2947 bool estimated_count =
vacrel->scanned_pages <
vacrel->rel_pages;
2979 estimated_count,
vacrel);
2992 &(
vacrel->worker_usage.cleanup));
3021 ivinfo.analyze_only =
false;
3022 ivinfo.report_progress =
false;
3023 ivinfo.estimated_count =
true;
3025 ivinfo.num_heap_tuples = reltuples;
3042 vacrel->dead_items_info);
3063 double reltuples,
bool estimated_count,
3071 ivinfo.analyze_only =
false;
3072 ivinfo.report_progress =
false;
3073 ivinfo.estimated_count = estimated_count;
3076 ivinfo.num_heap_tuples = reltuples;
3190 (
errmsg(
"\"%s\": stopping truncate due to conflicting lock request",
3260 (
errmsg(
"table \"%s\": truncated %u to %u pages",
3276 "prefetch size must be power of 2");
3291 blkno =
vacrel->rel_pages;
3293 while (blkno >
vacrel->nonempty_pages)
3309 if ((blkno % 32) == 0)
3323 (
errmsg(
"table \"%s\": suspending truncate due to conflicting lock request",
3405 return vacrel->nonempty_pages;
3428 if (nworkers >= 0 &&
vacrel->nindexes > 1 &&
vacrel->do_index_vacuuming)
3442 (
errmsg(
"disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
3447 vacrel->nindexes, nworkers,
3459 &
vacrel->dead_items_info);
3472 vacrel->dead_items_info = dead_items_info;
3506 vacrel->num_dead_items_resets++;
3513 &
vacrel->dead_items_info);
3522 vacrel->dead_items_info->num_items = 0;
3542#ifdef USE_ASSERT_CHECKING
3617 bool all_visible =
true;
3625#ifdef USE_ASSERT_CHECKING
3630 Assert(deadoffsets[
i - 1] < deadoffsets[
i]);
3636 offnum <= maxoff && all_visible;
3666 *all_frozen = all_visible =
false;
3690 all_visible =
false;
3691 *all_frozen =
false;
3711 *newest_live_xid = xmin;
3714 if (all_visible && *all_frozen &&
3716 *all_frozen =
false;
3725 all_visible =
false;
3726 *all_frozen =
false;
3730 elog(
ERROR,
"unexpected HeapTupleSatisfiesVacuum result");
3745 all_visible =
false;
3746 *all_frozen =
false;
3762 int nindexes =
vacrel->nindexes;
3767 for (
int idx = 0;
idx < nindexes;
idx++)
3804 errcontext(
"while scanning block %u offset %u of relation \"%s.%s\"",
3807 errcontext(
"while scanning block %u of relation \"%s.%s\"",
3811 errcontext(
"while scanning relation \"%s.%s\"",
3819 errcontext(
"while vacuuming block %u offset %u of relation \"%s.%s\"",
3822 errcontext(
"while vacuuming block %u of relation \"%s.%s\"",
3826 errcontext(
"while vacuuming relation \"%s.%s\"",
3831 errcontext(
"while vacuuming index \"%s\" of relation \"%s.%s\"",
3836 errcontext(
"while cleaning up index \"%s\" of relation \"%s.%s\"",
3842 errcontext(
"while truncating relation \"%s.%s\" to %u blocks",
Datum idx(PG_FUNCTION_ARGS)
void TimestampDifference(TimestampTz start_time, TimestampTz stop_time, long *secs, int *microsecs)
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
TimestampTz GetCurrentTimestamp(void)
void pgstat_progress_start_command(ProgressCommandType cmdtype, Oid relid)
void pgstat_progress_update_param(int index, int64 val)
void pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val)
void pgstat_progress_end_command(void)
@ PROGRESS_COMMAND_VACUUM
PgBackendStatus * MyBEEntry
#define InvalidBlockNumber
static bool BlockNumberIsValid(BlockNumber blockNumber)
void CheckBufferIsPinnedOnce(Buffer buffer)
BlockNumber BufferGetBlockNumber(Buffer buffer)
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
void ReleaseBuffer(Buffer buffer)
void UnlockReleaseBuffer(Buffer buffer)
void MarkBufferDirty(Buffer buffer)
void LockBufferForCleanup(Buffer buffer)
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
bool ConditionalLockBufferForCleanup(Buffer buffer)
#define RelationGetNumberOfBlocks(reln)
static Page BufferGetPage(Buffer buffer)
static void LockBuffer(Buffer buffer, BufferLockMode mode)
static bool BufferIsValid(Buffer bufnum)
Size PageGetHeapFreeSpace(const PageData *page)
void PageTruncateLinePointerArray(Page page)
static bool PageIsEmpty(const PageData *page)
static bool PageIsAllVisible(const PageData *page)
static bool PageIsNew(const PageData *page)
#define SizeOfPageHeaderData
static void PageSetAllVisible(Page page)
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
static void * PageGetItem(PageData *page, const ItemIdData *itemId)
#define PageClearPrunable(page)
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
#define ngettext(s, p, n)
#define Assert(condition)
TransactionId MultiXactId
#define StaticAssertDecl(condition, errmessage)
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets))
ErrorContextCallback * error_context_stack
int errhint(const char *fmt,...) pg_attribute_printf(1
int errdetail(const char *fmt,...) pg_attribute_printf(1
int int errmsg_internal(const char *fmt,...) pg_attribute_printf(1
#define ereport(elevel,...)
#define palloc_object(type)
#define palloc_array(type, count)
#define palloc0_object(type)
void FreeSpaceMapVacuumRange(Relation rel, BlockNumber start, BlockNumber end)
Size GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk)
void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
volatile uint32 CritSectionCount
bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
bool heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid)
#define HEAP_PAGE_PRUNE_FREEZE
#define HEAP_PAGE_PRUNE_ALLOW_FAST_PATH
@ HEAPTUPLE_RECENTLY_DEAD
@ HEAPTUPLE_INSERT_IN_PROGRESS
@ HEAPTUPLE_DELETE_IN_PROGRESS
#define HEAP_PAGE_PRUNE_SET_VM
#define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW
HTSV_Result HeapTupleSatisfiesVacuumHorizon(HeapTuple htup, Buffer buffer, TransactionId *dead_after)
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
HeapTupleHeaderData * HeapTupleHeader
static TransactionId HeapTupleHeaderGetXmin(const HeapTupleHeaderData *tup)
#define MaxHeapTuplesPerPage
static bool HeapTupleHeaderXminCommitted(const HeapTupleHeaderData *tup)
#define INJECTION_POINT(name, arg)
#define INSTR_TIME_SET_CURRENT(t)
#define INSTR_TIME_SUBTRACT(x, y)
#define INSTR_TIME_GET_MICROSEC(t)
void WalUsageAccumDiff(WalUsage *dst, const WalUsage *add, const WalUsage *sub)
BufferUsage pgBufferUsage
void BufferUsageAccumDiff(BufferUsage *dst, const BufferUsage *add, const BufferUsage *sub)
static int pg_cmp_u16(uint16 a, uint16 b)
#define ItemIdGetLength(itemId)
#define ItemIdIsNormal(itemId)
#define ItemIdIsDead(itemId)
#define ItemIdIsUsed(itemId)
#define ItemIdSetUnused(itemId)
#define ItemIdIsRedirected(itemId)
#define ItemIdHasStorage(itemId)
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
void ResetLatch(Latch *latch)
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
void UnlockRelation(Relation relation, LOCKMODE lockmode)
bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
#define AccessExclusiveLock
char * get_database_name(Oid dbid)
char * get_namespace_name(Oid nspid)
char * pstrdup(const char *in)
void pfree(void *pointer)
void * palloc0(Size size)
#define AmAutoVacuumWorkerProcess()
#define START_CRIT_SECTION()
#define CHECK_FOR_INTERRUPTS()
#define END_CRIT_SECTION()
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2)
#define MultiXactIdIsValid(multi)
#define InvalidMultiXactId
#define InvalidOffsetNumber
#define OffsetNumberIsValid(offsetNumber)
#define OffsetNumberNext(offsetNumber)
#define FirstOffsetNumber
uint32 pg_prng_uint32(pg_prng_state *state)
pg_prng_state pg_global_prng_state
const char * pg_rusage_show(const PGRUsage *ru0)
void pg_rusage_init(PGRUsage *ru0)
static char buf[DEFAULT_XLOG_SEG_SIZE]
PgStat_Counter pgStatBlockReadTime
PgStat_Counter pgStatBlockWriteTime
void pgstat_report_vacuum(Relation rel, PgStat_Counter livetuples, PgStat_Counter deadtuples, TimestampTz starttime)
#define qsort(a, b, c, d)
GlobalVisState * GlobalVisTestFor(Relation rel)
bool GlobalVisTestXidConsideredRunning(GlobalVisState *state, TransactionId xid, bool allow_update)
#define PROGRESS_VACUUM_PHASE_FINAL_CLEANUP
#define PROGRESS_VACUUM_MODE
#define PROGRESS_VACUUM_MODE_NORMAL
#define PROGRESS_VACUUM_STARTED_BY_AUTOVACUUM
#define PROGRESS_VACUUM_DEAD_TUPLE_BYTES
#define PROGRESS_VACUUM_PHASE_SCAN_HEAP
#define PROGRESS_VACUUM_TOTAL_HEAP_BLKS
#define PROGRESS_VACUUM_PHASE
#define PROGRESS_VACUUM_DELAY_TIME
#define PROGRESS_VACUUM_STARTED_BY_AUTOVACUUM_WRAPAROUND
#define PROGRESS_VACUUM_NUM_INDEX_VACUUMS
#define PROGRESS_VACUUM_PHASE_VACUUM_HEAP
#define PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS
#define PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES
#define PROGRESS_VACUUM_STARTED_BY_MANUAL
#define PROGRESS_VACUUM_HEAP_BLKS_SCANNED
#define PROGRESS_VACUUM_STARTED_BY
#define PROGRESS_VACUUM_PHASE_INDEX_CLEANUP
#define PROGRESS_VACUUM_PHASE_VACUUM_INDEX
#define PROGRESS_VACUUM_MODE_FAILSAFE
#define PROGRESS_VACUUM_INDEXES_PROCESSED
#define PROGRESS_VACUUM_INDEXES_TOTAL
#define PROGRESS_VACUUM_MODE_AGGRESSIVE
#define PROGRESS_VACUUM_HEAP_BLKS_VACUUMED
#define PROGRESS_VACUUM_PHASE_TRUNCATE
void heap_page_prune_and_freeze(PruneFreezeParams *params, PruneFreezeResult *presult, OffsetNumber *off_loc, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
void log_heap_prune_and_freeze(Relation relation, Buffer buffer, Buffer vmbuffer, uint8 vmflags, TransactionId conflict_xid, bool cleanup_lock, PruneReason reason, HeapTupleFreeze *frozen, int nfrozen, OffsetNumber *redirected, int nredirected, OffsetNumber *dead, int ndead, OffsetNumber *unused, int nunused)
Buffer read_stream_next_buffer(ReadStream *stream, void **per_buffer_data)
ReadStream * read_stream_begin_relation(int flags, BufferAccessStrategy strategy, Relation rel, ForkNumber forknum, ReadStreamBlockNumberCB callback, void *callback_private_data, size_t per_buffer_data_size)
void read_stream_end(ReadStream *stream)
#define READ_STREAM_MAINTENANCE
#define READ_STREAM_USE_BATCHING
#define RelationGetRelid(relation)
#define RelationGetRelationName(relation)
#define RelationNeedsWAL(relation)
#define RelationUsesLocalBuffers(relation)
#define RelationGetNamespace(relation)
void RelationTruncate(Relation rel, BlockNumber nblocks)
void appendStringInfo(StringInfo str, const char *fmt,...)
void appendStringInfoString(StringInfo str, const char *s)
void initStringInfo(StringInfo str)
int64 shared_blks_dirtied
struct ErrorContextCallback * previous
void(* callback)(void *arg)
BlockNumber pages_deleted
BlockNumber pages_newly_deleted
BlockNumber next_eager_scan_region_start
ParallelVacuumState * pvs
bool next_unskippable_eager_scanned
VacDeadItemsInfo * dead_items_info
PVWorkerUsage worker_usage
Buffer next_unskippable_vmbuffer
BlockNumber nonempty_pages
BlockNumber eager_scan_remaining_fails
BlockNumber scanned_pages
int num_dead_items_resets
BlockNumber new_frozen_tuple_pages
BlockNumber removed_pages
IndexBulkDeleteResult ** indstats
BlockNumber new_all_frozen_pages
BlockNumber new_all_visible_all_frozen_pages
BlockNumber new_all_visible_pages
TransactionId NewRelfrozenXid
bool consider_bypass_optimization
Size total_dead_items_bytes
BlockNumber next_unskippable_block
int64 recently_dead_tuples
BlockNumber missed_dead_pages
BlockNumber current_block
BufferAccessStrategy bstrategy
BlockNumber eager_scan_remaining_successes
BlockNumber lpdead_item_pages
BlockNumber eager_scanned_pages
MultiXactId NewRelminMxid
struct VacuumCutoffs cutoffs
BlockNumber eager_scan_max_fails_per_region
int64 st_progress_param[PGSTAT_NUM_PROGRESS_PARAM]
int log_vacuum_min_duration
VacOptValue index_cleanup
double max_eager_freeze_failure_rate
TidStoreIter * TidStoreBeginIterate(TidStore *ts)
void TidStoreEndIterate(TidStoreIter *iter)
TidStoreIterResult * TidStoreIterateNext(TidStoreIter *iter)
TidStore * TidStoreCreateLocal(size_t max_bytes, bool insert_only)
void TidStoreDestroy(TidStore *ts)
int TidStoreGetBlockOffsets(TidStoreIterResult *result, OffsetNumber *offsets, int max_offsets)
void TidStoreSetBlockOffsets(TidStore *ts, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
size_t TidStoreMemoryUsage(TidStore *ts)
static bool TransactionIdFollows(TransactionId id1, TransactionId id2)
static TransactionId ReadNextTransactionId(void)
#define InvalidTransactionId
static bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
#define TransactionIdIsValid(xid)
#define TransactionIdIsNormal(xid)
static bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
bool track_cost_delay_timing
void vac_open_indexes(Relation relation, LOCKMODE lockmode, int *nindexes, Relation **Irel)
IndexBulkDeleteResult * vac_cleanup_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat)
void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
void vacuum_delay_point(bool is_analyze)
bool vacuum_get_cutoffs(Relation rel, const VacuumParams *params, struct VacuumCutoffs *cutoffs)
bool vacuum_xid_failsafe_check(const struct VacuumCutoffs *cutoffs)
bool VacuumFailsafeActive
double vac_estimate_reltuples(Relation relation, BlockNumber total_pages, BlockNumber scanned_pages, double scanned_tuples)
void vac_update_relstats(Relation relation, BlockNumber num_pages, double num_tuples, BlockNumber num_all_visible_pages, BlockNumber num_all_frozen_pages, bool hasindex, TransactionId frozenxid, MultiXactId minmulti, bool *frozenxid_updated, bool *minmulti_updated, bool in_outer_xact)
IndexBulkDeleteResult * vac_bulkdel_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat, TidStore *dead_items, VacDeadItemsInfo *dead_items_info)
@ VACOPTVALUE_UNSPECIFIED
#define VACOPT_DISABLE_PAGE_SKIPPING
static int lazy_scan_prune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool *has_lpdead_items, bool *vm_page_frozen)
static void dead_items_cleanup(LVRelState *vacrel)
static void update_relstats_all_indexes(LVRelState *vacrel)
static void dead_items_add(LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
static BlockNumber heap_vac_scan_next_block(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL
static void vacuum_error_callback(void *arg)
#define EAGER_SCAN_REGION_SIZE
static void lazy_truncate_heap(LVRelState *vacrel)
static void lazy_vacuum(LVRelState *vacrel)
static void lazy_cleanup_all_indexes(LVRelState *vacrel)
#define MAX_EAGER_FREEZE_SUCCESS_RATE
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
static BlockNumber vacuum_reap_lp_read_stream_next(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
#define REL_TRUNCATE_MINIMUM
static bool should_attempt_truncation(LVRelState *vacrel)
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
@ VACUUM_ERRCB_PHASE_SCAN_HEAP
@ VACUUM_ERRCB_PHASE_VACUUM_INDEX
@ VACUUM_ERRCB_PHASE_TRUNCATE
@ VACUUM_ERRCB_PHASE_INDEX_CLEANUP
@ VACUUM_ERRCB_PHASE_VACUUM_HEAP
@ VACUUM_ERRCB_PHASE_UNKNOWN
static void lazy_scan_heap(LVRelState *vacrel)
#define ParallelVacuumIsActive(vacrel)
static void restore_vacuum_error_info(LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
static IndexBulkDeleteResult * lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
static void find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis)
static void dead_items_reset(LVRelState *vacrel)
#define REL_TRUNCATE_FRACTION
static bool lazy_check_wraparound_failsafe(LVRelState *vacrel)
static IndexBulkDeleteResult * lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
static void lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
static void heap_vacuum_eager_scan_setup(LVRelState *vacrel, const VacuumParams *params)
static bool heap_page_would_be_all_visible(Relation rel, Buffer buf, GlobalVisState *vistest, bool allow_update_vistest, OffsetNumber *deadoffsets, int ndeadoffsets, bool *all_frozen, TransactionId *newest_live_xid, OffsetNumber *logging_offnum)
void heap_vacuum_rel(Relation rel, const VacuumParams *params, BufferAccessStrategy bstrategy)
#define BYPASS_THRESHOLD_PAGES
static void dead_items_alloc(LVRelState *vacrel, int nworkers)
#define VACUUM_TRUNCATE_LOCK_TIMEOUT
static bool lazy_vacuum_all_indexes(LVRelState *vacrel)
static void update_vacuum_error_info(LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
static BlockNumber count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
#define SKIP_PAGES_THRESHOLD
#define FAILSAFE_EVERY_PAGES
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL
static int cmpOffsetNumbers(const void *a, const void *b)
static void lazy_vacuum_heap_rel(LVRelState *vacrel)
#define VACUUM_FSM_EVERY_PAGES
void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, bool estimated_count, PVWorkerStats *wstats)
TidStore * parallel_vacuum_get_dead_items(ParallelVacuumState *pvs, VacDeadItemsInfo **dead_items_info_p)
void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, PVWorkerStats *wstats)
ParallelVacuumState * parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, int nrequested_workers, int vac_work_mem, int elevel, BufferAccessStrategy bstrategy)
void parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs)
void parallel_vacuum_end(ParallelVacuumState *pvs, IndexBulkDeleteResult **istats)
void visibilitymap_set(BlockNumber heapBlk, Buffer vmBuf, uint8 flags, const RelFileLocator rlocator)
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
#define VISIBILITYMAP_VALID_BITS
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE
#define WL_EXIT_ON_PM_DEATH
bool IsInParallelMode(void)