#define REL_TRUNCATE_MINIMUM 1000
#define REL_TRUNCATE_FRACTION 16
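/*
 * Truncation is only attempted when the freeable tail of the table is at
 * least REL_TRUNCATE_MINIMUM pages, or at least 1/REL_TRUNCATE_FRACTION of
 * the table (see should_attempt_truncation below).
 */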
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL 20
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50
#define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000
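/*
 * Timing parameters (in milliseconds) for the truncate locking heuristics:
 * check for conflicting lock requests roughly every 20ms while counting
 * deletable pages, retry the AccessExclusiveLock every 50ms, and give up
 * on reacquiring it after 5000ms.
 */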
#define BYPASS_THRESHOLD_PAGES 0.02
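/*
 * Index vacuuming (and so heap vacuuming) may be bypassed when no more than
 * 2% of the table's pages have one or more LP_DEAD items.
 */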
#define FAILSAFE_EVERY_PAGES \
	((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
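/* Re-run the wraparound failsafe check after every 4GB worth of scanned blocks */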
#define VACUUM_FSM_EVERY_PAGES \
	((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
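/*
 * When the table has no indexes, the free space map is vacuumed after every
 * 8GB worth of pages rather than only once at the end of the heap pass.
 */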
#define SKIP_PAGES_THRESHOLD	((BlockNumber) 32)
#define PREFETCH_SIZE			((BlockNumber) 32)
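/*
 * Runs of all-visible pages shorter than SKIP_PAGES_THRESHOLD are read
 * anyway, so that skipping does not defeat the kernel's sequential
 * readahead; PREFETCH_SIZE bounds how far ahead count_nondeletable_pages
 * prefetches during its backwards scan.
 */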
#define ParallelVacuumIsActive(vacrel) ((vacrel)->pvs != NULL)
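/* vacrel->pvs is only set up when parallel index vacuuming is in use */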
/* Forward declarations of the helper routines excerpted below */
static BlockNumber lazy_scan_skip(LVRelState *vacrel, Buffer *vmbuffer,
								  BlockNumber next_block,
								  bool *next_unskippable_allvis,
								  bool *skipping_current_range);
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
								   BlockNumber blkno, Page page,
								   bool sharelock, Buffer vmbuffer);
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
							  BlockNumber blkno, Page page,
							  bool *hastup, bool *recordfreespace);
static IndexBulkDeleteResult *lazy_cleanup_one_index(Relation indrel,
													 IndexBulkDeleteResult *istat,
													 double reltuples, bool estimated_count,
													 LVRelState *vacrel);
static BlockNumber count_nondeletable_pages(LVRelState *vacrel,
											bool *lock_waiter_detected);
char	   **indnames = NULL;

errcallback.arg = vacrel;

if (instrument && vacrel->nindexes > 0)
if (aggressive)
	ereport(INFO,
			(errmsg("aggressively vacuuming \"%s.%s.%s\"",
					get_database_name(MyDatabaseId),
					vacrel->relnamespace, vacrel->relname)));
else
	ereport(INFO,
			(errmsg("vacuuming \"%s.%s.%s\"",
					get_database_name(MyDatabaseId),
					vacrel->relnamespace, vacrel->relname)));
if (new_rel_allvisible > new_rel_pages)	/* relallvisible may not exceed relpages */
	new_rel_allvisible = new_rel_pages;

vac_update_relstats(rel, new_rel_pages, new_live_tuples,
					new_rel_allvisible, vacrel->nindexes > 0,
					new_frozen_xid, new_min_multi,
					&frozenxid_updated, &minmulti_updated, false);
double		read_rate = 0,
			write_rate = 0;
memset(&walusage, 0, sizeof(WalUsage));
if (verbose)
{
	/* This errmsg format is shared by VACUUM VERBOSE and the autovacuum log */
	msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
}
else
{
	if (params->is_wraparound)
	{
		if (aggressive)
			msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
		else
			msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
	}
	else
	{
		if (aggressive)
			msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
		else
			msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
	}
}
				 orig_rel_pages == 0 ? 100.0 :
				 100.0 * vacrel->scanned_pages / orig_rel_pages);
appendStringInfo(&buf, _("tuples: %lld removed, %lld remain, %lld are dead but not yet removable\n"),
				 (long long) vacrel->tuples_deleted,
				 (long long) vacrel->new_rel_tuples,
				 (long long) vacrel->recently_dead_tuples);
if (vacrel->missed_dead_tuples > 0)
	appendStringInfo(&buf, _("tuples missed: %lld dead from %u pages not removed due to cleanup lock contention\n"),
					 (long long) vacrel->missed_dead_tuples, vacrel->missed_dead_pages);
appendStringInfo(&buf, _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
				 vacrel->cutoffs.OldestXmin, diff);
if (frozenxid_updated)
{
	diff = (int32) (vacrel->NewRelfrozenXid - vacrel->cutoffs.relfrozenxid);
	appendStringInfo(&buf, _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
					 vacrel->NewRelfrozenXid, diff);
}
if (minmulti_updated)
{
	diff = (int32) (vacrel->NewRelminMxid - vacrel->cutoffs.relminmxid);
	appendStringInfo(&buf, _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
					 vacrel->NewRelminMxid, diff);
}
appendStringInfo(&buf,
				 _("frozen: %u pages from table (%.2f%% of total) had %lld tuples frozen\n"),
				 vacrel->frozen_pages,
				 orig_rel_pages == 0 ? 100.0 :
				 100.0 * vacrel->frozen_pages / orig_rel_pages,
				 (long long) vacrel->tuples_frozen);
if (vacrel->do_index_vacuuming)
	msgfmt = _("%u pages from table (%.2f%% of total) had %lld dead item identifiers removed\n");
else
	msgfmt = _("%u pages from table (%.2f%% of total) have %lld dead item identifiers\n");
appendStringInfo(&buf, msgfmt,
				 vacrel->lpdead_item_pages,
				 orig_rel_pages == 0 ? 100.0 :
				 100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
				 (long long) vacrel->lpdead_items);
appendStringInfo(&buf,
				 _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
				 indnames[i], istat->num_pages,
				 istat->pages_newly_deleted,
				 istat->pages_deleted, istat->pages_free);
if (secs_dur > 0 || usecs_dur > 0)
{
	read_rate = (double) BLCKSZ * PageMissOp / (1024 * 1024) /
		(secs_dur + usecs_dur / 1000000.0);
	write_rate = (double) BLCKSZ * PageDirtyOp / (1024 * 1024) /
		(secs_dur + usecs_dur / 1000000.0);
}
appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
				 read_rate, write_rate);
appendStringInfo(&buf, _("buffer usage: %lld hits, %lld misses, %lld dirtied\n"),
				 (long long) PageHitOp,
				 (long long) PageMissOp,
				 (long long) PageDirtyOp);
appendStringInfo(&buf, _("WAL usage: %lld records, %lld full page images, %llu bytes\n"),
				 (long long) walusage.wal_records,
				 (long long) walusage.wal_fpi,
				 (unsigned long long) walusage.wal_bytes);
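/* The accumulated buf is then emitted as a single log message via ereport() */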
BlockNumber rel_pages = vacrel->rel_pages,
			blkno,
			next_unskippable_block,
			next_fsm_block_to_vacuum = 0;
bool		next_unskippable_allvis,
			skipping_current_range;
const int	initprog_index[] = {PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, PROGRESS_VACUUM_MAX_DEAD_TUPLES};
int64		initprog_val[3];

initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
initprog_val[1] = rel_pages;
initprog_val[2] = vacrel->dead_items->max_items;
pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
next_unskippable_block = lazy_scan_skip(vacrel, &vmbuffer, 0,
										&next_unskippable_allvis,
										&skipping_current_range);
for (blkno = 0; blkno < rel_pages; blkno++)
{
	bool		all_visible_according_to_vm;

	if (blkno == next_unskippable_block)
	{
		all_visible_according_to_vm = next_unskippable_allvis;
		next_unskippable_block = lazy_scan_skip(vacrel, &vmbuffer, blkno + 1,
												&next_unskippable_allvis,
												&skipping_current_range);
Assert(next_unskippable_block >= blkno + 1);

Assert(blkno < rel_pages - 1);

if (skipping_current_range)

all_visible_according_to_vm = true;
next_fsm_block_to_vacuum = blkno;

next_fsm_block_to_vacuum = blkno;
if (!all_visible_according_to_vm && prunestate.all_visible)
elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
	 vacrel->relname, blkno);
elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
	 vacrel->relname, blkno);
else if (all_visible_according_to_vm && prunestate.all_visible &&
		 prunestate.all_frozen &&
		 !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
if (blkno > next_fsm_block_to_vacuum)
static BlockNumber
lazy_scan_skip(LVRelState *vacrel, Buffer *vmbuffer, BlockNumber next_block,
			   bool *next_unskippable_allvis, bool *skipping_current_range)
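/*
 * Returns the next block that lazy_scan_heap must scan, starting the search
 * at next_block.  Sets *skipping_current_range when the intervening
 * all-visible range is long enough (SKIP_PAGES_THRESHOLD) to be skipped.
 */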
BlockNumber rel_pages = vacrel->rel_pages,
			next_unskippable_block = next_block,
			nskippable_blocks = 0;
bool		skipsallvis = false;

*next_unskippable_allvis = true;
while (next_unskippable_block < rel_pages)
{
	uint8		mapbits = visibilitymap_get_status(vacrel->rel,
												   next_unskippable_block, vmbuffer);
*next_unskippable_allvis = false;

if (next_unskippable_block == rel_pages - 1)

*next_unskippable_allvis = false;
next_unskippable_block++;
nskippable_blocks++;
if (nskippable_blocks < SKIP_PAGES_THRESHOLD)
	*skipping_current_range = false;
else
{
	*skipping_current_range = true;
	if (skipsallvis)
		vacrel->skippedallvis = true;
}
return next_unskippable_block;
int			lpdead_items, live_tuples, recently_dead_tuples;

recently_dead_tuples = 0;
prunestate->hastup = false;

bool		totally_frozen;
prunestate->hastup = true;

deadoffsets[lpdead_items++] = offnum;

recently_dead_tuples++;
elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
prunestate->hastup = true;

if (heap_prepare_freeze_tuple(tuple.t_data, &vacrel->cutoffs, &pagefrz,
							  &frozen[tuples_frozen], &totally_frozen))
	frozen[tuples_frozen++].offset = offnum;

if (!totally_frozen)
if (tuples_frozen == 0)

heap_freeze_execute_prepared(vacrel->rel, buf,
							 snapshotConflictHorizon,
							 frozen, tuples_frozen);
#ifdef USE_ASSERT_CHECKING

if (lpdead_items > 0)

for (int i = 0; i < lpdead_items; i++)
static bool
lazy_scan_noprune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page,
				  bool *hastup, bool *recordfreespace)
int			lpdead_items, live_tuples, recently_dead_tuples, missed_dead_tuples;
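/*
 * lazy_scan_noprune() handles pages whose cleanup lock could not be
 * acquired: existing LP_DEAD items are still collected, but no pruning or
 * freezing is attempted.
 */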
*recordfreespace = false;

recently_dead_tuples = 0;
missed_dead_tuples = 0;

deadoffsets[lpdead_items++] = offnum;
if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
							 &NoFreezePageRelfrozenXid,
							 &NoFreezePageRelminMxid))
missed_dead_tuples++;

recently_dead_tuples++;
elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
if (lpdead_items > 0)

missed_dead_tuples += lpdead_items;

*recordfreespace = true;

else if (lpdead_items == 0)

*recordfreespace = true;
for (int i = 0; i < lpdead_items; i++)

*recordfreespace = false;
if (missed_dead_tuples > 0)
bool		allindexes = true;
double		old_live_tuples = vacrel->rel->rd_rel->reltuples;
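/*
 * old_live_tuples is the pg_class.reltuples value from before this VACUUM
 * started; it is what gets passed down to the index AMs as the table's
 * tuple count during the bulk-delete pass.
 */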
vacrel->blkno = blkno;
ereport(DEBUG2,
		(errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
				vacrel->relname, (long long) index, vacuumed_pages)));
unused[nunused++] = toff;

visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr,
				  vmbuffer, visibility_cutoff_xid, flags);
ereport(WARNING,
		(errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
				get_database_name(MyDatabaseId),
				vacrel->relnamespace,
				vacrel->relname,
				vacrel->num_index_scans),
		 errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
		 errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
				 "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
istat = lazy_cleanup_one_index(indrel, istat, reltuples,
							   estimated_count, vacrel);
ivinfo.index = indrel;
static IndexBulkDeleteResult *
lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat,
					   double reltuples, bool estimated_count,
					   LVRelState *vacrel)

ivinfo.index = indrel;
if (possibly_freeable > 0 &&
	(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
	 possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
bool		lock_waiter_detected;

lock_waiter_detected = false;
ereport(vacrel->verbose ? INFO : DEBUG2,
		(errmsg("\"%s\": stopping truncate due to conflicting lock request",
				vacrel->relname)));
if (new_rel_pages != orig_rel_pages)

vacrel->blkno = new_rel_pages;

if (new_rel_pages >= orig_rel_pages)
ereport(vacrel->verbose ? INFO : DEBUG2,
		(errmsg("table \"%s\": truncated %u to %u pages",
				vacrel->relname,
				orig_rel_pages, new_rel_pages)));
	orig_rel_pages = new_rel_pages;
} while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
2991 "prefetch size must be power of 2");
if ((blkno % 32) == 0)

elapsed = currenttime;
ereport(vacrel->verbose ? INFO : DEBUG2,
		(errmsg("table \"%s\": suspending truncate due to conflicting lock request",
				vacrel->relname)));
*lock_waiter_detected = true;

starttime = currenttime;
if (prefetchedUntil > blkno)

for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)

prefetchedUntil = prefetchStart;
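/*
 * count_nondeletable_pages() scans the table backwards from the end, so
 * buffers are explicitly prefetched in PREFETCH_SIZE chunks; a backwards
 * scan gets no help from OS readahead.
 */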
max_items = Min(max_items, INT_MAX);

return (int) max_items;
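/*
 * max_items is derived from the memory budget via MAXDEADITEMS() and is
 * clamped both to INT_MAX and to what a single allocation can hold.
 */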
ereport(WARNING,
		(errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
				vacrel->relname)));
bool		all_visible = true;

for (offnum = FirstOffsetNumber;
	 offnum <= maxoff && all_visible;
	 offnum = OffsetNumberNext(offnum))
all_visible = false;
*all_frozen = false;

all_visible = false;
*all_frozen = false;

all_visible = false;
*all_frozen = false;
*visibility_cutoff_xid = xmin;
if (all_visible && *all_frozen &&
	heap_tuple_needs_eventual_freeze(tuple.t_data))
	*all_frozen = false;
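/*
 * heap_tuple_needs_eventual_freeze() reports whether the tuple would have
 * to be frozen at some point; if so, the page cannot be marked all-frozen
 * in the visibility map.
 */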
all_visible = false;
*all_frozen = false;
elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
for (int idx = 0; idx < nindexes; idx++)
switch (errinfo->phase)
errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
		   errinfo->blkno, errinfo->offnum,
		   errinfo->relnamespace, errinfo->relname);

errcontext("while scanning block %u of relation \"%s.%s\"",
		   errinfo->blkno, errinfo->relnamespace, errinfo->relname);

errcontext("while scanning relation \"%s.%s\"",
		   errinfo->relnamespace, errinfo->relname);

errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
		   errinfo->blkno, errinfo->offnum,
		   errinfo->relnamespace, errinfo->relname);

errcontext("while vacuuming block %u of relation \"%s.%s\"",
		   errinfo->blkno, errinfo->relnamespace, errinfo->relname);

errcontext("while vacuuming relation \"%s.%s\"",
		   errinfo->relnamespace, errinfo->relname);

errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
		   errinfo->indname, errinfo->relnamespace, errinfo->relname);

errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
		   errinfo->indname, errinfo->relnamespace, errinfo->relname);

errcontext("while truncating relation \"%s.%s\" to %u blocks",
		   errinfo->relnamespace, errinfo->relname, errinfo->blkno);
vacrel->blkno = blkno;
vacrel->phase = phase;