#define REL_TRUNCATE_MINIMUM	1000
#define REL_TRUNCATE_FRACTION	16

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL		20	/* ms */
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL		50	/* ms */
#define VACUUM_TRUNCATE_LOCK_TIMEOUT			5000	/* ms */

#define BYPASS_THRESHOLD_PAGES	0.02	/* i.e. 2% of rel_pages */

#define FAILSAFE_EVERY_PAGES \
	((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))

#define VACUUM_FSM_EVERY_PAGES \
	((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

#define SKIP_PAGES_THRESHOLD	((BlockNumber) 32)

#define PREFETCH_SIZE			((BlockNumber) 32)

#define ParallelVacuumIsActive(vacrel) ((vacrel)->pvs != NULL)
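/*
 * Reader's note: REL_TRUNCATE_MINIMUM/REL_TRUNCATE_FRACTION gate whether
 * the truncation phase is attempted at all; the VACUUM_TRUNCATE_LOCK_*
 * values (milliseconds) bound how long truncation may hold or wait for
 * AccessExclusiveLock.  FAILSAFE_EVERY_PAGES and VACUUM_FSM_EVERY_PAGES
 * express "every 4GB/8GB of heap" re-check intervals in blocks; the
 * failsafe check in the scan loop is roughly:
 *
 *		if (blkno - next_failsafe_block >= FAILSAFE_EVERY_PAGES)
 *		{
 *			lazy_check_wraparound_failsafe(vacrel);
 *			next_failsafe_block = blkno;
 *		}
 *
 * ParallelVacuumIsActive() simply tests whether a ParallelVacuumState was
 * set up for this VACUUM.
 */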
static BlockNumber lazy_scan_skip(LVRelState *vacrel, Buffer *vmbuffer,
								  BlockNumber next_block,
								  bool *next_unskippable_allvis,
								  bool *skipping_current_range);
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
								   BlockNumber blkno, Page page,
								   bool sharelock, Buffer vmbuffer);
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
							  BlockNumber blkno, Page page,
							  bool *hastup, bool *recordfreespace);
static IndexBulkDeleteResult *lazy_cleanup_one_index(Relation indrel,
													 IndexBulkDeleteResult *istat,
													 double reltuples,
													 bool estimated_count,
													 LVRelState *vacrel);
static BlockNumber count_nondeletable_pages(LVRelState *vacrel,
											bool *lock_waiter_detected);
	char	  **indnames = NULL;
	/* ... */
	/* Register the error-context callback before doing any real work */
	errcallback.callback = vacuum_error_callback;
	errcallback.arg = vacrel;
	errcallback.previous = error_context_stack;
	error_context_stack = &errcallback;
	/* ... */
	if (instrument && vacrel->nindexes > 0)
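/*
 * When instrumentation is on (autovacuum logging or VERBOSE) and the table
 * has indexes, heap_vacuum_rel() copies each index name into indnames up
 * front: the index relations may already be closed by the time the final
 * log message is assembled.
 */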
	if (verbose)
	{
		if (vacrel->aggressive)
			ereport(INFO,
					(errmsg("aggressively vacuuming \"%s.%s.%s\"",
							vacrel->dbname, vacrel->relnamespace, vacrel->relname)));
		else
			ereport(INFO,
					(errmsg("vacuuming \"%s.%s.%s\"",
							vacrel->dbname, vacrel->relnamespace, vacrel->relname)));
	}
	if (new_rel_allvisible > new_rel_pages)
		new_rel_allvisible = new_rel_pages;

	vac_update_relstats(rel, new_rel_pages, new_live_tuples,
						new_rel_allvisible, vacrel->nindexes > 0,
						new_frozen_xid, new_min_multi,
						&frozenxid_updated, &minmulti_updated, false);
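/*
 * visibilitymap_count() can lag behind the heap, so relallvisible is
 * clamped to relpages before being written back to pg_class; the trailing
 * "false" tells vac_update_relstats() we are not in an outer transaction.
 */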
	double		read_rate = 0,
				write_rate = 0;
	/* ... */
	memset(&walusage, 0, sizeof(WalUsage));
			if (verbose)
			{
				/* Aggressiveness already reported earlier, at INFO level */
				msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
			}
			else if (params->is_wraparound)
			{
				/* is_wraparound implies aggressive, but cover the corner case */
				if (vacrel->aggressive)
					msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
				else
					msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
			}
			else if (vacrel->aggressive)
				msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
			else
				msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
			appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total)\n"),
							 vacrel->removed_pages, new_rel_pages,
							 vacrel->scanned_pages,
							 orig_rel_pages == 0 ? 100.0 :
							 100.0 * vacrel->scanned_pages / orig_rel_pages);
			appendStringInfo(&buf,
							 _("tuples: %lld removed, %lld remain, %lld are dead but not yet removable\n"),
							 (long long) vacrel->tuples_deleted,
							 (long long) vacrel->new_rel_tuples,
							 (long long) vacrel->recently_dead_tuples);
			if (vacrel->missed_dead_tuples > 0)
				appendStringInfo(&buf,
								 _("tuples missed: %lld dead from %u pages not removed due to cleanup lock contention\n"),
								 (long long) vacrel->missed_dead_tuples,
								 vacrel->missed_dead_pages);
			diff = (int32) (ReadNextTransactionId() - vacrel->cutoffs.OldestXmin);
			appendStringInfo(&buf,
							 _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
							 vacrel->cutoffs.OldestXmin, diff);
			if (frozenxid_updated)
			{
				diff = (int32) (vacrel->NewRelfrozenXid - vacrel->relfrozenxid);
				appendStringInfo(&buf,
								 _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
								 vacrel->NewRelfrozenXid, diff);
			}
			if (minmulti_updated)
			{
				diff = (int32) (vacrel->NewRelminMxid - vacrel->relminmxid);
				appendStringInfo(&buf,
								 _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
								 vacrel->NewRelminMxid, diff);
			}
			appendStringInfo(&buf, _("frozen: %u pages from table (%.2f%% of total) had %lld tuples frozen\n"),
							 vacrel->frozen_pages,
							 orig_rel_pages == 0 ? 100.0 :
							 100.0 * vacrel->frozen_pages / orig_rel_pages,
							 (long long) vacrel->tuples_frozen);
			if (vacrel->do_index_vacuuming)
				msgfmt = _("%u pages from table (%.2f%% of total) had %lld dead item identifiers removed\n");
			else
				msgfmt = _("%u pages from table (%.2f%% of total) have %lld dead item identifiers\n");
			appendStringInfo(&buf, msgfmt,
							 vacrel->lpdead_item_pages,
							 orig_rel_pages == 0 ? 100.0 :
							 100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
							 (long long) vacrel->lpdead_items);
			for (int i = 0; i < vacrel->nindexes; i++)
			{
				IndexBulkDeleteResult *istat = vacrel->indstats[i];

				if (!istat)
					continue;
				appendStringInfo(&buf,
								 _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
								 indnames[i], istat->num_pages,
								 istat->pages_newly_deleted,
								 istat->pages_deleted, istat->pages_free);
			}
			if (secs_dur > 0 || usecs_dur > 0)
			{
				read_rate = (double) BLCKSZ * PageMissOp / (1024 * 1024) /
					(secs_dur + usecs_dur / 1000000.0);
				write_rate = (double) BLCKSZ * PageDirtyOp / (1024 * 1024) /
					(secs_dur + usecs_dur / 1000000.0);
			}
			appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
							 read_rate, write_rate);
			appendStringInfo(&buf,
							 _("buffer usage: %lld hits, %lld misses, %lld dirtied\n"),
							 (long long) PageHitOp,
							 (long long) PageMissOp,
							 (long long) PageDirtyOp);
			appendStringInfo(&buf,
							 _("WAL usage: %lld records, %lld full page images, %llu bytes\n"),
							 (long long) walusage.wal_records,
							 (long long) walusage.wal_fpi,
							 (unsigned long long) walusage.wal_bytes);
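/*
 * The assembled buffer is then emitted as a single ereport(), so the whole
 * report lands in the server log as one entry rather than many.
 */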
	BlockNumber rel_pages = vacrel->rel_pages,
				blkno,
				next_unskippable_block,
				next_fsm_block_to_vacuum = 0;
	bool		next_unskippable_allvis,
				skipping_current_range;
	const int	initprog_index[] = {
		PROGRESS_VACUUM_PHASE,
		PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
		PROGRESS_VACUUM_MAX_DEAD_TUPLES
	};
	int64		initprog_val[3];

	/* Report that we're scanning the heap, advertising total # of blocks */
	initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
	initprog_val[1] = rel_pages;
	initprog_val[2] = vacrel->dead_items->max_items;
	pgstat_progress_update_multi_param(3, initprog_index, initprog_val);

	/* Set up the initial range of skippable blocks using the visibility map */
	next_unskippable_block = lazy_scan_skip(vacrel, &vmbuffer, 0,
											&next_unskippable_allvis,
											&skipping_current_range);
	for (blkno = 0; blkno < rel_pages; blkno++)
	{
		bool		all_visible_according_to_vm;

		if (blkno == next_unskippable_block)
		{
			/* Can't skip this page safely; scan it, then compute next range */
			all_visible_according_to_vm = next_unskippable_allvis;
			next_unskippable_block = lazy_scan_skip(vacrel, &vmbuffer,
													blkno + 1,
													&next_unskippable_allvis,
													&skipping_current_range);
			Assert(next_unskippable_block >= blkno + 1);
		}
		else
		{
			/* Last page is always scanned (to let us set nonempty_pages) */
			Assert(blkno < rel_pages - 1);

			if (skipping_current_range)
				continue;

			/* Current range is too small to skip -- scan the page anyway */
			all_visible_according_to_vm = true;
		}
			/*
			 * Vacuum the FSM for the range processed so far; this happens both
			 * periodically in the no-index case and after one-pass heap
			 * vacuuming.
			 */
			FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno);
			next_fsm_block_to_vacuum = blkno;
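/*
 * In the one-pass (no indexes) case the FSM is vacuumed every
 * VACUUM_FSM_EVERY_PAGES processed, so concurrent inserters can see freed
 * space long before the heap scan completes.
 */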
		if (!all_visible_according_to_vm && prunestate.all_visible)
		{
			/* Page proved all-visible, but its VM bit was not yet set */
		}
		else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
				 VM_ALL_VISIBLE(vacrel->rel, blkno, &vmbuffer))
		{
			elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
				 vacrel->relname, blkno);
			/* Defensively clear the inconsistent VM bit */
		}
		else if (prunestate.has_lpdead_items && PageIsAllVisible(page))
		{
			elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
				 vacrel->relname, blkno);
			/* Clear both the page-level bit and the VM bit */
		}
		else if (all_visible_according_to_vm && prunestate.all_visible &&
				 prunestate.all_frozen &&
				 !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
		{
			/* Page is already all-visible; additionally mark it all-frozen */
		}
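/*
 * The WARNING branches above are corruption repair: whenever the visibility
 * map disagrees with the page itself, the optimistic bits are cleared so the
 * VM never claims visibility the page cannot back up.
 */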
	/* Flush any remaining FSM updates for the tail of the scan */
	if (blkno > next_fsm_block_to_vacuum)
		FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno);
static BlockNumber
lazy_scan_skip(LVRelState *vacrel, Buffer *vmbuffer, BlockNumber next_block,
			   bool *next_unskippable_allvis, bool *skipping_current_range)
{
	BlockNumber rel_pages = vacrel->rel_pages,
				next_unskippable_block = next_block,
				nskippable_blocks = 0;
	bool		skipsallvis = false;

	*next_unskippable_allvis = true;
	while (next_unskippable_block < rel_pages)
	{
		uint8		mapbits = visibilitymap_get_status(vacrel->rel,
													   next_unskippable_block,
													   vmbuffer);

		if ((mapbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
		{
			*next_unskippable_allvis = false;
			break;
		}
		/* Caller must always scan the table's last page */
		if (next_unskippable_block == rel_pages - 1)
			break;
		/* DISABLE_PAGE_SKIPPING makes all skipping unsafe */
		if (!vacrel->skipwithvm)
		{
			*next_unskippable_allvis = false;
			break;
		}
		/* Aggressive VACUUM may only skip all-frozen pages */
		if ((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0)
		{
			if (vacrel->aggressive)
				break;
			skipsallvis = true;
		}
		vacuum_delay_point();
		next_unskippable_block++;
		nskippable_blocks++;
	}

	/* Only skip ranges of at least SKIP_PAGES_THRESHOLD pages */
	if (nskippable_blocks < SKIP_PAGES_THRESHOLD)
		*skipping_current_range = false;
	else
	{
		*skipping_current_range = true;
		if (skipsallvis)
			vacrel->skippedallvis = true;
	}

	return next_unskippable_block;
}
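/*
 * Skipping only kicks in for runs of at least SKIP_PAGES_THRESHOLD (32)
 * all-visible pages: skipping shorter ranges would defeat the kernel's
 * sequential read-ahead and forfeit a cheap chance to advance relfrozenxid,
 * so small gaps are simply read anyway.
 */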
	int			lpdead_items,
				live_tuples,
				recently_dead_tuples;
	/* ... */
	lpdead_items = 0;
	live_tuples = 0;
	recently_dead_tuples = 0;
	/* ... */
	prunestate->hastup = false;
	/* ... per-item loop ... */
		bool		totally_frozen;

		/* A redirect item points at a live HOT chain: page has tuples */
		prunestate->hastup = true;
		/* ... */
		/* LP_DEAD stub left by pruning: remember it for the index pass */
		deadoffsets[lpdead_items++] = offnum;
		/* ... */
			case HEAPTUPLE_RECENTLY_DEAD:
				/* Dead, but not yet removable by this VACUUM */
				recently_dead_tuples++;
				break;
			/* ... */
			default:
				elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
				break;

		prunestate->hastup = true;

		/* Tuple with storage -- consider whether it needs freezing */
		if (heap_prepare_freeze_tuple(tuple.t_data, &vacrel->cutoffs, &pagefrz,
									  &frozen[tuples_frozen], &totally_frozen))
		{
			/* Save prepared freeze plan for execution below */
			frozen[tuples_frozen++].offset = offnum;
		}

		if (!totally_frozen)
			prunestate->all_frozen = false;
	/* ... */
	if (tuples_frozen == 0)
	{
		/* No freeze plans to execute on this page */
	}
	else
	{
		/* ... */
		heap_freeze_execute_prepared(vacrel->rel, buf,
									 snapshotConflictHorizon,
									 frozen, tuples_frozen);
	}

#ifdef USE_ASSERT_CHECKING
	/* ... cross-check all_visible via heap_page_is_all_visible() ... */
#endif

	if (lpdead_items > 0)
	{
		/* ... */
		for (int i = 0; i < lpdead_items; i++)
		{
			/* ... append (blkno, deadoffsets[i]) to vacrel->dead_items ... */
		}
	}
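/*
 * lazy_scan_prune() makes the final dead-or-alive call for every item:
 * anything HeapTupleSatisfiesVacuum() still reports as RECENTLY_DEAD is
 * counted but left in place, while the LP_DEAD stubs that pruning leaves
 * behind are queued in dead_items for the index pass.
 */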
static bool
lazy_scan_noprune(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
				  Page page, bool *hastup, bool *recordfreespace)
{
	int			lpdead_items,
				live_tuples,
				recently_dead_tuples,
				missed_dead_tuples;
	/* ... */
	*hastup = false;
	*recordfreespace = false;
	lpdead_items = 0;
	live_tuples = 0;
	recently_dead_tuples = 0;
	missed_dead_tuples = 0;

	/* ... per-item loop, with only a share lock held ... */
		/* LP_DEAD item: remember its offset */
		deadoffsets[lpdead_items++] = offnum;
		/* ... */
		if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
									 &NoFreezePageRelfrozenXid,
									 &NoFreezePageRelminMxid))
		{
			/* Tuple needs freezing: an aggressive VACUUM cannot punt here */
		}
		/* ... */
			case HEAPTUPLE_DEAD:
				/* Cannot prune without a cleanup lock; count as missed */
				missed_dead_tuples++;
				break;
			case HEAPTUPLE_RECENTLY_DEAD:
				recently_dead_tuples++;
				break;
			/* ... */
			default:
				elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
				break;
	/* ... */
	if (vacrel->nindexes == 0)
	{
		if (lpdead_items > 0)
		{
			/* One-pass VACUUM can't remove these without pruning */
			missed_dead_tuples += lpdead_items;
		}
		*recordfreespace = true;
	}
	else if (lpdead_items == 0)
	{
		/* No LP_DEAD items: page is done; record free space now */
		*recordfreespace = true;
	}
	else
	{
		/* Queue LP_DEAD items for the index/heap passes */
		for (int i = 0; i < lpdead_items; i++)
		{
			/* ... append (blkno, deadoffsets[i]) to vacrel->dead_items ... */
		}
		/* Free space is recorded by the second heap pass instead */
		*recordfreespace = false;
	}

	if (missed_dead_tuples > 0)
		vacrel->missed_dead_pages++;
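/*
 * lazy_scan_noprune() is the no-cleanup-lock fallback: it needs only a
 * share lock.  It returns false when the page holds tuples that must be
 * frozen (the caller then waits for a cleanup lock); otherwise VACUUM
 * proceeds without ever pruning the page, at worst "missing" some dead
 * tuples until the next VACUUM.
 */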
	/* lazy_vacuum_all_indexes(): */
	bool		allindexes = true;
	double		old_live_tuples = vacrel->rel->rd_rel->reltuples;

	/* lazy_vacuum_heap_rel(), per-block loop: */
	vacrel->blkno = blkno;
	/* ... */
	ereport(DEBUG2,
			(errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
					vacrel->relname, (long long) index, vacuumed_pages)));

	/* lazy_vacuum_heap_page(): collect now-unused item pointers */
	unused[nunused++] = toff;
	/* lazy_vacuum_heap_page(): set the VM bit once the page is clean */
	visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr,
					  vmbuffer, visibility_cutoff_xid, flags);

	/* lazy_check_wraparound_failsafe(): */
	ereport(WARNING,
			(errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
					vacrel->dbname, vacrel->relnamespace, vacrel->relname,
					vacrel->num_index_scans),
			 errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
			 errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
					 "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
	/* lazy_cleanup_all_indexes(), serial path: */
	istat = lazy_cleanup_one_index(indrel, istat, reltuples,
								   estimated_count, vacrel);

	/* lazy_vacuum_one_index() fills an IndexVacuumInfo the same way: */
	ivinfo.index = indrel;
	/* ... */

static IndexBulkDeleteResult *
lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat,
					   double reltuples, bool estimated_count,
					   LVRelState *vacrel)
{
	IndexVacuumInfo ivinfo;

	ivinfo.index = indrel;
	/* should_attempt_truncation(): */
	possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
	if (possibly_freeable > 0 &&
		(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
		 possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
		return true;

	/* lazy_truncate_heap(): */
	bool		lock_waiter_detected;

	do
	{
		lock_waiter_detected = false;
		/* ... poll for AccessExclusiveLock, up to VACUUM_TRUNCATE_LOCK_TIMEOUT ... */
				ereport(vacrel->verbose ? INFO : DEBUG2,
						(errmsg("\"%s\": stopping truncate due to conflicting lock request",
								vacrel->relname)));
		/* ... */
		new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
		if (new_rel_pages != orig_rel_pages)
		{
			/* Table size changed since the scan; be conservative */
		}

		/* Scan backwards from the end to find the truncation point */
		new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
		vacrel->blkno = new_rel_pages;

		if (new_rel_pages >= orig_rel_pages)
		{
			/* Nothing to truncate after all */
			UnlockRelation(vacrel->rel, AccessExclusiveLock);
			return;
		}

		RelationTruncate(vacrel->rel, new_rel_pages);
		/* ... */
		ereport(vacrel->verbose ? INFO : DEBUG2,
				(errmsg("table \"%s\": truncated %u to %u pages",
						vacrel->relname,
						orig_rel_pages, new_rel_pages)));
		orig_rel_pages = new_rel_pages;
	} while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
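/*
 * The do/while retries on purpose: when count_nondeletable_pages() detects
 * a lock waiter it returns early, so a partial truncation followed by
 * another pass beats blocking other sessions for the entire tail scan.
 */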
2979 "prefetch size must be power of 2");
2997 if ((blkno % 32) == 0)
3003 elapsed = currenttime;
3011 (
errmsg(
"table \"%s\": suspending truncate due to conflicting lock request",
3014 *lock_waiter_detected =
true;
3017 starttime = currenttime;
3031 if (prefetchedUntil > blkno)
3037 for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
3042 prefetchedUntil = prefetchStart;
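/*
 * Because this tail scan runs backwards, OS read-ahead heuristics never
 * help it; PrefetchBuffer() over PREFETCH_SIZE-aligned chunks recreates
 * that read-ahead by hand.
 */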
		max_items = Min(max_items, INT_MAX);
		max_items = Min(max_items, MAXDEADITEMS(MaxAllocSize));
	/* ... */
	return (int) max_items;
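/*
 * dead_items_max_items() sizes the TID array from maintenance_work_mem (or
 * autovacuum_work_mem), clamped so the allocation stays under MaxAllocSize
 * and the count fits in an int.  MAXDEADITEMS is defined along the lines of
 *
 *		(((avail_mem) - offsetof(VacDeadItems, items)) / sizeof(ItemPointerData))
 *
 * A table with no indexes only ever needs MaxHeapTuplesPerPage entries,
 * since its dead items are removed page by page in a single pass.
 */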
	/* dead_items_alloc(): parallel vacuum is disabled for temp tables */
	ereport(WARNING,
			(errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
					vacrel->relname)));
	bool		all_visible = true;

	*visibility_cutoff_xid = InvalidTransactionId;
	*all_frozen = true;

	for (offnum = FirstOffsetNumber;
		 offnum <= maxoff && all_visible;
		 offnum = OffsetNumberNext(offnum))
	{
		/* ... unused and redirect items are ignored ... */
		if (ItemIdIsDead(itemid))
		{
			all_visible = false;
			*all_frozen = false;
			break;
		}
		/* ... */
		switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin, buf))
		{
			case HEAPTUPLE_LIVE:
				{
					TransactionId xmin = HeapTupleHeaderGetXmin(tuple.t_data);

					/* xmin must be committed and precede OldestXmin */
					if (!HeapTupleHeaderXminCommitted(tuple.t_data) ||
						!TransactionIdPrecedes(xmin, vacrel->cutoffs.OldestXmin))
					{
						all_visible = false;
						*all_frozen = false;
						break;
					}

					/* Track newest xmin on page */
					if (TransactionIdFollows(xmin, *visibility_cutoff_xid) &&
						TransactionIdIsNormal(xmin))
						*visibility_cutoff_xid = xmin;

					if (all_visible && *all_frozen &&
						heap_tuple_needs_eventual_freeze(tuple.t_data))
						*all_frozen = false;
				}
				break;
			case HEAPTUPLE_DEAD:
			case HEAPTUPLE_RECENTLY_DEAD:
			case HEAPTUPLE_INSERT_IN_PROGRESS:
			case HEAPTUPLE_DELETE_IN_PROGRESS:
				all_visible = false;
				*all_frozen = false;
				break;
			default:
				elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
				break;
		}
	}

	return all_visible;
	for (int idx = 0; idx < nindexes; idx++)
	switch (errinfo->phase)
	{
		case VACUUM_ERRCB_PHASE_SCAN_HEAP:
			if (BlockNumberIsValid(errinfo->blkno))
			{
				if (OffsetNumberIsValid(errinfo->offnum))
					errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
							   errinfo->blkno, errinfo->offnum,
							   errinfo->relnamespace, errinfo->relname);
				else
					errcontext("while scanning block %u of relation \"%s.%s\"",
							   errinfo->blkno, errinfo->relnamespace, errinfo->relname);
			}
			else
				errcontext("while scanning relation \"%s.%s\"",
						   errinfo->relnamespace, errinfo->relname);
			break;
		case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
			if (BlockNumberIsValid(errinfo->blkno))
			{
				if (OffsetNumberIsValid(errinfo->offnum))
					errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
							   errinfo->blkno, errinfo->offnum,
							   errinfo->relnamespace, errinfo->relname);
				else
					errcontext("while vacuuming block %u of relation \"%s.%s\"",
							   errinfo->blkno, errinfo->relnamespace, errinfo->relname);
			}
			else
				errcontext("while vacuuming relation \"%s.%s\"",
						   errinfo->relnamespace, errinfo->relname);
			break;
		case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
			errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
					   errinfo->indname, errinfo->relnamespace, errinfo->relname);
			break;
		case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
			errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
					   errinfo->indname, errinfo->relnamespace, errinfo->relname);
			break;
		case VACUUM_ERRCB_PHASE_TRUNCATE:
			if (BlockNumberIsValid(errinfo->blkno))
				errcontext("while truncating relation \"%s.%s\" to %u blocks",
						   errinfo->relnamespace, errinfo->relname, errinfo->blkno);
			break;
		case VACUUM_ERRCB_PHASE_UNKNOWN:
		default:
			break;				/* no context */
	}
	vacrel->blkno = blkno;
	vacrel->offnum = offnum;
	vacrel->phase = phase;