PostgreSQL Source Code (git master)
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xlog.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"
Include dependency graph for vacuumlazy.c:


Data Structures

struct  LVRelStats
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define VACUUM_FSM_EVERY_PAGES   ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define LAZY_ALLOC_TUPLES   MaxHeapTuplesPerPage
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define FORCE_CHECK_PAGE()   (blkno == nblocks - 1 && should_attempt_truncation(params, vacrelstats))
 

Typedefs

typedef struct LVRelStats LVRelStats
 

Functions

static void lazy_scan_heap (Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, Relation *Irel, int nindexes, bool aggressive)
 
static void lazy_vacuum_heap (Relation onerel, LVRelStats *vacrelstats)
 
static bool lazy_check_needs_freeze (Buffer buf, bool *hastup)
 
static void lazy_vacuum_index (Relation indrel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats)
 
static void lazy_cleanup_index (Relation indrel, IndexBulkDeleteResult *stats, LVRelStats *vacrelstats)
 
static int lazy_vacuum_page (Relation onerel, BlockNumber blkno, Buffer buffer, int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
 
static bool should_attempt_truncation (VacuumParams *params, LVRelStats *vacrelstats)
 
static void lazy_truncate_heap (Relation onerel, LVRelStats *vacrelstats)
 
static BlockNumber count_nondeletable_pages (Relation onerel, LVRelStats *vacrelstats)
 
static void lazy_space_alloc (LVRelStats *vacrelstats, BlockNumber relblocks)
 
static void lazy_record_dead_tuple (LVRelStats *vacrelstats, ItemPointer itemptr)
 
static bool lazy_tid_reaped (ItemPointer itemptr, void *state)
 
static int vac_cmp_itemptr (const void *left, const void *right)
 
static bool heap_page_is_all_visible (Relation rel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
 
void heap_vacuum_rel (Relation onerel, VacuumParams *params, BufferAccessStrategy bstrategy)
 
static void vacuum_log_cleanup_info (Relation rel, LVRelStats *vacrelstats)
 

Variables

static int elevel = -1
 
static TransactionId OldestXmin
 
static TransactionId FreezeLimit
 
static MultiXactId MultiXactCutoff
 
static BufferAccessStrategy vac_strategy
 

Macro Definition Documentation

◆ FORCE_CHECK_PAGE

#define FORCE_CHECK_PAGE()   (blkno == nblocks - 1 && should_attempt_truncation(params, vacrelstats))

Definition at line 656 of file vacuumlazy.c.

Referenced by lazy_scan_heap().

◆ LAZY_ALLOC_TUPLES

#define LAZY_ALLOC_TUPLES   MaxHeapTuplesPerPage

Definition at line 99 of file vacuumlazy.c.

Referenced by lazy_space_alloc().

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 111 of file vacuumlazy.c.

Referenced by count_nondeletable_pages().

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 72 of file vacuumlazy.c.

Referenced by should_attempt_truncation().

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 71 of file vacuumlazy.c.

Referenced by should_attempt_truncation().

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 105 of file vacuumlazy.c.

Referenced by lazy_scan_heap().

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES   ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 91 of file vacuumlazy.c.

Referenced by lazy_scan_heap().
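
For concreteness: with the default 8 kB block size this macro evaluates to 1,048,576, i.e. the FSM is vacuumed after every 8 GB of heap processed. A standalone sketch of the same arithmetic (not PostgreSQL code; BLCKSZ hard-coded to the assumed default):

    #include <stdio.h>
    #include <stdint.h>

    #define BLCKSZ 8192   /* assumed default PostgreSQL block size */

    int main(void)
    {
        /* Same arithmetic as the macro: 8 GB worth of heap blocks. */
        uint32_t every_pages = (uint32_t) (((uint64_t) 8 * 1024 * 1024 * 1024) / BLCKSZ);

        printf("FSM is vacuumed every %u heap pages\n", every_pages);   /* 1048576 */
        return 0;
    }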

◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 81 of file vacuumlazy.c.

Referenced by count_nondeletable_pages().

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 83 of file vacuumlazy.c.

Referenced by lazy_truncate_heap().

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 82 of file vacuumlazy.c.

Referenced by lazy_truncate_heap().

Typedef Documentation

◆ LVRelStats

typedef struct LVRelStats LVRelStats

Function Documentation

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages ( Relation  onerel,
LVRelStats vacrelstats 
)
static

Definition at line 2001 of file vacuumlazy.c.

References AccessExclusiveLock, buf, BUFFER_LOCK_SHARE, BufferGetPage, CHECK_FOR_INTERRUPTS, elevel, ereport, errmsg(), FirstOffsetNumber, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LVRelStats::lock_waiter_detected, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelStats::nonempty_pages, OffsetNumberNext, PageGetItemId, PageGetMaxOffsetNumber, PageIsEmpty, PageIsNew, PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelStats::rel_pages, RelationGetRelationName, StaticAssertStmt, UnlockReleaseBuffer(), and VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL.

Referenced by lazy_truncate_heap().

2001 count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
2002 {
2003  BlockNumber blkno;
2004  BlockNumber prefetchedUntil;
2005  instr_time starttime;
2006 
2007  /* Initialize the starttime if we check for conflicting lock requests */
2008  INSTR_TIME_SET_CURRENT(starttime);
2009 
2010  /*
2011  * Start checking blocks at what we believe relation end to be and move
2012  * backwards. (Strange coding of loop control is needed because blkno is
2013  * unsigned.) To make the scan faster, we prefetch a few blocks at a time
2014  * in forward direction, so that OS-level readahead can kick in.
2015  */
2016  blkno = vacrelstats->rel_pages;
2017  StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
2018  "prefetch size must be power of 2");
2019  prefetchedUntil = InvalidBlockNumber;
2020  while (blkno > vacrelstats->nonempty_pages)
2021  {
2022  Buffer buf;
2023  Page page;
2024  OffsetNumber offnum,
2025  maxoff;
2026  bool hastup;
2027 
2028  /*
2029  * Check if another process requests a lock on our relation. We are
2030  * holding an AccessExclusiveLock here, so they will be waiting. We
2031  * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
2032  * only check if that interval has elapsed once every 32 blocks to
2033  * keep the number of system calls and actual shared lock table
2034  * lookups to a minimum.
2035  */
2036  if ((blkno % 32) == 0)
2037  {
2038  instr_time currenttime;
2039  instr_time elapsed;
2040 
2041  INSTR_TIME_SET_CURRENT(currenttime);
2042  elapsed = currenttime;
2043  INSTR_TIME_SUBTRACT(elapsed, starttime);
2044  if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
2045  >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
2046  {
2047  if (LockHasWaitersRelation(onerel, AccessExclusiveLock))
2048  {
2049  ereport(elevel,
2050  (errmsg("\"%s\": suspending truncate due to conflicting lock request",
2051  RelationGetRelationName(onerel))));
2052 
2053  vacrelstats->lock_waiter_detected = true;
2054  return blkno;
2055  }
2056  starttime = currenttime;
2057  }
2058  }
2059 
2060  /*
2061  * We don't insert a vacuum delay point here, because we have an
2062  * exclusive lock on the table which we want to hold for as short a
2063  * time as possible. We still need to check for interrupts however.
2064  */
2065  CHECK_FOR_INTERRUPTS();
2066 
2067  blkno--;
2068 
2069  /* If we haven't prefetched this lot yet, do so now. */
2070  if (prefetchedUntil > blkno)
2071  {
2072  BlockNumber prefetchStart;
2073  BlockNumber pblkno;
2074 
2075  prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
2076  for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
2077  {
2078  PrefetchBuffer(onerel, MAIN_FORKNUM, pblkno);
2079  CHECK_FOR_INTERRUPTS();
2080  }
2081  prefetchedUntil = prefetchStart;
2082  }
2083 
2084  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
2085  RBM_NORMAL, vac_strategy);
2086 
2087  /* In this phase we only need shared access to the buffer */
2088  LockBuffer(buf, BUFFER_LOCK_SHARE);
2089 
2090  page = BufferGetPage(buf);
2091 
2092  if (PageIsNew(page) || PageIsEmpty(page))
2093  {
2094  UnlockReleaseBuffer(buf);
2095  continue;
2096  }
2097 
2098  hastup = false;
2099  maxoff = PageGetMaxOffsetNumber(page);
2100  for (offnum = FirstOffsetNumber;
2101  offnum <= maxoff;
2102  offnum = OffsetNumberNext(offnum))
2103  {
2104  ItemId itemid;
2105 
2106  itemid = PageGetItemId(page, offnum);
2107 
2108  /*
2109  * Note: any non-unused item should be taken as a reason to keep
2110  * this page. We formerly thought that DEAD tuples could be
2111  * thrown away, but that's not so, because we'd not have cleaned
2112  * out their index entries.
2113  */
2114  if (ItemIdIsUsed(itemid))
2115  {
2116  hastup = true;
2117  break; /* can stop scanning */
2118  }
2119  } /* scan along page */
2120 
2121  UnlockReleaseBuffer(buf);
2122 
2123  /* Done scanning if we found a tuple here */
2124  if (hastup)
2125  return blkno + 1;
2126  }
2127 
2128  /*
2129  * If we fall out of the loop, all the previously-thought-to-be-empty
2130  * pages still are; we need not bother to look at the last known-nonempty
2131  * page.
2132  */
2133  return vacrelstats->nonempty_pages;
2134 }
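
The prefetch window computed at line 2075 relies on PREFETCH_SIZE being a power of two, so that masking with ~(PREFETCH_SIZE - 1) rounds a block number down to a multiple of the window size; that is what the StaticAssertStmt at line 2017 enforces. A standalone sketch of just that mask arithmetic (illustrative values, not PostgreSQL code):

    #include <stdio.h>
    #include <stdint.h>

    #define PREFETCH_SIZE ((uint32_t) 32)   /* must be a power of two */

    int main(void)
    {
        uint32_t blkno = 1000;                           /* hypothetical block */
        uint32_t start = blkno & ~(PREFETCH_SIZE - 1);   /* 992 = 31 * 32 */
        uint32_t pblkno;

        /* count_nondeletable_pages() prefetches this window in forward
         * order so that OS-level readahead can kick in. */
        for (pblkno = start; pblkno <= blkno; pblkno++)
            ;   /* PrefetchBuffer(onerel, MAIN_FORKNUM, pblkno) in the real code */

        printf("prefetch window for block %u: [%u, %u]\n", blkno, start, blkno);
        return 0;
    }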

◆ heap_page_is_all_visible()

static bool heap_page_is_all_visible ( Relation  rel,
Buffer  buf,
TransactionId visibility_cutoff_xid,
bool all_frozen 
)
static

Definition at line 2253 of file vacuumlazy.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, elog, ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleSatisfiesVacuum(), InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet, OffsetNumberNext, OldestXmin, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), and TransactionIdPrecedes().

Referenced by lazy_vacuum_page().

2253 heap_page_is_all_visible(Relation rel, Buffer buf,
2254  TransactionId *visibility_cutoff_xid,
2255  bool *all_frozen)
2256 {
2257  Page page = BufferGetPage(buf);
2258  BlockNumber blockno = BufferGetBlockNumber(buf);
2259  OffsetNumber offnum,
2260  maxoff;
2261  bool all_visible = true;
2262 
2263  *visibility_cutoff_xid = InvalidTransactionId;
2264  *all_frozen = true;
2265 
2266  /*
2267  * This is a stripped down version of the line pointer scan in
2268  * lazy_scan_heap(). So if you change anything here, also check that code.
2269  */
2270  maxoff = PageGetMaxOffsetNumber(page);
2271  for (offnum = FirstOffsetNumber;
2272  offnum <= maxoff && all_visible;
2273  offnum = OffsetNumberNext(offnum))
2274  {
2275  ItemId itemid;
2276  HeapTupleData tuple;
2277 
2278  itemid = PageGetItemId(page, offnum);
2279 
2280  /* Unused or redirect line pointers are of no interest */
2281  if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
2282  continue;
2283 
2284  ItemPointerSet(&(tuple.t_self), blockno, offnum);
2285 
2286  /*
2287  * Dead line pointers can have index pointers pointing to them. So
2288  * they can't be treated as visible
2289  */
2290  if (ItemIdIsDead(itemid))
2291  {
2292  all_visible = false;
2293  *all_frozen = false;
2294  break;
2295  }
2296 
2297  Assert(ItemIdIsNormal(itemid));
2298 
2299  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2300  tuple.t_len = ItemIdGetLength(itemid);
2301  tuple.t_tableOid = RelationGetRelid(rel);
2302 
2303  switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
2304  {
2305  case HEAPTUPLE_LIVE:
2306  {
2307  TransactionId xmin;
2308 
2309  /* Check comments in lazy_scan_heap. */
2310  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
2311  {
2312  all_visible = false;
2313  *all_frozen = false;
2314  break;
2315  }
2316 
2317  /*
2318  * The inserter definitely committed. But is it old enough
2319  * that everyone sees it as committed?
2320  */
2321  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
2322  if (!TransactionIdPrecedes(xmin, OldestXmin))
2323  {
2324  all_visible = false;
2325  *all_frozen = false;
2326  break;
2327  }
2328 
2329  /* Track newest xmin on page. */
2330  if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
2331  *visibility_cutoff_xid = xmin;
2332 
2333  /* Check whether this tuple is already frozen or not */
2334  if (all_visible && *all_frozen &&
2335  heap_tuple_needs_eventual_freeze(tuple.t_data))
2336  *all_frozen = false;
2337  }
2338  break;
2339 
2340  case HEAPTUPLE_DEAD:
2341  case HEAPTUPLE_RECENTLY_DEAD:
2342  case HEAPTUPLE_INSERT_IN_PROGRESS:
2343  case HEAPTUPLE_DELETE_IN_PROGRESS:
2344  {
2345  all_visible = false;
2346  *all_frozen = false;
2347  break;
2348  }
2349  default:
2350  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2351  break;
2352  }
2353  } /* scan along page */
2354 
2355  return all_visible;
2356 }
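
The TransactionIdPrecedes(xmin, OldestXmin) test above is a wraparound-aware comparison on the 32-bit XID circle. A simplified sketch of the core idea (the real transam.c function also special-cases permanent XIDs below FirstNormalTransactionId, which this sketch omits):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t TransactionId;

    /* Simplified: is id1 logically older than id2 on the XID circle? */
    static bool
    xid_precedes(TransactionId id1, TransactionId id2)
    {
        int32_t diff = (int32_t) (id1 - id2);   /* modulo-2^32 difference */

        return diff < 0;
    }

    int main(void)
    {
        printf("%d\n", xid_precedes(100, 200));          /* 1: plainly older */
        printf("%d\n", xid_precedes(4294967000u, 64));   /* 1: older across wraparound */
        return 0;
    }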

◆ heap_vacuum_rel()

void heap_vacuum_rel ( Relation  onerel,
VacuumParams params,
BufferAccessStrategy  bstrategy 
)

Definition at line 190 of file vacuumlazy.c.

References _, appendStringInfo(), Assert, buf, StringInfoData::data, DEBUG1, DEBUG2, elevel, ereport, errmsg(), errmsg_internal(), VacuumParams::freeze_min_age, VacuumParams::freeze_table_age, FreezeLimit, LVRelStats::frozenskipped_pages, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), VacuumParams::index_cleanup, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, VacuumParams::is_wraparound, IsAutoVacuumWorkerProcess(), lazy_scan_heap(), lazy_truncate_heap(), LVRelStats::lock_waiter_detected, LOG, VacuumParams::log_min_duration, VacuumParams::multixact_freeze_min_age, VacuumParams::multixact_freeze_table_age, MultiXactCutoff, MultiXactIdIsValid, MultiXactIdPrecedesOrEquals(), MyDatabaseId, LVRelStats::new_dead_tuples, LVRelStats::new_live_tuples, LVRelStats::new_rel_tuples, NoLock, LVRelStats::num_index_scans, LVRelStats::old_live_tuples, LVRelStats::old_rel_pages, OldestXmin, VacuumParams::options, LVRelStats::pages_removed, palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), LVRelStats::pinskipped_pages, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, RelationData::rd_rel, LVRelStats::rel_pages, RelationGetNamespace, RelationGetRelationName, RelationGetRelid, RowExclusiveLock, LVRelStats::scanned_pages, should_attempt_truncation(), TimestampDifference(), TimestampDifferenceExceeds(), TransactionIdIsNormal, TransactionIdPrecedesOrEquals(), VacuumParams::truncate, LVRelStats::tupcount_pages, LVRelStats::tuples_deleted, LVRelStats::useindex, vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_TERNARY_DEFAULT, VACOPT_TERNARY_ENABLED, VACOPT_VERBOSE, vacuum_set_xid_limits(), VacuumPageDirty, VacuumPageHit, VacuumPageMiss, and visibilitymap_count().

Referenced by SampleHeapTupleVisible().

190 heap_vacuum_rel(Relation onerel, VacuumParams *params,
191  BufferAccessStrategy bstrategy)
192 {
193  LVRelStats *vacrelstats;
194  Relation *Irel;
195  int nindexes;
196  PGRUsage ru0;
197  TimestampTz starttime = 0;
198  long secs;
199  int usecs;
200  double read_rate,
201  write_rate;
202  bool aggressive; /* should we scan all unfrozen pages? */
203  bool scanned_all_unfrozen; /* actually scanned all such pages? */
204  TransactionId xidFullScanLimit;
205  MultiXactId mxactFullScanLimit;
206  BlockNumber new_rel_pages;
207  BlockNumber new_rel_allvisible;
208  double new_live_tuples;
209  TransactionId new_frozen_xid;
210  MultiXactId new_min_multi;
211 
212  Assert(params != NULL);
213  Assert(params->index_cleanup != VACOPT_TERNARY_DEFAULT);
214  Assert(params->truncate != VACOPT_TERNARY_DEFAULT);
215 
216  /* not every AM requires these to be valid, but heap does */
217  Assert(TransactionIdIsNormal(onerel->rd_rel->relfrozenxid));
218  Assert(MultiXactIdIsValid(onerel->rd_rel->relminmxid));
219 
220  /* measure elapsed time iff autovacuum logging requires it */
221  if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
222  {
223  pg_rusage_init(&ru0);
224  starttime = GetCurrentTimestamp();
225  }
226 
227  if (params->options & VACOPT_VERBOSE)
228  elevel = INFO;
229  else
230  elevel = DEBUG2;
231 
232  pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
233  RelationGetRelid(onerel));
234 
235  vac_strategy = bstrategy;
236 
237  vacuum_set_xid_limits(onerel,
238  params->freeze_min_age,
239  params->freeze_table_age,
240  params->multixact_freeze_min_age,
241  params->multixact_freeze_table_age,
242  &OldestXmin, &FreezeLimit, &xidFullScanLimit,
243  &MultiXactCutoff, &mxactFullScanLimit);
244 
245  /*
246  * We request an aggressive scan if the table's frozen Xid is now older
247  * than or equal to the requested Xid full-table scan limit; or if the
248  * table's minimum MultiXactId is older than or equal to the requested
249  * mxid full-table scan limit; or if DISABLE_PAGE_SKIPPING was specified.
250  */
251  aggressive = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
252  xidFullScanLimit);
253  aggressive |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
254  mxactFullScanLimit);
255  if (params->options & VACOPT_DISABLE_PAGE_SKIPPING)
256  aggressive = true;
257 
258  /*
259  * Normally the relfrozenxid for an anti-wraparound vacuum will be old
260  * enough to force an aggressive vacuum. However, a concurrent vacuum
261  * might have already done this work that the relfrozenxid in relcache has
262  * been updated. If that happens this vacuum is redundant, so skip it.
263  */
264  if (params->is_wraparound && !aggressive)
265  {
266  ereport(DEBUG1,
267  (errmsg("skipping redundant vacuum to prevent wraparound of table \"%s.%s.%s\"",
268  get_database_name(MyDatabaseId),
269  get_namespace_name(RelationGetNamespace(onerel)),
270  RelationGetRelationName(onerel))));
271  pgstat_progress_end_command();
272  return;
273  }
274 
275  vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
276 
277  vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
278  vacrelstats->old_live_tuples = onerel->rd_rel->reltuples;
279  vacrelstats->num_index_scans = 0;
280  vacrelstats->pages_removed = 0;
281  vacrelstats->lock_waiter_detected = false;
282 
283  /* Open all indexes of the relation */
284  vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
285  vacrelstats->useindex = (nindexes > 0 &&
286  params->index_cleanup == VACOPT_TERNARY_ENABLED);
287 
288  /* Do the vacuuming */
289  lazy_scan_heap(onerel, params, vacrelstats, Irel, nindexes, aggressive);
290 
291  /* Done with indexes */
292  vac_close_indexes(nindexes, Irel, NoLock);
293 
294  /*
295  * Compute whether we actually scanned the all unfrozen pages. If we did,
296  * we can adjust relfrozenxid and relminmxid.
297  *
298  * NB: We need to check this before truncating the relation, because that
299  * will change ->rel_pages.
300  */
301  if ((vacrelstats->scanned_pages + vacrelstats->frozenskipped_pages)
302  < vacrelstats->rel_pages)
303  {
304  Assert(!aggressive);
305  scanned_all_unfrozen = false;
306  }
307  else
308  scanned_all_unfrozen = true;
309 
310  /*
311  * Optionally truncate the relation.
312  */
313  if (should_attempt_truncation(params, vacrelstats))
314  lazy_truncate_heap(onerel, vacrelstats);
315 
316  /* Report that we are now doing final cleanup */
317  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
318  PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
319 
320  /*
321  * Update statistics in pg_class.
322  *
323  * A corner case here is that if we scanned no pages at all because every
324  * page is all-visible, we should not update relpages/reltuples, because
325  * we have no new information to contribute. In particular this keeps us
326  * from replacing relpages=reltuples=0 (which means "unknown tuple
327  * density") with nonzero relpages and reltuples=0 (which means "zero
328  * tuple density") unless there's some actual evidence for the latter.
329  *
330  * It's important that we use tupcount_pages and not scanned_pages for the
331  * check described above; scanned_pages counts pages where we could not
332  * get cleanup lock, and which were processed only for frozenxid purposes.
333  *
334  * We do update relallvisible even in the corner case, since if the table
335  * is all-visible we'd definitely like to know that. But clamp the value
336  * to be not more than what we're setting relpages to.
337  *
338  * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
339  * since then we don't know for certain that all tuples have a newer xmin.
340  */
341  new_rel_pages = vacrelstats->rel_pages;
342  new_live_tuples = vacrelstats->new_live_tuples;
343  if (vacrelstats->tupcount_pages == 0 && new_rel_pages > 0)
344  {
345  new_rel_pages = vacrelstats->old_rel_pages;
346  new_live_tuples = vacrelstats->old_live_tuples;
347  }
348 
349  visibilitymap_count(onerel, &new_rel_allvisible, NULL);
350  if (new_rel_allvisible > new_rel_pages)
351  new_rel_allvisible = new_rel_pages;
352 
353  new_frozen_xid = scanned_all_unfrozen ? FreezeLimit : InvalidTransactionId;
354  new_min_multi = scanned_all_unfrozen ? MultiXactCutoff : InvalidMultiXactId;
355 
356  vac_update_relstats(onerel,
357  new_rel_pages,
358  new_live_tuples,
359  new_rel_allvisible,
360  nindexes > 0,
361  new_frozen_xid,
362  new_min_multi,
363  false);
364 
365  /* report results to the stats collector, too */
366  pgstat_report_vacuum(RelationGetRelid(onerel),
367  onerel->rd_rel->relisshared,
368  new_live_tuples,
369  vacrelstats->new_dead_tuples);
370  pgstat_progress_end_command();
371 
372  /* and log the action if appropriate */
373  if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
374  {
375  TimestampTz endtime = GetCurrentTimestamp();
376 
377  if (params->log_min_duration == 0 ||
378  TimestampDifferenceExceeds(starttime, endtime,
379  params->log_min_duration))
380  {
381  StringInfoData buf;
382  char *msgfmt;
383 
384  TimestampDifference(starttime, endtime, &secs, &usecs);
385 
386  read_rate = 0;
387  write_rate = 0;
388  if ((secs > 0) || (usecs > 0))
389  {
390  read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
391  (secs + usecs / 1000000.0);
392  write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
393  (secs + usecs / 1000000.0);
394  }
395 
396  /*
397  * This is pretty messy, but we split it up so that we can skip
398  * emitting individual parts of the message when not applicable.
399  */
400  initStringInfo(&buf);
401  if (params->is_wraparound)
402  {
403  /* an anti-wraparound vacuum has to be aggressive */
404  Assert(aggressive);
405  msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
406  }
407  else
408  {
409  if (aggressive)
410  msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
411  else
412  msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
413  }
414  appendStringInfo(&buf, msgfmt,
415  get_database_name(MyDatabaseId),
416  get_namespace_name(RelationGetNamespace(onerel)),
417  RelationGetRelationName(onerel),
418  vacrelstats->num_index_scans);
419  appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
420  vacrelstats->pages_removed,
421  vacrelstats->rel_pages,
422  vacrelstats->pinskipped_pages,
423  vacrelstats->frozenskipped_pages);
424  appendStringInfo(&buf,
425  _("tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, oldest xmin: %u\n"),
426  vacrelstats->tuples_deleted,
427  vacrelstats->new_rel_tuples,
428  vacrelstats->new_dead_tuples,
429  OldestXmin);
430  appendStringInfo(&buf,
431  _("buffer usage: %d hits, %d misses, %d dirtied\n"),
432  VacuumPageHit,
433  VacuumPageMiss,
434  VacuumPageDirty);
435  appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
436  read_rate, write_rate);
437  appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
438 
439  ereport(LOG,
440  (errmsg_internal("%s", buf.data)));
441  pfree(buf.data);
442  }
443  }
444 }
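
The read and write rates logged above are BLCKSZ bytes per buffer miss or dirtying, converted to megabytes and divided by the elapsed time. A worked example with hypothetical counter values (not from a real run):

    #include <stdio.h>

    int main(void)
    {
        const double blcksz = 8192;        /* assumed default block size */
        long   secs = 10;                  /* hypothetical elapsed time */
        int    usecs = 0;
        int    vacuum_page_miss = 12800;   /* hypothetical counters */
        int    vacuum_page_dirty = 6400;

        double elapsed = secs + usecs / 1000000.0;
        double read_rate = blcksz * vacuum_page_miss / (1024 * 1024) / elapsed;
        double write_rate = blcksz * vacuum_page_dirty / (1024 * 1024) / elapsed;

        /* Same format as the autovacuum log message above. */
        printf("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n",
               read_rate, write_rate);   /* 10.000 and 5.000 */
        return 0;
    }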

◆ lazy_check_needs_freeze()

static bool lazy_check_needs_freeze ( Buffer  buf,
bool hastup 
)
static

Definition at line 1689 of file vacuumlazy.c.

References BufferGetPage, FirstOffsetNumber, FreezeLimit, heap_tuple_needs_freeze(), ItemIdIsNormal, ItemIdIsUsed, MultiXactCutoff, OffsetNumberNext, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageIsEmpty, and PageIsNew.

Referenced by lazy_scan_heap().

1689 lazy_check_needs_freeze(Buffer buf, bool *hastup)
1690 {
1691  Page page = BufferGetPage(buf);
1692  OffsetNumber offnum,
1693  maxoff;
1694  HeapTupleHeader tupleheader;
1695 
1696  *hastup = false;
1697 
1698  /*
1699  * New and empty pages, obviously, don't contain tuples. We could make
1700  * sure that the page is registered in the FSM, but it doesn't seem worth
1701  * waiting for a cleanup lock just for that, especially because it's
1702  * likely that the pin holder will do so.
1703  */
1704  if (PageIsNew(page) || PageIsEmpty(page))
1705  return false;
1706 
1707  maxoff = PageGetMaxOffsetNumber(page);
1708  for (offnum = FirstOffsetNumber;
1709  offnum <= maxoff;
1710  offnum = OffsetNumberNext(offnum))
1711  {
1712  ItemId itemid;
1713 
1714  itemid = PageGetItemId(page, offnum);
1715 
1716  /* this should match hastup test in count_nondeletable_pages() */
1717  if (ItemIdIsUsed(itemid))
1718  *hastup = true;
1719 
1720  /* dead and redirect items never need freezing */
1721  if (!ItemIdIsNormal(itemid))
1722  continue;
1723 
1724  tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
1725 
1726  if (heap_tuple_needs_freeze(tupleheader, FreezeLimit,
1727  MultiXactCutoff, buf))
1728  return true;
1729  } /* scan along page */
1730 
1731  return false;
1732 }

◆ lazy_cleanup_index()

static void lazy_cleanup_index ( Relation  indrel,
IndexBulkDeleteResult stats,
LVRelStats vacrelstats 
)
static

Definition at line 1775 of file vacuumlazy.c.

References IndexVacuumInfo::analyze_only, elevel, ereport, errdetail(), errmsg(), IndexVacuumInfo::estimated_count, IndexBulkDeleteResult::estimated_count, IndexVacuumInfo::index, index_vacuum_cleanup(), InvalidMultiXactId, InvalidTransactionId, IndexVacuumInfo::message_level, LVRelStats::new_rel_tuples, IndexVacuumInfo::num_heap_tuples, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, pfree(), pg_rusage_init(), pg_rusage_show(), LVRelStats::rel_pages, RelationGetRelationName, IndexVacuumInfo::report_progress, IndexVacuumInfo::strategy, LVRelStats::tupcount_pages, IndexBulkDeleteResult::tuples_removed, vac_strategy, and vac_update_relstats().

Referenced by lazy_scan_heap().

1775 lazy_cleanup_index(Relation indrel,
1776  IndexBulkDeleteResult *stats,
1777  LVRelStats *vacrelstats)
1778 {
1779  IndexVacuumInfo ivinfo;
1780  PGRUsage ru0;
1781 
1782  pg_rusage_init(&ru0);
1783 
1784  ivinfo.index = indrel;
1785  ivinfo.analyze_only = false;
1786  ivinfo.report_progress = false;
1787  ivinfo.estimated_count = (vacrelstats->tupcount_pages < vacrelstats->rel_pages);
1788  ivinfo.message_level = elevel;
1789 
1790  /*
1791  * Now we can provide a better estimate of total number of surviving
1792  * tuples (we assume indexes are more interested in that than in the
1793  * number of nominally live tuples).
1794  */
1795  ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples;
1796  ivinfo.strategy = vac_strategy;
1797 
1798  stats = index_vacuum_cleanup(&ivinfo, stats);
1799 
1800  if (!stats)
1801  return;
1802 
1803  /*
1804  * Now update statistics in pg_class, but only if the index says the count
1805  * is accurate.
1806  */
1807  if (!stats->estimated_count)
1808  vac_update_relstats(indrel,
1809  stats->num_pages,
1810  stats->num_index_tuples,
1811  0,
1812  false,
1813  InvalidTransactionId,
1814  InvalidMultiXactId,
1815  false);
1816 
1817  ereport(elevel,
1818  (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
1819  RelationGetRelationName(indrel),
1820  stats->num_index_tuples,
1821  stats->num_pages),
1822  errdetail("%.0f index row versions were removed.\n"
1823  "%u index pages have been deleted, %u are currently reusable.\n"
1824  "%s.",
1825  stats->tuples_removed,
1826  stats->pages_deleted, stats->pages_free,
1827  pg_rusage_show(&ru0))));
1828 
1829  pfree(stats);
1830 }

◆ lazy_record_dead_tuple()

static void lazy_record_dead_tuple ( LVRelStats vacrelstats,
ItemPointer  itemptr 
)
static

Definition at line 2177 of file vacuumlazy.c.

References LVRelStats::dead_tuples, LVRelStats::max_dead_tuples, LVRelStats::num_dead_tuples, pgstat_progress_update_param(), and PROGRESS_VACUUM_NUM_DEAD_TUPLES.

Referenced by lazy_scan_heap().

2177 lazy_record_dead_tuple(LVRelStats *vacrelstats,
2178  ItemPointer itemptr)
2179 {
2180  /*
2181  * The array shouldn't overflow under normal behavior, but perhaps it
2182  * could if we are given a really small maintenance_work_mem. In that
2183  * case, just forget the last few tuples (we'll get 'em next time).
2184  */
2185  if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples)
2186  {
2187  vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
2188  vacrelstats->num_dead_tuples++;
2189  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
2190  vacrelstats->num_dead_tuples);
2191  }
2192 }
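
The overflow guard in the comment above refers to the array sized in lazy_space_alloc(): roughly maintenance_work_mem worth of ItemPointers, clamped so a small relation never reserves more slots than it could possibly need. A simplified standalone sketch of that sizing (constants hard-coded for illustration; the real code also clamps against MaxAllocSize and floors the result at MaxHeapTuplesPerPage):

    #include <stdio.h>
    #include <stdint.h>

    #define SIZEOF_ITEMPOINTER 6         /* sizeof(ItemPointerData) */
    #define MAX_HEAP_TUPLES_PER_PAGE 291 /* approx. for 8 kB pages */

    int main(void)
    {
        long     work_mem_kb = 65536;   /* hypothetical maintenance_work_mem: 64 MB */
        uint32_t relblocks = 1000;      /* hypothetical table size in blocks */

        long maxtuples = (work_mem_kb * 1024L) / SIZEOF_ITEMPOINTER;

        /* Don't reserve more TID slots than the table could ever hold. */
        if ((uint32_t) (maxtuples / MAX_HEAP_TUPLES_PER_PAGE) > relblocks)
            maxtuples = (long) relblocks * MAX_HEAP_TUPLES_PER_PAGE;

        printf("dead-tuple array capacity: %ld TIDs\n", maxtuples);   /* 291000 */
        return 0;
    }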

◆ lazy_scan_heap()

static void lazy_scan_heap ( Relation  onerel,
VacuumParams params,
LVRelStats vacrelstats,
Relation Irel,
int  nindexes,
bool  aggressive 
)
static

Definition at line 496 of file vacuumlazy.c.

References _, appendStringInfo(), Assert, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferGetPageSize, BufferIsValid, ConditionalLockBufferForCleanup(), StringInfoData::data, elevel, elog, END_CRIT_SECTION, ereport, errdetail_internal(), errmsg(), ERROR, FirstOffsetNumber, FORCE_CHECK_PAGE, FreeSpaceMapVacuumRange(), FreezeLimit, LVRelStats::frozenskipped_pages, get_namespace_name(), GetRecordedFreeSpace(), heap_execute_freeze_tuple(), heap_page_prune(), heap_prepare_freeze_tuple(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderAdvanceLatestRemovedXid(), HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleSatisfiesVacuum(), i, VacuumParams::index_cleanup, initStringInfo(), InvalidBuffer, InvalidTransactionId, InvalidXLogRecPtr, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet, LVRelStats::latestRemovedXid, lazy_check_needs_freeze(), lazy_cleanup_index(), lazy_record_dead_tuple(), lazy_space_alloc(), lazy_vacuum_heap(), lazy_vacuum_index(), lazy_vacuum_page(), LockBuffer(), LockBufferForCleanup(), log_heap_freeze(), log_newpage_buffer(), MAIN_FORKNUM, MarkBufferDirty(), LVRelStats::max_dead_tuples, MaxHeapTuplesPerPage, MultiXactCutoff, LVRelStats::new_dead_tuples, LVRelStats::new_live_tuples, LVRelStats::new_rel_tuples, ngettext, LVRelStats::nonempty_pages, LVRelStats::num_dead_tuples, LVRelStats::num_index_scans, xl_heap_freeze_tuple::offset, OffsetNumberNext, OldestXmin, VacuumParams::options, PageClearAllVisible, PageGetHeapFreeSpace(), PageGetItem, PageGetItemId, PageGetLSN, PageGetMaxOffsetNumber, PageIsAllVisible, PageIsEmpty, PageIsNew, PageSetAllVisible, PageSetLSN, palloc(), palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), LVRelStats::pinskipped_pages, PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLES, PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, RBM_NORMAL, RelationData::rd_rel, ReadBufferExtended(), RecordPageWithFreeSpace(), LVRelStats::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), relfrozenxid, relminmxid, relname, LVRelStats::scanned_pages, SizeOfPageHeaderData, SKIP_PAGES_THRESHOLD, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdPrecedes(), LVRelStats::tupcount_pages, LVRelStats::tuples_deleted, UnlockReleaseBuffer(), LVRelStats::useindex, vac_estimate_reltuples(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_TERNARY_DISABLED, vacuum_delay_point(), VACUUM_FSM_EVERY_PAGES, vacuum_log_cleanup_info(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, VM_ALL_FROZEN, VM_ALL_VISIBLE, and WARNING.

Referenced by heap_vacuum_rel().

496 lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
497  Relation *Irel, int nindexes, bool aggressive)
498 {
499  BlockNumber nblocks,
500  blkno;
501  HeapTupleData tuple;
502  char *relname;
503  TransactionId relfrozenxid = onerel->rd_rel->relfrozenxid;
504  TransactionId relminmxid = onerel->rd_rel->relminmxid;
505  BlockNumber empty_pages,
506  vacuumed_pages,
507  next_fsm_block_to_vacuum;
508  double num_tuples, /* total number of nonremovable tuples */
509  live_tuples, /* live tuples (reltuples estimate) */
510  tups_vacuumed, /* tuples cleaned up by vacuum */
511  nkeep, /* dead-but-not-removable tuples */
512  nunused; /* unused line pointers */
513  IndexBulkDeleteResult **indstats;
514  int i;
515  PGRUsage ru0;
516  Buffer vmbuffer = InvalidBuffer;
517  BlockNumber next_unskippable_block;
518  bool skipping_blocks;
519  xl_heap_freeze_tuple *frozen;
520  StringInfoData buf;
521  const int initprog_index[] = {
522  PROGRESS_VACUUM_PHASE,
523  PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
524  PROGRESS_VACUUM_MAX_DEAD_TUPLES
525  };
526  int64 initprog_val[3];
527 
528  pg_rusage_init(&ru0);
529 
530  relname = RelationGetRelationName(onerel);
531  if (aggressive)
532  ereport(elevel,
533  (errmsg("aggressively vacuuming \"%s.%s\"",
534  get_namespace_name(RelationGetNamespace(onerel)),
535  relname)));
536  else
537  ereport(elevel,
538  (errmsg("vacuuming \"%s.%s\"",
539  get_namespace_name(RelationGetNamespace(onerel)),
540  relname)));
541 
542  empty_pages = vacuumed_pages = 0;
543  next_fsm_block_to_vacuum = (BlockNumber) 0;
544  num_tuples = live_tuples = tups_vacuumed = nkeep = nunused = 0;
545 
546  indstats = (IndexBulkDeleteResult **)
547  palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
548 
549  nblocks = RelationGetNumberOfBlocks(onerel);
550  vacrelstats->rel_pages = nblocks;
551  vacrelstats->scanned_pages = 0;
552  vacrelstats->tupcount_pages = 0;
553  vacrelstats->nonempty_pages = 0;
554  vacrelstats->latestRemovedXid = InvalidTransactionId;
555 
556  lazy_space_alloc(vacrelstats, nblocks);
557  frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
558 
559  /* Report that we're scanning the heap, advertising total # of blocks */
560  initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
561  initprog_val[1] = nblocks;
562  initprog_val[2] = vacrelstats->max_dead_tuples;
563  pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
564 
565  /*
566  * Except when aggressive is set, we want to skip pages that are
567  * all-visible according to the visibility map, but only when we can skip
568  * at least SKIP_PAGES_THRESHOLD consecutive pages. Since we're reading
569  * sequentially, the OS should be doing readahead for us, so there's no
570  * gain in skipping a page now and then; that's likely to disable
571  * readahead and so be counterproductive. Also, skipping even a single
572  * page means that we can't update relfrozenxid, so we only want to do it
573  * if we can skip a goodly number of pages.
574  *
575  * When aggressive is set, we can't skip pages just because they are
576  * all-visible, but we can still skip pages that are all-frozen, since
577  * such pages do not need freezing and do not affect the value that we can
578  * safely set for relfrozenxid or relminmxid.
579  *
580  * Before entering the main loop, establish the invariant that
581  * next_unskippable_block is the next block number >= blkno that we can't
582  * skip based on the visibility map, either all-visible for a regular scan
583  * or all-frozen for an aggressive scan. We set it to nblocks if there's
584  * no such block. We also set up the skipping_blocks flag correctly at
585  * this stage.
586  *
587  * Note: The value returned by visibilitymap_get_status could be slightly
588  * out-of-date, since we make this test before reading the corresponding
589  * heap page or locking the buffer. This is OK. If we mistakenly think
590  * that the page is all-visible or all-frozen when in fact the flag's just
591  * been cleared, we might fail to vacuum the page. It's easy to see that
592  * skipping a page when aggressive is not set is not a very big deal; we
593  * might leave some dead tuples lying around, but the next vacuum will
594  * find them. But even when aggressive *is* set, it's still OK if we miss
595  * a page whose all-frozen marking has just been cleared. Any new XIDs
596  * just added to that page are necessarily newer than the GlobalXmin we
597  * computed, so they'll have no effect on the value to which we can safely
598  * set relfrozenxid. A similar argument applies for MXIDs and relminmxid.
599  *
600  * We will scan the table's last page, at least to the extent of
601  * determining whether it has tuples or not, even if it should be skipped
602  * according to the above rules; except when we've already determined that
603  * it's not worth trying to truncate the table. This avoids having
604  * lazy_truncate_heap() take access-exclusive lock on the table to attempt
605  * a truncation that just fails immediately because there are tuples in
606  * the last page. This is worth avoiding mainly because such a lock must
607  * be replayed on any hot standby, where it can be disruptive.
608  */
609  next_unskippable_block = 0;
610  if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
611  {
612  while (next_unskippable_block < nblocks)
613  {
614  uint8 vmstatus;
615 
616  vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
617  &vmbuffer);
618  if (aggressive)
619  {
620  if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
621  break;
622  }
623  else
624  {
625  if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
626  break;
627  }
628  vacuum_delay_point();
629  next_unskippable_block++;
630  }
631  }
632 
633  if (next_unskippable_block >= SKIP_PAGES_THRESHOLD)
634  skipping_blocks = true;
635  else
636  skipping_blocks = false;
637 
638  for (blkno = 0; blkno < nblocks; blkno++)
639  {
640  Buffer buf;
641  Page page;
642  OffsetNumber offnum,
643  maxoff;
644  bool tupgone,
645  hastup;
646  int prev_dead_count;
647  int nfrozen;
648  Size freespace;
649  bool all_visible_according_to_vm = false;
650  bool all_visible;
651  bool all_frozen = true; /* provided all_visible is also true */
652  bool has_dead_tuples;
653  TransactionId visibility_cutoff_xid = InvalidTransactionId;
654 
655  /* see note above about forcing scanning of last page */
656 #define FORCE_CHECK_PAGE() \
657  (blkno == nblocks - 1 && should_attempt_truncation(params, vacrelstats))
658 
659  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
660 
661  if (blkno == next_unskippable_block)
662  {
663  /* Time to advance next_unskippable_block */
664  next_unskippable_block++;
665  if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
666  {
667  while (next_unskippable_block < nblocks)
668  {
669  uint8 vmskipflags;
670 
671  vmskipflags = visibilitymap_get_status(onerel,
672  next_unskippable_block,
673  &vmbuffer);
674  if (aggressive)
675  {
676  if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
677  break;
678  }
679  else
680  {
681  if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
682  break;
683  }
684  vacuum_delay_point();
685  next_unskippable_block++;
686  }
687  }
688 
689  /*
690  * We know we can't skip the current block. But set up
691  * skipping_blocks to do the right thing at the following blocks.
692  */
693  if (next_unskippable_block - blkno > SKIP_PAGES_THRESHOLD)
694  skipping_blocks = true;
695  else
696  skipping_blocks = false;
697 
698  /*
699  * Normally, the fact that we can't skip this block must mean that
700  * it's not all-visible. But in an aggressive vacuum we know only
701  * that it's not all-frozen, so it might still be all-visible.
702  */
703  if (aggressive && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
704  all_visible_according_to_vm = true;
705  }
706  else
707  {
708  /*
709  * The current block is potentially skippable; if we've seen a
710  * long enough run of skippable blocks to justify skipping it, and
711  * we're not forced to check it, then go ahead and skip.
712  * Otherwise, the page must be at least all-visible if not
713  * all-frozen, so we can set all_visible_according_to_vm = true.
714  */
715  if (skipping_blocks && !FORCE_CHECK_PAGE())
716  {
717  /*
718  * Tricky, tricky. If this is in aggressive vacuum, the page
719  * must have been all-frozen at the time we checked whether it
720  * was skippable, but it might not be any more. We must be
721  * careful to count it as a skipped all-frozen page in that
722  * case, or else we'll think we can't update relfrozenxid and
723  * relminmxid. If it's not an aggressive vacuum, we don't
724  * know whether it was all-frozen, so we have to recheck; but
725  * in this case an approximate answer is OK.
726  */
727  if (aggressive || VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
728  vacrelstats->frozenskipped_pages++;
729  continue;
730  }
731  all_visible_according_to_vm = true;
732  }
733 
734  vacuum_delay_point();
735 
736  /*
737  * If we are close to overrunning the available space for dead-tuple
738  * TIDs, pause and do a cycle of vacuuming before we tackle this page.
739  */
740  if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
741  vacrelstats->num_dead_tuples > 0)
742  {
743  const int hvp_index[] = {
744  PROGRESS_VACUUM_PHASE,
745  PROGRESS_VACUUM_NUM_INDEX_VACUUMS
746  };
747  int64 hvp_val[2];
748 
749  /*
750  * Before beginning index vacuuming, we release any pin we may
751  * hold on the visibility map page. This isn't necessary for
752  * correctness, but we do it anyway to avoid holding the pin
753  * across a lengthy, unrelated operation.
754  */
755  if (BufferIsValid(vmbuffer))
756  {
757  ReleaseBuffer(vmbuffer);
758  vmbuffer = InvalidBuffer;
759  }
760 
761  /* Log cleanup info before we touch indexes */
762  vacuum_log_cleanup_info(onerel, vacrelstats);
763 
764  /* Report that we are now vacuuming indexes */
765  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
766  PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
767 
768  /* Remove index entries */
769  for (i = 0; i < nindexes; i++)
770  lazy_vacuum_index(Irel[i],
771  &indstats[i],
772  vacrelstats);
773 
774  /*
775  * Report that we are now vacuuming the heap. We also increase
776  * the number of index scans here; note that by using
777  * pgstat_progress_update_multi_param we can update both
778  * parameters atomically.
779  */
780  hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
781  hvp_val[1] = vacrelstats->num_index_scans + 1;
782  pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
783 
784  /* Remove tuples from heap */
785  lazy_vacuum_heap(onerel, vacrelstats);
786 
787  /*
788  * Forget the now-vacuumed tuples, and press on, but be careful
789  * not to reset latestRemovedXid since we want that value to be
790  * valid.
791  */
792  vacrelstats->num_dead_tuples = 0;
793  vacrelstats->num_index_scans++;
794 
795  /*
796  * Vacuum the Free Space Map to make newly-freed space visible on
797  * upper-level FSM pages. Note we have not yet processed blkno.
798  */
799  FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum, blkno);
800  next_fsm_block_to_vacuum = blkno;
801 
802  /* Report that we are once again scanning the heap */
803  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
804  PROGRESS_VACUUM_PHASE_SCAN_HEAP);
805  }
806 
807  /*
808  * Pin the visibility map page in case we need to mark the page
809  * all-visible. In most cases this will be very cheap, because we'll
810  * already have the correct page pinned anyway. However, it's
811  * possible that (a) next_unskippable_block is covered by a different
812  * VM page than the current block or (b) we released our pin and did a
813  * cycle of index vacuuming.
814  *
815  */
816  visibilitymap_pin(onerel, blkno, &vmbuffer);
817 
818  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
819  RBM_NORMAL, vac_strategy);
820 
821  /* We need buffer cleanup lock so that we can prune HOT chains. */
822  if (!ConditionalLockBufferForCleanup(buf))
823  {
824  /*
825  * If we're not performing an aggressive scan to guard against XID
826  * wraparound, and we don't want to forcibly check the page, then
827  * it's OK to skip vacuuming pages we get a lock conflict on. They
828  * will be dealt with in some future vacuum.
829  */
830  if (!aggressive && !FORCE_CHECK_PAGE())
831  {
832  ReleaseBuffer(buf);
833  vacrelstats->pinskipped_pages++;
834  continue;
835  }
836 
837  /*
838  * Read the page with share lock to see if any xids on it need to
839  * be frozen. If not we just skip the page, after updating our
840  * scan statistics. If there are some, we wait for cleanup lock.
841  *
842  * We could defer the lock request further by remembering the page
843  * and coming back to it later, or we could even register
844  * ourselves for multiple buffers and then service whichever one
845  * is received first. For now, this seems good enough.
846  *
847  * If we get here with aggressive false, then we're just forcibly
848  * checking the page, and so we don't want to insist on getting
849  * the lock; we only need to know if the page contains tuples, so
850  * that we can update nonempty_pages correctly. It's convenient
851  * to use lazy_check_needs_freeze() for both situations, though.
852  */
853  LockBuffer(buf, BUFFER_LOCK_SHARE);
854  if (!lazy_check_needs_freeze(buf, &hastup))
855  {
856  UnlockReleaseBuffer(buf);
857  vacrelstats->scanned_pages++;
858  vacrelstats->pinskipped_pages++;
859  if (hastup)
860  vacrelstats->nonempty_pages = blkno + 1;
861  continue;
862  }
863  if (!aggressive)
864  {
865  /*
866  * Here, we must not advance scanned_pages; that would amount
867  * to claiming that the page contains no freezable tuples.
868  */
869  UnlockReleaseBuffer(buf);
870  vacrelstats->pinskipped_pages++;
871  if (hastup)
872  vacrelstats->nonempty_pages = blkno + 1;
873  continue;
874  }
875  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
876  LockBufferForCleanup(buf);
877  /* drop through to normal processing */
878  }
879 
880  vacrelstats->scanned_pages++;
881  vacrelstats->tupcount_pages++;
882 
883  page = BufferGetPage(buf);
884 
885  if (PageIsNew(page))
886  {
887  bool still_new;
888 
889  /*
890  * All-zeroes pages can be left over if either a backend extends
891  * the relation by a single page, but crashes before the newly
892  * initialized page has been written out, or when bulk-extending
893  * the relation (which creates a number of empty pages at the tail
894  * end of the relation, but enters them into the FSM).
895  *
896  * Make sure these pages are in the FSM, to ensure they can be
897  * reused. Do that by testing if there's any space recorded for
898  * the page. If not, enter it.
899  *
900  * Note we do not enter the page into the visibilitymap. That has
901  * the downside that we repeatedly visit this page in subsequent
902  * vacuums, but otherwise we'll never not discover the space on a
903  * promoted standby. The harm of repeated checking ought to
904  * normally not be too bad - the space usually should be used at
905  * some point, otherwise there wouldn't be any regular vacuums.
906  */
907 
908  /*
909  * Perform checking of FSM after releasing lock, the fsm is
910  * approximate, after all.
911  */
912  still_new = PageIsNew(page);
913  UnlockReleaseBuffer(buf);
914 
915  if (still_new)
916  {
917  empty_pages++;
918 
919  if (GetRecordedFreeSpace(onerel, blkno) == 0)
920  {
921  Size freespace;
922 
923  freespace = BufferGetPageSize(buf) - SizeOfPageHeaderData;
924  RecordPageWithFreeSpace(onerel, blkno, freespace);
925  }
926  }
927  continue;
928  }
929 
930  if (PageIsEmpty(page))
931  {
932  empty_pages++;
933  freespace = PageGetHeapFreeSpace(page);
934 
935  /*
936  * Empty pages are always all-visible and all-frozen (note that
937  * the same is currently not true for new pages, see above).
938  */
939  if (!PageIsAllVisible(page))
940  {
941  START_CRIT_SECTION();
942
943  /* mark buffer dirty before writing a WAL record */
944  MarkBufferDirty(buf);
945 
946  /*
947  * It's possible that another backend has extended the heap,
948  * initialized the page, and then failed to WAL-log the page
949  * due to an ERROR. Since heap extension is not WAL-logged,
950  * recovery might try to replay our record setting the page
951  * all-visible and find that the page isn't initialized, which
952  * will cause a PANIC. To prevent that, check whether the
953  * page has been previously WAL-logged, and if not, do that
954  * now.
955  */
956  if (RelationNeedsWAL(onerel) &&
957  PageGetLSN(page) == InvalidXLogRecPtr)
958  log_newpage_buffer(buf, true);
959 
960  PageSetAllVisible(page);
961  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
962  vmbuffer, InvalidTransactionId,
963  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
964  END_CRIT_SECTION();
965  }
966 
967  UnlockReleaseBuffer(buf);
968  RecordPageWithFreeSpace(onerel, blkno, freespace);
969  continue;
970  }
971 
972  /*
973  * Prune all HOT-update chains in this page.
974  *
975  * We count tuples removed by the pruning step as removed by VACUUM.
976  */
977  tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
978  &vacrelstats->latestRemovedXid);
979 
980  /*
981  * Now scan the page to collect vacuumable items and check for tuples
982  * requiring freezing.
983  */
984  all_visible = true;
985  has_dead_tuples = false;
986  nfrozen = 0;
987  hastup = false;
988  prev_dead_count = vacrelstats->num_dead_tuples;
989  maxoff = PageGetMaxOffsetNumber(page);
990 
991  /*
992  * Note: If you change anything in the loop below, also look at
993  * heap_page_is_all_visible to see if that needs to be changed.
994  */
995  for (offnum = FirstOffsetNumber;
996  offnum <= maxoff;
997  offnum = OffsetNumberNext(offnum))
998  {
999  ItemId itemid;
1000 
1001  itemid = PageGetItemId(page, offnum);
1002 
1003  /* Unused items require no processing, but we count 'em */
1004  if (!ItemIdIsUsed(itemid))
1005  {
1006  nunused += 1;
1007  continue;
1008  }
1009 
1010  /* Redirect items mustn't be touched */
1011  if (ItemIdIsRedirected(itemid))
1012  {
1013  hastup = true; /* this page won't be truncatable */
1014  continue;
1015  }
1016 
1017  ItemPointerSet(&(tuple.t_self), blkno, offnum);
1018 
1019  /*
1020  * DEAD line pointers are to be vacuumed normally; but we don't
1021  * count them in tups_vacuumed, else we'd be double-counting (at
1022  * least in the common case where heap_page_prune() just freed up
1023  * a non-HOT tuple).
1024  */
1025  if (ItemIdIsDead(itemid))
1026  {
1027  lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
1028  all_visible = false;
1029  continue;
1030  }
1031 
1032  Assert(ItemIdIsNormal(itemid));
1033 
1034  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
1035  tuple.t_len = ItemIdGetLength(itemid);
1036  tuple.t_tableOid = RelationGetRelid(onerel);
1037 
1038  tupgone = false;
1039 
1040  /*
1041  * The criteria for counting a tuple as live in this block need to
1042  * match what analyze.c's acquire_sample_rows() does, otherwise
1043  * VACUUM and ANALYZE may produce wildly different reltuples
1044  * values, e.g. when there are many recently-dead tuples.
1045  *
1046  * The logic here is a bit simpler than acquire_sample_rows(), as
1047  * VACUUM can't run inside a transaction block, which makes some
1048  * cases impossible (e.g. in-progress insert from the same
1049  * transaction).
1050  */
1051  switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
1052  {
1053  case HEAPTUPLE_DEAD:
1054 
1055  /*
1056  * Ordinarily, DEAD tuples would have been removed by
1057  * heap_page_prune(), but it's possible that the tuple
1058  * state changed since heap_page_prune() looked. In
1059  * particular an INSERT_IN_PROGRESS tuple could have
1060  * changed to DEAD if the inserter aborted. So this
1061  * cannot be considered an error condition.
1062  *
1063  * If the tuple is HOT-updated then it must only be
1064  * removed by a prune operation; so we keep it just as if
1065  * it were RECENTLY_DEAD. Also, if it's a heap-only
1066  * tuple, we choose to keep it, because it'll be a lot
1067  * cheaper to get rid of it in the next pruning pass than
1068  * to treat it like an indexed tuple. Finally, if index
1069  * cleanup is disabled, the second heap pass will not
1070  * execute, and the tuple will not get removed, so we must
1071  * treat it like any other dead tuple that we choose to
1072  * keep.
1073  *
1074  * If this were to happen for a tuple that actually needed
1075  * to be deleted, we'd be in trouble, because it'd
1076  * possibly leave a tuple below the relation's xmin
1077  * horizon alive. heap_prepare_freeze_tuple() is prepared
1078  * to detect that case and abort the transaction,
1079  * preventing corruption.
1080  */
1081  if (HeapTupleIsHotUpdated(&tuple) ||
1082  HeapTupleIsHeapOnly(&tuple) ||
1083  params->index_cleanup == VACOPT_TERNARY_DISABLED)
1084  nkeep += 1;
1085  else
1086  tupgone = true; /* we can delete the tuple */
1087  all_visible = false;
1088  break;
1089  case HEAPTUPLE_LIVE:
1090 
1091  /*
1092  * Count it as live. Not only is this natural, but it's
1093  * also what acquire_sample_rows() does.
1094  */
1095  live_tuples += 1;
1096 
1097  /*
1098  * Is the tuple definitely visible to all transactions?
1099  *
1100  * NB: Like with per-tuple hint bits, we can't set the
1101  * PD_ALL_VISIBLE flag if the inserter committed
1102  * asynchronously. See SetHintBits for more info. Check
1103  * that the tuple is hinted xmin-committed because of
1104  * that.
1105  */
1106  if (all_visible)
1107  {
1108  TransactionId xmin;
1109 
1110  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
1111  {
1112  all_visible = false;
1113  break;
1114  }
1115 
1116  /*
1117  * The inserter definitely committed. But is it old
1118  * enough that everyone sees it as committed?
1119  */
1120  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
1121  if (!TransactionIdPrecedes(xmin, OldestXmin))
1122  {
1123  all_visible = false;
1124  break;
1125  }
1126 
1127  /* Track newest xmin on page. */
1128  if (TransactionIdFollows(xmin, visibility_cutoff_xid))
1129  visibility_cutoff_xid = xmin;
1130  }
1131  break;
1132  case HEAPTUPLE_RECENTLY_DEAD:
1133
1134  /*
1135  * If tuple is recently deleted then we must not remove it
1136  * from relation.
1137  */
1138  nkeep += 1;
1139  all_visible = false;
1140  break;
1141  case HEAPTUPLE_INSERT_IN_PROGRESS:
1142
1143  /*
1144  * This is an expected case during concurrent vacuum.
1145  *
1146  * We do not count these rows as live, because we expect
1147  * the inserting transaction to update the counters at
1148  * commit, and we assume that will happen only after we
1149  * report our results. This assumption is a bit shaky,
1150  * but it is what acquire_sample_rows() does, so be
1151  * consistent.
1152  */
1153  all_visible = false;
1154  break;
1155  case HEAPTUPLE_DELETE_IN_PROGRESS:
1156  /* This is an expected case during concurrent vacuum */
1157  all_visible = false;
1158 
1159  /*
1160  * Count such rows as live. As above, we assume the
1161  * deleting transaction will commit and update the
1162  * counters after we report.
1163  */
1164  live_tuples += 1;
1165  break;
1166  default:
1167  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1168  break;
1169  }
1170 
1171  if (tupgone)
1172  {
1173  lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
1174  HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
1175  &vacrelstats->latestRemovedXid);
1176  tups_vacuumed += 1;
1177  has_dead_tuples = true;
1178  }
1179  else
1180  {
1181  bool tuple_totally_frozen;
1182 
1183  num_tuples += 1;
1184  hastup = true;
1185 
1186  /*
1187  * Each non-removable tuple must be checked to see if it needs
1188  * freezing. Note we already have exclusive buffer lock.
1189  */
1190  if (heap_prepare_freeze_tuple(tuple.t_data,
1191  relfrozenxid, relminmxid,
1192  FreezeLimit, MultiXactCutoff,
1193  &frozen[nfrozen],
1194  &tuple_totally_frozen))
1195  frozen[nfrozen++].offset = offnum;
1196 
1197  if (!tuple_totally_frozen)
1198  all_frozen = false;
1199  }
1200  } /* scan along page */
1201 
1202  /*
1203  * If we froze any tuples, mark the buffer dirty, and write a WAL
1204  * record recording the changes. We must log the changes to be
1205  * crash-safe against future truncation of CLOG.
1206  */
1207  if (nfrozen > 0)
1208  {
1209  START_CRIT_SECTION();
1210
1211  MarkBufferDirty(buf);
1212 
1213  /* execute collected freezes */
1214  for (i = 0; i < nfrozen; i++)
1215  {
1216  ItemId itemid;
1217  HeapTupleHeader htup;
1218 
1219  itemid = PageGetItemId(page, frozen[i].offset);
1220  htup = (HeapTupleHeader) PageGetItem(page, itemid);
1221 
1222  heap_execute_freeze_tuple(htup, &frozen[i]);
1223  }
1224 
1225  /* Now WAL-log freezing if necessary */
1226  if (RelationNeedsWAL(onerel))
1227  {
1228  XLogRecPtr recptr;
1229 
1230  recptr = log_heap_freeze(onerel, buf, FreezeLimit,
1231  frozen, nfrozen);
1232  PageSetLSN(page, recptr);
1233  }
1234 
1235  END_CRIT_SECTION();
1236  }
1237 
1238  /*
1239  * If there are no indexes we can vacuum the page right now instead of
1240  * doing a second scan. Also we don't do that but forget dead tuples
1241  * when index cleanup is disabled.
1242  */
1243  if (!vacrelstats->useindex && vacrelstats->num_dead_tuples > 0)
1244  {
1245  if (nindexes == 0)
1246  {
1247  /* Remove tuples from heap if the table has no index */
1248  lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
1249  vacuumed_pages++;
1250  has_dead_tuples = false;
1251  }
1252  else
1253  {
1254  /*
1255  * Here, we have indexes but index cleanup is disabled.
1256  * Instead of vacuuming the dead tuples on the heap, we just
1257  * forget them.
1258  *
1259  * Note that vacrelstats->dead_tuples could have tuples which
1260  * became dead after HOT-pruning but are not marked dead yet.
1261  * We do not process them because it's a very rare condition,
1262  * and the next vacuum will process them anyway.
1263  */
1264  Assert(params->index_cleanup == VACOPT_TERNARY_DISABLED);
1265  }
1266 
1267  /*
1268  * Forget the now-vacuumed tuples, and press on, but be careful
1269  * not to reset latestRemovedXid since we want that value to be
1270  * valid.
1271  */
1272  vacrelstats->num_dead_tuples = 0;
1273 
1274  /*
1275  * Periodically do incremental FSM vacuuming to make newly-freed
1276  * space visible on upper FSM pages. Note: although we've cleaned
1277  * the current block, we haven't yet updated its FSM entry (that
1278  * happens further down), so passing end == blkno is correct.
1279  */
1280  if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1281  {
1282  FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum,
1283  blkno);
1284  next_fsm_block_to_vacuum = blkno;
1285  }
1286  }
1287 
1288  freespace = PageGetHeapFreeSpace(page);
1289 
1290  /* mark page all-visible, if appropriate */
1291  if (all_visible && !all_visible_according_to_vm)
1292  {
1293  uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
1294
1295  if (all_frozen)
1296  flags |= VISIBILITYMAP_ALL_FROZEN;
1297 
1298  /*
1299  * It should never be the case that the visibility map page is set
1300  * while the page-level bit is clear, but the reverse is allowed
1301  * (if checksums are not enabled). Regardless, set both bits
1302  * so that we get back in sync.
1303  *
1304  * NB: If the heap page is all-visible but the VM bit is not set,
1305  * we don't need to dirty the heap page. However, if checksums
1306  * are enabled, we do need to make sure that the heap page is
1307  * dirtied before passing it to visibilitymap_set(), because it
1308  * may be logged. Given that this situation should only happen in
1309  * rare cases after a crash, it is not worth optimizing.
1310  */
1311  PageSetAllVisible(page);
1312  MarkBufferDirty(buf);
1313  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1314  vmbuffer, visibility_cutoff_xid, flags);
1315  }
1316 
1317  /*
1318  * As of PostgreSQL 9.2, the visibility map bit should never be set if
1319  * the page-level bit is clear. However, it's possible that the bit
1320  * got cleared after we checked it and before we took the buffer
1321  * content lock, so we must recheck before jumping to the conclusion
1322  * that something bad has happened.
1323  */
1324  else if (all_visible_according_to_vm && !PageIsAllVisible(page)
1325  && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
1326  {
1327  elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
1328  relname, blkno);
1329  visibilitymap_clear(onerel, blkno, vmbuffer,
1330  VISIBILITYMAP_VALID_BITS);
1331  }
1332 
1333  /*
1334  * It's possible for the value returned by GetOldestXmin() to move
1335  * backwards, so it's not wrong for us to see tuples that appear to
1336  * not be visible to everyone yet, while PD_ALL_VISIBLE is already
1337  * set. The real safe xmin value never moves backwards, but
1338  * GetOldestXmin() is conservative and sometimes returns a value
1339  * that's unnecessarily small, so if we see that contradiction it just
1340  * means that the tuples that we think are not visible to everyone yet
1341  * actually are, and the PD_ALL_VISIBLE flag is correct.
1342  *
1343  * There should never be dead tuples on a page with PD_ALL_VISIBLE
1344  * set, however.
1345  */
1346  else if (PageIsAllVisible(page) && has_dead_tuples)
1347  {
1348  elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
1349  relname, blkno);
1350  PageClearAllVisible(page);
1351  MarkBufferDirty(buf);
1352  visibilitymap_clear(onerel, blkno, vmbuffer,
1353  VISIBILITYMAP_VALID_BITS);
1354  }
1355 
1356  /*
1357  * If the all-visible page is turned out to be all-frozen but not
1358  * marked, we should so mark it. Note that all_frozen is only valid
1359  * if all_visible is true, so we must check both.
1360  */
1361  else if (all_visible_according_to_vm && all_visible && all_frozen &&
1362  !VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
1363  {
1364  /*
1365  * We can pass InvalidTransactionId as the cutoff XID here,
1366  * because setting the all-frozen bit doesn't cause recovery
1367  * conflicts.
1368  */
1369  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1370  vmbuffer, InvalidTransactionId,
1371  VISIBILITYMAP_ALL_FROZEN);
1372  }
1373 
1374  UnlockReleaseBuffer(buf);
1375 
1376  /* Remember the location of the last page with nonremovable tuples */
1377  if (hastup)
1378  vacrelstats->nonempty_pages = blkno + 1;
1379 
1380  /*
1381  * If we remembered any tuples for deletion, then the page will be
1382  * visited again by lazy_vacuum_heap, which will compute and record
1383  * its post-compaction free space. If not, then we're done with this
1384  * page, so remember its free space as-is. (This path will always be
1385  * taken if there are no indexes.)
1386  */
1387  if (vacrelstats->num_dead_tuples == prev_dead_count)
1388  RecordPageWithFreeSpace(onerel, blkno, freespace);
1389  }
1390 
1391  /* report that everything is scanned and vacuumed */
1392  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1393
1394  pfree(frozen);
1395 
1396  /* save stats for use later */
1397  vacrelstats->tuples_deleted = tups_vacuumed;
1398  vacrelstats->new_dead_tuples = nkeep;
1399 
1400  /* now we can compute the new value for pg_class.reltuples */
1401  vacrelstats->new_live_tuples = vac_estimate_reltuples(onerel,
1402  nblocks,
1403  vacrelstats->tupcount_pages,
1404  live_tuples);
1405 
1406  /* also compute total number of surviving heap entries */
1407  vacrelstats->new_rel_tuples =
1408  vacrelstats->new_live_tuples + vacrelstats->new_dead_tuples;
1409 
1410  /*
1411  * Release any remaining pin on visibility map page.
1412  */
1413  if (BufferIsValid(vmbuffer))
1414  {
1415  ReleaseBuffer(vmbuffer);
1416  vmbuffer = InvalidBuffer;
1417  }
1418 
1419  /* If any tuples need to be deleted, perform final vacuum cycle */
1420  /* XXX put a threshold on min number of tuples here? */
1421  if (vacrelstats->num_dead_tuples > 0)
1422  {
1423  const int hvp_index[] = {
1424  PROGRESS_VACUUM_PHASE,
1425  PROGRESS_VACUUM_NUM_INDEX_VACUUMS
1426  };
1427  int64 hvp_val[2];
1428 
1429  /* Log cleanup info before we touch indexes */
1430  vacuum_log_cleanup_info(onerel, vacrelstats);
1431 
1432  /* Report that we are now vacuuming indexes */
1433  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1434  PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
1435
1436  /* Remove index entries */
1437  for (i = 0; i < nindexes; i++)
1438  lazy_vacuum_index(Irel[i],
1439  &indstats[i],
1440  vacrelstats);
1441 
1442  /* Report that we are now vacuuming the heap */
1443  hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
1444  hvp_val[1] = vacrelstats->num_index_scans + 1;
1445  pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
1446 
1447  /* Remove tuples from heap */
1448  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1449  PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
1450  lazy_vacuum_heap(onerel, vacrelstats);
1451  vacrelstats->num_index_scans++;
1452  }
1453 
1454  /*
1455  * Vacuum the remainder of the Free Space Map. We must do this whether or
1456  * not there were indexes.
1457  */
1458  if (blkno > next_fsm_block_to_vacuum)
1459  FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum, blkno);
1460 
1461  /* report all blocks vacuumed; and that we're cleaning up */
1462  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1463  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1464  PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);
1465
1466  /* Do post-vacuum cleanup and statistics update for each index */
1467  if (vacrelstats->useindex)
1468  {
1469  for (i = 0; i < nindexes; i++)
1470  lazy_cleanup_index(Irel[i], indstats[i], vacrelstats);
1471  }
1472 
1473  /* If no indexes, make log report that lazy_vacuum_heap would've made */
1474  if (vacuumed_pages)
1475  ereport(elevel,
1476  (errmsg("\"%s\": removed %.0f row versions in %u pages",
1477  RelationGetRelationName(onerel),
1478  tups_vacuumed, vacuumed_pages)));
1479 
1480  /*
1481  * This is pretty messy, but we split it up so that we can skip emitting
1482  * individual parts of the message when not applicable.
1483  */
1484  initStringInfo(&buf);
1485  appendStringInfo(&buf,
1486  _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
1487  nkeep, OldestXmin);
1488  appendStringInfo(&buf, _("There were %.0f unused item identifiers.\n"),
1489  nunused);
1490  appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
1491  "Skipped %u pages due to buffer pins, ",
1492  vacrelstats->pinskipped_pages),
1493  vacrelstats->pinskipped_pages);
1494  appendStringInfo(&buf, ngettext("%u frozen page.\n",
1495  "%u frozen pages.\n",
1496  vacrelstats->frozenskipped_pages),
1497  vacrelstats->frozenskipped_pages);
1498  appendStringInfo(&buf, ngettext("%u page is entirely empty.\n",
1499  "%u pages are entirely empty.\n",
1500  empty_pages),
1501  empty_pages);
1502  appendStringInfo(&buf, _("%s."), pg_rusage_show(&ru0));
1503 
1504  ereport(elevel,
1505  (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
1506  RelationGetRelationName(onerel),
1507  tups_vacuumed, num_tuples,
1508  vacrelstats->scanned_pages, nblocks),
1509  errdetail_internal("%s", buf.data)));
1510  pfree(buf.data);
1511 }
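
The mid-scan index-vacuum cycle above is triggered purely by the remaining capacity of the dead-tuple TID array. A minimal standalone sketch of that check (hypothetical helper name; the real bound is computed by lazy_space_alloc() below, and 291 is MaxHeapTuplesPerPage for the default 8 kB block size):

#include <stdbool.h>

/*
 * Sketch: pause the heap scan for an index-vacuum cycle once the
 * remaining dead-TID slots could not absorb one more worst-case page.
 */
static bool
dead_tuple_space_exhausted(int max_dead_tuples, int num_dead_tuples,
                           int max_heap_tuples_per_page)
{
    return (max_dead_tuples - num_dead_tuples) < max_heap_tuples_per_page &&
           num_dead_tuples > 0;
}

/* e.g. dead_tuple_space_exhausted(11184810, 11184700, 291) is true */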

◆ lazy_space_alloc()

static void lazy_space_alloc ( LVRelStats *  vacrelstats,
BlockNumber  relblocks 
)
static

Definition at line 2142 of file vacuumlazy.c.

References autovacuum_work_mem, LVRelStats::dead_tuples, IsAutoVacuumWorkerProcess(), LAZY_ALLOC_TUPLES, maintenance_work_mem, Max, LVRelStats::max_dead_tuples, MaxAllocSize, MaxHeapTuplesPerPage, Min, LVRelStats::num_dead_tuples, palloc(), and LVRelStats::useindex.

Referenced by lazy_scan_heap().

2143 {
2144  long maxtuples;
2145  int vac_work_mem = IsAutoVacuumWorkerProcess() &&
2146  autovacuum_work_mem != -1 ?
2147  autovacuum_work_mem : maintenance_work_mem;
2148
2149  if (vacrelstats->useindex)
2150  {
2151  maxtuples = (vac_work_mem * 1024L) / sizeof(ItemPointerData);
2152  maxtuples = Min(maxtuples, INT_MAX);
2153  maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
2154 
2155  /* curious coding here to ensure the multiplication can't overflow */
2156  if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
2157  maxtuples = relblocks * LAZY_ALLOC_TUPLES;
2158 
2159  /* stay sane if small maintenance_work_mem */
2160  maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
2161  }
2162  else
2163  {
2164  maxtuples = MaxHeapTuplesPerPage;
2165  }
2166 
2167  vacrelstats->num_dead_tuples = 0;
2168  vacrelstats->max_dead_tuples = (int) maxtuples;
2169  vacrelstats->dead_tuples = (ItemPointer)
2170  palloc(maxtuples * sizeof(ItemPointerData));
2171 }
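
As a worked example (assuming the default maintenance_work_mem of 64 MB, i.e. vac_work_mem = 65536 kB, and sizeof(ItemPointerData) == 6): 65536 * 1024 / 6 yields room for roughly 11.18 million dead-tuple TIDs. A small relation is clamped further to relblocks * LAZY_ALLOC_TUPLES, and when no index passes are needed (useindex is false), MaxHeapTuplesPerPage entries suffice because each page is vacuumed as soon as it is scanned.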

◆ lazy_tid_reaped()

static bool lazy_tid_reaped ( ItemPointer  itemptr,
void *  state 
)
static

Definition at line 2202 of file vacuumlazy.c.

References LVRelStats::dead_tuples, LVRelStats::num_dead_tuples, and vac_cmp_itemptr().

Referenced by lazy_vacuum_index().

2203 {
2204  LVRelStats *vacrelstats = (LVRelStats *) state;
2205  ItemPointer res;
2206 
2207  res = (ItemPointer) bsearch((void *) itemptr,
2208  (void *) vacrelstats->dead_tuples,
2209  vacrelstats->num_dead_tuples,
2210  sizeof(ItemPointerData),
2211  vac_cmp_itemptr);
2212 
2213  return (res != NULL);
2214 }
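
Because bsearch() is used, the lookup relies on dead_tuples being sorted in vac_cmp_itemptr() order, which holds since lazy_scan_heap() records TIDs in physical (block, offset) order. A self-contained sketch of an equivalent lookup, using a simplified stand-in for ItemPointerData (hypothetical types and values):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct { uint32_t blk; uint16_t off; } Tid;  /* stand-in TID */

static int
tid_cmp(const void *a, const void *b)
{
    const Tid *l = a, *r = b;
    if (l->blk != r->blk)
        return l->blk < r->blk ? -1 : 1;
    if (l->off != r->off)
        return l->off < r->off ? -1 : 1;
    return 0;
}

int
main(void)
{
    Tid  dead[] = {{7, 2}, {7, 5}, {9, 1}};  /* already sorted */
    Tid  probe = {7, 5};
    bool reaped = bsearch(&probe, dead, 3, sizeof(Tid), tid_cmp) != NULL;

    return reaped ? 0 : 1;  /* probe is found, so reaped == true */
}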

◆ lazy_truncate_heap()

static void lazy_truncate_heap ( Relation  onerel,
LVRelStats *  vacrelstats 
)
static

Definition at line 1873 of file vacuumlazy.c.

References AccessExclusiveLock, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), elevel, ereport, errdetail_internal(), errmsg(), LVRelStats::lock_waiter_detected, LVRelStats::nonempty_pages, LVRelStats::old_rel_pages, LVRelStats::pages_removed, pg_rusage_init(), pg_rusage_show(), pg_usleep(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelStats::rel_pages, RelationGetNumberOfBlocks, RelationGetRelationName, RelationTruncate(), UnlockRelation(), VACUUM_TRUNCATE_LOCK_TIMEOUT, and VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL.

Referenced by heap_vacuum_rel().

1874 {
1875  BlockNumber old_rel_pages = vacrelstats->rel_pages;
1876  BlockNumber new_rel_pages;
1877  PGRUsage ru0;
1878  int lock_retry;
1879 
1880  pg_rusage_init(&ru0);
1881 
1882  /* Report that we are now truncating */
1883  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1884  PROGRESS_VACUUM_PHASE_TRUNCATE);
1885
1886  /*
1887  * Loop until no more truncating can be done.
1888  */
1889  do
1890  {
1891  /*
1892  * We need full exclusive lock on the relation in order to do
1893  * truncation. If we can't get it, give up rather than waiting --- we
1894  * don't want to block other backends, and we don't want to deadlock
1895  * (which is quite possible considering we already hold a lower-grade
1896  * lock).
1897  */
1898  vacrelstats->lock_waiter_detected = false;
1899  lock_retry = 0;
1900  while (true)
1901  {
1902  if (ConditionalLockRelation(onerel, AccessExclusiveLock))
1903  break;
1904 
1905  /*
1906  * Check for interrupts while trying to (re-)acquire the exclusive
1907  * lock.
1908  */
1909  CHECK_FOR_INTERRUPTS();
1910
1911  if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
1912  VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
1913  {
1914  /*
1915  * We failed to establish the lock in the specified number of
1916  * retries. This means we give up truncating.
1917  */
1918  vacrelstats->lock_waiter_detected = true;
1919  ereport(elevel,
1920  (errmsg("\"%s\": stopping truncate due to conflicting lock request",
1921  RelationGetRelationName(onerel))));
1922  return;
1923  }
1924 
1925  pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL * 1000L);
1926  }
1927 
1928  /*
1929  * Now that we have exclusive lock, look to see if the rel has grown
1930  * whilst we were vacuuming with non-exclusive lock. If so, give up;
1931  * the newly added pages presumably contain non-deletable tuples.
1932  */
1933  new_rel_pages = RelationGetNumberOfBlocks(onerel);
1934  if (new_rel_pages != old_rel_pages)
1935  {
1936  /*
1937  * Note: we intentionally don't update vacrelstats->rel_pages with
1938  * the new rel size here. If we did, it would amount to assuming
1939  * that the new pages are empty, which is unlikely. Leaving the
1940  * numbers alone amounts to assuming that the new pages have the
1941  * same tuple density as existing ones, which is less unlikely.
1942  */
1943  UnlockRelation(onerel, AccessExclusiveLock);
1944  return;
1945  }
1946 
1947  /*
1948  * Scan backwards from the end to verify that the end pages actually
1949  * contain no tuples. This is *necessary*, not optional, because
1950  * other backends could have added tuples to these pages whilst we
1951  * were vacuuming.
1952  */
1953  new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
1954 
1955  if (new_rel_pages >= old_rel_pages)
1956  {
1957  /* can't do anything after all */
1958  UnlockRelation(onerel, AccessExclusiveLock);
1959  return;
1960  }
1961 
1962  /*
1963  * Okay to truncate.
1964  */
1965  RelationTruncate(onerel, new_rel_pages);
1966 
1967  /*
1968  * We can release the exclusive lock as soon as we have truncated.
1969  * Other backends can't safely access the relation until they have
1970  * processed the smgr invalidation that smgrtruncate sent out ... but
1971  * that should happen as part of standard invalidation processing once
1972  * they acquire lock on the relation.
1973  */
1974  UnlockRelation(onerel, AccessExclusiveLock);
1975
1976  /*
1977  * Update statistics. Here, it *is* correct to adjust rel_pages
1978  * without also touching reltuples, since the tuple count wasn't
1979  * changed by the truncation.
1980  */
1981  vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
1982  vacrelstats->rel_pages = new_rel_pages;
1983 
1984  ereport(elevel,
1985  (errmsg("\"%s\": truncated %u to %u pages",
1986  RelationGetRelationName(onerel),
1987  old_rel_pages, new_rel_pages),
1988  errdetail_internal("%s",
1989  pg_rusage_show(&ru0))));
1990  old_rel_pages = new_rel_pages;
1991  } while (new_rel_pages > vacrelstats->nonempty_pages &&
1992  vacrelstats->lock_waiter_detected);
1993 }
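
With the intervals defined at the top of this file (VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL = 50 ms, VACUUM_TRUNCATE_LOCK_TIMEOUT = 5000 ms), the retry loop sleeps 50 ms between attempts and gives up after 5000 / 50 = 100 failed attempts, i.e. after roughly five seconds of trying to obtain the AccessExclusiveLock.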

◆ lazy_vacuum_heap()

static void lazy_vacuum_heap ( Relation  onerel,
LVRelStats *  vacrelstats 
)
static

Definition at line 1526 of file vacuumlazy.c.

References buf, BufferGetPage, BufferIsValid, ConditionalLockBufferForCleanup(), LVRelStats::dead_tuples, elevel, ereport, errdetail_internal(), errmsg(), InvalidBuffer, ItemPointerGetBlockNumber, lazy_vacuum_page(), MAIN_FORKNUM, LVRelStats::num_dead_tuples, PageGetHeapFreeSpace(), pg_rusage_init(), pg_rusage_show(), RBM_NORMAL, ReadBufferExtended(), RecordPageWithFreeSpace(), RelationGetRelationName, ReleaseBuffer(), UnlockReleaseBuffer(), and vacuum_delay_point().

Referenced by lazy_scan_heap().

1527 {
1528  int tupindex;
1529  int npages;
1530  PGRUsage ru0;
1531  Buffer vmbuffer = InvalidBuffer;
1532 
1533  pg_rusage_init(&ru0);
1534  npages = 0;
1535 
1536  tupindex = 0;
1537  while (tupindex < vacrelstats->num_dead_tuples)
1538  {
1539  BlockNumber tblk;
1540  Buffer buf;
1541  Page page;
1542  Size freespace;
1543 
1544  vacuum_delay_point();
1545
1546  tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1547  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
1548  vac_strategy);
1549  if (!ConditionalLockBufferForCleanup(buf))
1550  {
1551  ReleaseBuffer(buf);
1552  ++tupindex;
1553  continue;
1554  }
1555  tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
1556  &vmbuffer);
1557 
1558  /* Now that we've compacted the page, record its available space */
1559  page = BufferGetPage(buf);
1560  freespace = PageGetHeapFreeSpace(page);
1561 
1562  UnlockReleaseBuffer(buf);
1563  RecordPageWithFreeSpace(onerel, tblk, freespace);
1564  npages++;
1565  }
1566 
1567  if (BufferIsValid(vmbuffer))
1568  {
1569  ReleaseBuffer(vmbuffer);
1570  vmbuffer = InvalidBuffer;
1571  }
1572 
1573  ereport(elevel,
1574  (errmsg("\"%s\": removed %d row versions in %d pages",
1575  RelationGetRelationName(onerel),
1576  tupindex, npages),
1577  errdetail_internal("%s", pg_rusage_show(&ru0))));
1578 }
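
Note that dead_tuples is ordered by (block, offset), so each call to lazy_vacuum_page() consumes the contiguous run of TIDs belonging to one heap block and returns the index of the first TID of the next block. When the conditional cleanup lock cannot be obtained, only the current TID is skipped (tupindex advances by one), so the same block is retried for each of its remaining TIDs; any line pointers left dead this way are reclaimed by a later vacuum.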

◆ lazy_vacuum_index()

static void lazy_vacuum_index ( Relation  indrel,
IndexBulkDeleteResult **  stats,
LVRelStats *  vacrelstats 
)
static

Definition at line 1742 of file vacuumlazy.c.

References IndexVacuumInfo::analyze_only, elevel, ereport, errdetail_internal(), errmsg(), IndexVacuumInfo::estimated_count, IndexVacuumInfo::index, index_bulk_delete(), lazy_tid_reaped(), IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, LVRelStats::old_live_tuples, pg_rusage_init(), pg_rusage_show(), RelationGetRelationName, IndexVacuumInfo::report_progress, IndexVacuumInfo::strategy, and vac_strategy.

Referenced by lazy_scan_heap().

1745 {
1746  IndexVacuumInfo ivinfo;
1747  PGRUsage ru0;
1748 
1749  pg_rusage_init(&ru0);
1750 
1751  ivinfo.index = indrel;
1752  ivinfo.analyze_only = false;
1753  ivinfo.report_progress = false;
1754  ivinfo.estimated_count = true;
1755  ivinfo.message_level = elevel;
1756  /* We can only provide an approximate value of num_heap_tuples here */
1757  ivinfo.num_heap_tuples = vacrelstats->old_live_tuples;
1758  ivinfo.strategy = vac_strategy;
1759 
1760  /* Do bulk deletion */
1761  *stats = index_bulk_delete(&ivinfo, *stats,
1762  lazy_tid_reaped, (void *) vacrelstats);
1763 
1764  ereport(elevel,
1765  (errmsg("scanned index \"%s\" to remove %d row versions",
1766  RelationGetRelationName(indrel),
1767  vacrelstats->num_dead_tuples),
1768  errdetail_internal("%s", pg_rusage_show(&ru0))));
1769 }

◆ lazy_vacuum_page()

static int lazy_vacuum_page ( Relation  onerel,
BlockNumber  blkno,
Buffer  buffer,
int  tupindex,
LVRelStats *  vacrelstats,
Buffer *  vmbuffer 
)
static

Definition at line 1591 of file vacuumlazy.c.

References Assert, BufferGetPage, BufferIsValid, LVRelStats::dead_tuples, END_CRIT_SECTION, heap_page_is_all_visible(), InvalidXLogRecPtr, ItemIdSetUnused, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LVRelStats::latestRemovedXid, log_heap_clean(), MarkBufferDirty(), MaxOffsetNumber, LVRelStats::num_dead_tuples, PageGetItemId, PageIsAllVisible, PageRepairFragmentation(), PageSetAllVisible, PageSetLSN, pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, RelationNeedsWAL, START_CRIT_SECTION, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_get_status(), and visibilitymap_set().

Referenced by lazy_scan_heap(), and lazy_vacuum_heap().

1593 {
1594  Page page = BufferGetPage(buffer);
1595  OffsetNumber unused[MaxOffsetNumber];
1596  int uncnt = 0;
1597  TransactionId visibility_cutoff_xid;
1598  bool all_frozen;
1599 
1600  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1601
1602  START_CRIT_SECTION();
1603
1604  for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
1605  {
1606  BlockNumber tblk;
1607  OffsetNumber toff;
1608  ItemId itemid;
1609 
1610  tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1611  if (tblk != blkno)
1612  break; /* past end of tuples for this block */
1613  toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
1614  itemid = PageGetItemId(page, toff);
1615  ItemIdSetUnused(itemid);
1616  unused[uncnt++] = toff;
1617  }
1618 
1619  PageRepairFragmentation(page);
1620
1621  /*
1622  * Mark buffer dirty before we write WAL.
1623  */
1624  MarkBufferDirty(buffer);
1625 
1626  /* XLOG stuff */
1627  if (RelationNeedsWAL(onerel))
1628  {
1629  XLogRecPtr recptr;
1630 
1631  recptr = log_heap_clean(onerel, buffer,
1632  NULL, 0, NULL, 0,
1633  unused, uncnt,
1634  vacrelstats->latestRemovedXid);
1635  PageSetLSN(page, recptr);
1636  }
1637 
1638  /*
1639  * End critical section, so we safely can do visibility tests (which
1640  * possibly need to perform IO and allocate memory!). If we crash now the
1641  * page (including the corresponding vm bit) might not be marked all
1642  * visible, but that's fine. A later vacuum will fix that.
1643  */
1644  END_CRIT_SECTION();
1645 
1646  /*
1647  * Now that we have removed the dead tuples from the page, once again
1648  * check if the page has become all-visible. The page is already marked
1649  * dirty, exclusively locked, and, if needed, a full page image has been
1650  * emitted in the log_heap_clean() above.
1651  */
1652  if (heap_page_is_all_visible(onerel, buffer, &visibility_cutoff_xid,
1653  &all_frozen))
1654  PageSetAllVisible(page);
1655 
1656  /*
1657  * All the changes to the heap page have been done. If the all-visible
1658  * flag is now set, also set the VM all-visible bit (and, if possible, the
1659  * all-frozen bit) unless this has already been done previously.
1660  */
1661  if (PageIsAllVisible(page))
1662  {
1663  uint8 vm_status = visibilitymap_get_status(onerel, blkno, vmbuffer);
1664  uint8 flags = 0;
1665 
1666  /* Set the VM all-frozen bit to flag, if needed */
1667  if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
1668  flags |= VISIBILITYMAP_ALL_VISIBLE;
1669  if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0 && all_frozen)
1670  flags |= VISIBILITYMAP_ALL_FROZEN;
1671 
1672  Assert(BufferIsValid(*vmbuffer));
1673  if (flags != 0)
1674  visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr,
1675  *vmbuffer, visibility_cutoff_xid, flags);
1676  }
1677 
1678  return tupindex;
1679 }

◆ should_attempt_truncation()

static bool should_attempt_truncation ( VacuumParams *  params,
LVRelStats *  vacrelstats 
)
static

Definition at line 1852 of file vacuumlazy.c.

References LVRelStats::nonempty_pages, old_snapshot_threshold, LVRelStats::rel_pages, REL_TRUNCATE_FRACTION, REL_TRUNCATE_MINIMUM, VacuumParams::truncate, and VACOPT_TERNARY_DISABLED.

Referenced by heap_vacuum_rel().

1853 {
1854  BlockNumber possibly_freeable;
1855 
1856  if (params->truncate == VACOPT_TERNARY_DISABLED)
1857  return false;
1858 
1859  possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
1860  if (possibly_freeable > 0 &&
1861  (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
1862  possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
1863  old_snapshot_threshold < 0)
1864  return true;
1865  else
1866  return false;
1867 }
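
Concretely, with REL_TRUNCATE_MINIMUM = 1000 and REL_TRUNCATE_FRACTION = 16: a 4,800-page relation qualifies once 4800 / 16 = 300 tail pages look freeable, while a million-page relation qualifies as soon as 1,000 pages do, since either branch of the OR is sufficient. Truncation is also skipped outright when the TRUNCATE option is disabled or when old_snapshot_threshold is enabled (>= 0).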

◆ vac_cmp_itemptr()

static int vac_cmp_itemptr ( const void *  left,
const void *  right 
)
static

Definition at line 2220 of file vacuumlazy.c.

References ItemPointerGetBlockNumber, and ItemPointerGetOffsetNumber.

Referenced by lazy_tid_reaped().

2221 {
2222  BlockNumber lblk,
2223  rblk;
2224  OffsetNumber loff,
2225  roff;
2226 
2227  lblk = ItemPointerGetBlockNumber((ItemPointer) left);
2228  rblk = ItemPointerGetBlockNumber((ItemPointer) right);
2229 
2230  if (lblk < rblk)
2231  return -1;
2232  if (lblk > rblk)
2233  return 1;
2234 
2235  loff = ItemPointerGetOffsetNumber((ItemPointer) left);
2236  roff = ItemPointerGetOffsetNumber((ItemPointer) right);
2237 
2238  if (loff < roff)
2239  return -1;
2240  if (loff > roff)
2241  return 1;
2242 
2243  return 0;
2244 }
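
For example, a TID for (block 7, offset 200) sorts before one for (block 9, offset 1): blocks are compared first, and offsets only break ties within a block. This matches the physical order in which lazy_scan_heap() appends dead TIDs, which is what lets lazy_tid_reaped() use bsearch() directly.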

◆ vacuum_log_cleanup_info()

static void vacuum_log_cleanup_info ( Relation  rel,
LVRelStats *  vacrelstats 
)
static

Definition at line 463 of file vacuumlazy.c.

References LVRelStats::latestRemovedXid, log_heap_cleanup_info(), RelationData::rd_node, RelationNeedsWAL, TransactionIdIsValid, and XLogIsNeeded.

Referenced by lazy_scan_heap().

464 {
465  /*
466  * Skip this for relations for which no WAL is to be written, or if we're
467  * not trying to support archive recovery.
468  */
469  if (!RelationNeedsWAL(rel) || !XLogIsNeeded())
470  return;
471 
472  /*
473  * No need to write the record at all unless it contains a valid value
474  */
475  if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
476  (void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
477 }

Variable Documentation

◆ elevel

int elevel = -1
static

Definition at line 143 of file vacuumlazy.c.

◆ FreezeLimit

TransactionId FreezeLimit
static

Definition at line 146 of file vacuumlazy.c.

Referenced by heap_vacuum_rel(), lazy_check_needs_freeze(), and lazy_scan_heap().

◆ MultiXactCutoff

MultiXactId MultiXactCutoff
static

Definition at line 147 of file vacuumlazy.c.

◆ OldestXmin

TransactionId OldestXmin
static

Definition at line 145 of file vacuumlazy.c.

◆ vac_strategy

BufferAccessStrategy vac_strategy
static

Definition at line 149 of file vacuumlazy.c.

Referenced by lazy_cleanup_index(), and lazy_vacuum_index().