PostgreSQL Source Code  git master
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xlog.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"
#include "utils/tqual.h"
Include dependency graph for vacuumlazy.c:


Data Structures

struct  LVRelStats
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define VACUUM_FSM_EVERY_PAGES   ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define LAZY_ALLOC_TUPLES   MaxHeapTuplesPerPage
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define FORCE_CHECK_PAGE()   (blkno == nblocks - 1 && should_attempt_truncation(vacrelstats))
 

Typedefs

typedef struct LVRelStats LVRelStats
 

Functions

static void lazy_scan_heap (Relation onerel, int options, LVRelStats *vacrelstats, Relation *Irel, int nindexes, bool aggressive)
 
static void lazy_vacuum_heap (Relation onerel, LVRelStats *vacrelstats)
 
static bool lazy_check_needs_freeze (Buffer buf, bool *hastup)
 
static void lazy_vacuum_index (Relation indrel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats)
 
static void lazy_cleanup_index (Relation indrel, IndexBulkDeleteResult *stats, LVRelStats *vacrelstats)
 
static int lazy_vacuum_page (Relation onerel, BlockNumber blkno, Buffer buffer, int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
 
static bool should_attempt_truncation (LVRelStats *vacrelstats)
 
static void lazy_truncate_heap (Relation onerel, LVRelStats *vacrelstats)
 
static BlockNumber count_nondeletable_pages (Relation onerel, LVRelStats *vacrelstats)
 
static void lazy_space_alloc (LVRelStats *vacrelstats, BlockNumber relblocks)
 
static void lazy_record_dead_tuple (LVRelStats *vacrelstats, ItemPointer itemptr)
 
static bool lazy_tid_reaped (ItemPointer itemptr, void *state)
 
static int vac_cmp_itemptr (const void *left, const void *right)
 
static bool heap_page_is_all_visible (Relation rel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
 
void lazy_vacuum_rel (Relation onerel, int options, VacuumParams *params, BufferAccessStrategy bstrategy)
 
static void vacuum_log_cleanup_info (Relation rel, LVRelStats *vacrelstats)
 

Variables

static int elevel = -1
 
static TransactionId OldestXmin
 
static TransactionId FreezeLimit
 
static MultiXactId MultiXactCutoff
 
static BufferAccessStrategy vac_strategy
 

Macro Definition Documentation

◆ FORCE_CHECK_PAGE

#define FORCE_CHECK_PAGE ( )    (blkno == nblocks - 1 && should_attempt_truncation(vacrelstats))

Definition at line 623 of file vacuumlazy.c.

Referenced by lazy_scan_heap().

◆ LAZY_ALLOC_TUPLES

#define LAZY_ALLOC_TUPLES   MaxHeapTuplesPerPage

Definition at line 100 of file vacuumlazy.c.

Referenced by lazy_space_alloc().

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 112 of file vacuumlazy.c.

Referenced by count_nondeletable_pages().

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 73 of file vacuumlazy.c.

Referenced by should_attempt_truncation().

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 72 of file vacuumlazy.c.

Referenced by should_attempt_truncation().
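
Taken together in should_attempt_truncation(): truncation is attempted once the freeable tail (rel_pages - nonempty_pages) reaches either REL_TRUNCATE_MINIMUM blocks or rel_pages / REL_TRUNCATE_FRACTION blocks, whichever is smaller (and, in this version, only when old_snapshot_threshold is disabled). For example, an 8,000-block table qualifies with a 500-block empty tail (8000 / 16), while any table qualifies once 1,000 trailing blocks are freeable.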

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 106 of file vacuumlazy.c.

Referenced by lazy_scan_heap().

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES   ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
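
With the default 8 kB BLCKSZ this evaluates to 1,048,576 blocks, i.e. lazy_scan_heap() vacuums the Free Space Map after every 8 GB of heap processed.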

Definition at line 92 of file vacuumlazy.c.

Referenced by lazy_scan_heap().

◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 82 of file vacuumlazy.c.

Referenced by count_nondeletable_pages().
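
Together with VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL and VACUUM_TRUNCATE_LOCK_TIMEOUT this implements the truncation back-off protocol: count_nondeletable_pages() polls for conflicting lock requests every 20 ms and suspends the truncation when it detects one, while lazy_truncate_heap() retries the AccessExclusiveLock in 50 ms sleeps and gives up after 5 seconds in total.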

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 84 of file vacuumlazy.c.

Referenced by lazy_truncate_heap().

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 83 of file vacuumlazy.c.

Referenced by lazy_truncate_heap().

Typedef Documentation

◆ LVRelStats

Function Documentation

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages ( Relation  onerel,
LVRelStats *  vacrelstats 
)
static

Definition at line 1936 of file vacuumlazy.c.

References AccessExclusiveLock, buf, BUFFER_LOCK_SHARE, BufferGetPage, CHECK_FOR_INTERRUPTS, elevel, ereport, errmsg(), FirstOffsetNumber, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LVRelStats::lock_waiter_detected, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelStats::nonempty_pages, OffsetNumberNext, PageGetItemId, PageGetMaxOffsetNumber, PageIsEmpty, PageIsNew, PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelStats::rel_pages, RelationGetRelationName, StaticAssertStmt, UnlockReleaseBuffer(), and VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL.

Referenced by lazy_truncate_heap().

1937 {
1938  BlockNumber blkno;
1939  BlockNumber prefetchedUntil;
1940  instr_time starttime;
1941 
1942  /* Initialize the starttime if we check for conflicting lock requests */
1943  INSTR_TIME_SET_CURRENT(starttime);
1944 
1945  /*
1946  * Start checking blocks at what we believe relation end to be and move
1947  * backwards. (Strange coding of loop control is needed because blkno is
1948  * unsigned.) To make the scan faster, we prefetch a few blocks at a time
1949  * in forward direction, so that OS-level readahead can kick in.
1950  */
1951  blkno = vacrelstats->rel_pages;
1952  StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
1953  "prefetch size must be power of 2");
1954  prefetchedUntil = InvalidBlockNumber;
1955  while (blkno > vacrelstats->nonempty_pages)
1956  {
1957  Buffer buf;
1958  Page page;
1959  OffsetNumber offnum,
1960  maxoff;
1961  bool hastup;
1962 
1963  /*
1964  * Check if another process requests a lock on our relation. We are
1965  * holding an AccessExclusiveLock here, so they will be waiting. We
1966  * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
1967  * only check if that interval has elapsed once every 32 blocks to
1968  * keep the number of system calls and actual shared lock table
1969  * lookups to a minimum.
1970  */
1971  if ((blkno % 32) == 0)
1972  {
1973  instr_time currenttime;
1974  instr_time elapsed;
1975 
1976  INSTR_TIME_SET_CURRENT(currenttime);
1977  elapsed = currenttime;
1978  INSTR_TIME_SUBTRACT(elapsed, starttime);
1979  if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
1980  >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
1981  {
1982  if (LockHasWaitersRelation(onerel, AccessExclusiveLock))
1983  {
1984  ereport(elevel,
1985  (errmsg("\"%s\": suspending truncate due to conflicting lock request",
1986  RelationGetRelationName(onerel))));
1987 
1988  vacrelstats->lock_waiter_detected = true;
1989  return blkno;
1990  }
1991  starttime = currenttime;
1992  }
1993  }
1994 
1995  /*
1996  * We don't insert a vacuum delay point here, because we have an
1997  * exclusive lock on the table which we want to hold for as short a
1998  * time as possible. We still need to check for interrupts however.
1999  */
2000  CHECK_FOR_INTERRUPTS();
2001 
2002  blkno--;
2003 
2004  /* If we haven't prefetched this lot yet, do so now. */
2005  if (prefetchedUntil > blkno)
2006  {
2007  BlockNumber prefetchStart;
2008  BlockNumber pblkno;
2009 
2010  prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
2011  for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
2012  {
2013  PrefetchBuffer(onerel, MAIN_FORKNUM, pblkno);
2014  CHECK_FOR_INTERRUPTS();
2015  }
2016  prefetchedUntil = prefetchStart;
2017  }
2018 
2019  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
2020  RBM_NORMAL, vac_strategy);
2021 
2022  /* In this phase we only need shared access to the buffer */
2023  LockBuffer(buf, BUFFER_LOCK_SHARE);
2024 
2025  page = BufferGetPage(buf);
2026 
2027  if (PageIsNew(page) || PageIsEmpty(page))
2028  {
2029  /* PageIsNew probably shouldn't happen... */
2030  UnlockReleaseBuffer(buf);
2031  continue;
2032  }
2033 
2034  hastup = false;
2035  maxoff = PageGetMaxOffsetNumber(page);
2036  for (offnum = FirstOffsetNumber;
2037  offnum <= maxoff;
2038  offnum = OffsetNumberNext(offnum))
2039  {
2040  ItemId itemid;
2041 
2042  itemid = PageGetItemId(page, offnum);
2043 
2044  /*
2045  * Note: any non-unused item should be taken as a reason to keep
2046  * this page. We formerly thought that DEAD tuples could be
2047  * thrown away, but that's not so, because we'd not have cleaned
2048  * out their index entries.
2049  */
2050  if (ItemIdIsUsed(itemid))
2051  {
2052  hastup = true;
2053  break; /* can stop scanning */
2054  }
2055  } /* scan along page */
2056 
2057  UnlockReleaseBuffer(buf);
2058 
2059  /* Done scanning if we found a tuple here */
2060  if (hastup)
2061  return blkno + 1;
2062  }
2063 
2064  /*
2065  * If we fall out of the loop, all the previously-thought-to-be-empty
2066  * pages still are; we need not bother to look at the last known-nonempty
2067  * page.
2068  */
2069  return vacrelstats->nonempty_pages;
2070 }
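
The bit-mask at line 2010 is the reason for the StaticAssertStmt at line 1952: clearing the low bits of blkno rounds it down to the start of its PREFETCH_SIZE-aligned window, which only works when PREFETCH_SIZE is a power of two. A minimal standalone sketch (not PostgreSQL code) of that alignment:

#include <stdio.h>
#include <stdint.h>

#define PREFETCH_SIZE ((uint32_t) 32)   /* must be a power of two */

int
main(void)
{
    /* Walk backwards from block 95, as the loop above walks backwards
     * from rel_pages: every block in [64, 95] aligns to window start 64,
     * so each 32-block window is prefetched exactly once. */
    for (uint32_t blkno = 95; blkno >= 90; blkno--)
    {
        uint32_t prefetchStart = blkno & ~(PREFETCH_SIZE - 1);

        printf("blkno %u -> prefetch window starts at %u\n",
               blkno, prefetchStart);
    }
    return 0;
}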

◆ heap_page_is_all_visible()

static bool heap_page_is_all_visible ( Relation  rel,
Buffer  buf,
TransactionId *  visibility_cutoff_xid,
bool *  all_frozen 
)
static

Definition at line 2189 of file vacuumlazy.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, elog, ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleSatisfiesVacuum(), InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet, OffsetNumberNext, OldestXmin, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), and TransactionIdPrecedes().

Referenced by lazy_vacuum_page().

2192 {
2193  Page page = BufferGetPage(buf);
2194  BlockNumber blockno = BufferGetBlockNumber(buf);
2195  OffsetNumber offnum,
2196  maxoff;
2197  bool all_visible = true;
2198 
2199  *visibility_cutoff_xid = InvalidTransactionId;
2200  *all_frozen = true;
2201 
2202  /*
2203  * This is a stripped down version of the line pointer scan in
2204  * lazy_scan_heap(). So if you change anything here, also check that code.
2205  */
2206  maxoff = PageGetMaxOffsetNumber(page);
2207  for (offnum = FirstOffsetNumber;
2208  offnum <= maxoff && all_visible;
2209  offnum = OffsetNumberNext(offnum))
2210  {
2211  ItemId itemid;
2212  HeapTupleData tuple;
2213 
2214  itemid = PageGetItemId(page, offnum);
2215 
2216  /* Unused or redirect line pointers are of no interest */
2217  if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
2218  continue;
2219 
2220  ItemPointerSet(&(tuple.t_self), blockno, offnum);
2221 
2222  /*
2223  * Dead line pointers can have index pointers pointing to them. So
2224  * they can't be treated as visible
2225  */
2226  if (ItemIdIsDead(itemid))
2227  {
2228  all_visible = false;
2229  *all_frozen = false;
2230  break;
2231  }
2232 
2233  Assert(ItemIdIsNormal(itemid));
2234 
2235  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2236  tuple.t_len = ItemIdGetLength(itemid);
2237  tuple.t_tableOid = RelationGetRelid(rel);
2238 
2239  switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
2240  {
2241  case HEAPTUPLE_LIVE:
2242  {
2243  TransactionId xmin;
2244 
2245  /* Check comments in lazy_scan_heap. */
2246  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
2247  {
2248  all_visible = false;
2249  *all_frozen = false;
2250  break;
2251  }
2252 
2253  /*
2254  * The inserter definitely committed. But is it old enough
2255  * that everyone sees it as committed?
2256  */
2257  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
2258  if (!TransactionIdPrecedes(xmin, OldestXmin))
2259  {
2260  all_visible = false;
2261  *all_frozen = false;
2262  break;
2263  }
2264 
2265  /* Track newest xmin on page. */
2266  if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
2267  *visibility_cutoff_xid = xmin;
2268 
2269  /* Check whether this tuple is already frozen or not */
2270  if (all_visible && *all_frozen &&
2271  heap_tuple_needs_eventual_freeze(tuple.t_data))
2272  *all_frozen = false;
2273  }
2274  break;
2275 
2276  case HEAPTUPLE_DEAD:
2277  case HEAPTUPLE_RECENTLY_DEAD:
2278  case HEAPTUPLE_INSERT_IN_PROGRESS:
2279  case HEAPTUPLE_DELETE_IN_PROGRESS:
2280  {
2281  all_visible = false;
2282  *all_frozen = false;
2283  break;
2284  }
2285  default:
2286  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2287  break;
2288  }
2289  } /* scan along page */
2290 
2291  return all_visible;
2292 }
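
The xmin test above relies on TransactionIdPrecedes(), which compares 32-bit XIDs in circular (modulo 2^32) fashion so the test stays correct across XID wraparound. A standalone sketch of that comparison (the real function in transam.c additionally special-cases permanent XIDs below FirstNormalTransactionId):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

/* id1 logically precedes id2 when the signed difference is negative */
static bool
xid_precedes(TransactionId id1, TransactionId id2)
{
    int32_t diff = (int32_t) (id1 - id2);

    return diff < 0;
}

int
main(void)
{
    printf("%d\n", xid_precedes(100, 200));        /* 1: plainly older */
    printf("%d\n", xid_precedes(4294967000u, 50)); /* 1: older even across wraparound */
    return 0;
}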

◆ lazy_check_needs_freeze()

static bool lazy_check_needs_freeze ( Buffer  buf,
bool *  hastup 
)
static

Definition at line 1630 of file vacuumlazy.c.

References BufferGetPage, FirstOffsetNumber, FreezeLimit, heap_tuple_needs_freeze(), ItemIdIsNormal, ItemIdIsUsed, MultiXactCutoff, OffsetNumberNext, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageIsEmpty, and PageIsNew.

Referenced by lazy_scan_heap().

1631 {
1632  Page page = BufferGetPage(buf);
1633  OffsetNumber offnum,
1634  maxoff;
1635  HeapTupleHeader tupleheader;
1636 
1637  *hastup = false;
1638 
1639  /* If we hit an uninitialized page, we want to force vacuuming it. */
1640  if (PageIsNew(page))
1641  return true;
1642 
1643  /* Quick out for ordinary empty page. */
1644  if (PageIsEmpty(page))
1645  return false;
1646 
1647  maxoff = PageGetMaxOffsetNumber(page);
1648  for (offnum = FirstOffsetNumber;
1649  offnum <= maxoff;
1650  offnum = OffsetNumberNext(offnum))
1651  {
1652  ItemId itemid;
1653 
1654  itemid = PageGetItemId(page, offnum);
1655 
1656  /* this should match hastup test in count_nondeletable_pages() */
1657  if (ItemIdIsUsed(itemid))
1658  *hastup = true;
1659 
1660  /* dead and redirect items never need freezing */
1661  if (!ItemIdIsNormal(itemid))
1662  continue;
1663 
1664  tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
1665 
1666  if (heap_tuple_needs_freeze(tupleheader, FreezeLimit,
1667  MultiXactCutoff, buf))
1668  return true;
1669  } /* scan along page */
1670 
1671  return false;
1672 }

◆ lazy_cleanup_index()

static void lazy_cleanup_index ( Relation  indrel,
IndexBulkDeleteResult *  stats,
LVRelStats *  vacrelstats 
)
static

Definition at line 1714 of file vacuumlazy.c.

References IndexVacuumInfo::analyze_only, elevel, ereport, errdetail(), errmsg(), IndexVacuumInfo::estimated_count, IndexBulkDeleteResult::estimated_count, IndexVacuumInfo::index, index_vacuum_cleanup(), InvalidMultiXactId, InvalidTransactionId, IndexVacuumInfo::message_level, LVRelStats::new_rel_tuples, IndexVacuumInfo::num_heap_tuples, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, pfree(), pg_rusage_init(), pg_rusage_show(), LVRelStats::rel_pages, RelationGetRelationName, IndexVacuumInfo::strategy, LVRelStats::tupcount_pages, IndexBulkDeleteResult::tuples_removed, vac_strategy, and vac_update_relstats().

Referenced by lazy_scan_heap().

1717 {
1718  IndexVacuumInfo ivinfo;
1719  PGRUsage ru0;
1720 
1721  pg_rusage_init(&ru0);
1722 
1723  ivinfo.index = indrel;
1724  ivinfo.analyze_only = false;
1725  ivinfo.estimated_count = (vacrelstats->tupcount_pages < vacrelstats->rel_pages);
1726  ivinfo.message_level = elevel;
1727 
1728  /*
1729  * Now we can provide a better estimate of total number of surviving
1730  * tuples (we assume indexes are more interested in that than in the
1731  * number of nominally live tuples).
1732  */
1733  ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples;
1734  ivinfo.strategy = vac_strategy;
1735 
1736  stats = index_vacuum_cleanup(&ivinfo, stats);
1737 
1738  if (!stats)
1739  return;
1740 
1741  /*
1742  * Now update statistics in pg_class, but only if the index says the count
1743  * is accurate.
1744  */
1745  if (!stats->estimated_count)
1746  vac_update_relstats(indrel,
1747  stats->num_pages,
1748  stats->num_index_tuples,
1749  0,
1750  false,
1751  InvalidTransactionId,
1752  InvalidMultiXactId,
1753  false);
1754 
1755  ereport(elevel,
1756  (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
1757  RelationGetRelationName(indrel),
1758  stats->num_index_tuples,
1759  stats->num_pages),
1760  errdetail("%.0f index row versions were removed.\n"
1761  "%u index pages have been deleted, %u are currently reusable.\n"
1762  "%s.",
1763  stats->tuples_removed,
1764  stats->pages_deleted, stats->pages_free,
1765  pg_rusage_show(&ru0))));
1766 
1767  pfree(stats);
1768 }

◆ lazy_record_dead_tuple()

static void lazy_record_dead_tuple ( LVRelStats *  vacrelstats,
ItemPointer  itemptr 
)
static

Definition at line 2113 of file vacuumlazy.c.

References LVRelStats::dead_tuples, LVRelStats::max_dead_tuples, LVRelStats::num_dead_tuples, pgstat_progress_update_param(), and PROGRESS_VACUUM_NUM_DEAD_TUPLES.

Referenced by lazy_scan_heap().

2115 {
2116  /*
2117  * The array shouldn't overflow under normal behavior, but perhaps it
2118  * could if we are given a really small maintenance_work_mem. In that
2119  * case, just forget the last few tuples (we'll get 'em next time).
2120  */
2121  if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples)
2122  {
2123  vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
2124  vacrelstats->num_dead_tuples++;
2125  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
2126  vacrelstats->num_dead_tuples);
2127  }
2128 }
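
For scale: an ItemPointerData is 6 bytes, so the dead-TID array sized by lazy_space_alloc() holds roughly 11.1 million TIDs per 64 MB of maintenance_work_mem before this overflow guard matters. The array stays in heap order because pages are scanned front to back, which is what lets lazy_tid_reaped() probe it with bsearch() using vac_cmp_itemptr()'s (block, offset) ordering. A standalone sketch (not PostgreSQL code) of that probe:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for ItemPointerData: a heap TID is (block, offset) */
typedef struct
{
    uint32_t block;
    uint16_t offset;
} Tid;

/* Compare block numbers first, then offsets, as vac_cmp_itemptr() does */
static int
tid_cmp(const void *left, const void *right)
{
    const Tid *l = (const Tid *) left;
    const Tid *r = (const Tid *) right;

    if (l->block != r->block)
        return (l->block < r->block) ? -1 : 1;
    if (l->offset != r->offset)
        return (l->offset < r->offset) ? -1 : 1;
    return 0;
}

int
main(void)
{
    /* recorded in scan order, hence already sorted in heap order */
    Tid dead[] = {{1, 3}, {1, 7}, {4, 2}, {9, 1}};
    Tid probe = {4, 2};

    /* lazy_tid_reaped() performs essentially this bsearch() probe */
    printf("reaped: %s\n",
           bsearch(&probe, dead, 4, sizeof(Tid), tid_cmp) ? "yes" : "no");
    return 0;
}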

◆ lazy_scan_heap()

static void lazy_scan_heap ( Relation  onerel,
int  options,
LVRelStats *  vacrelstats,
Relation *  Irel,
int  nindexes,
bool  aggressive 
)
static

Definition at line 463 of file vacuumlazy.c.

References _, appendStringInfo(), Assert, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferGetPageSize, BufferIsValid, ConditionalLockBufferForCleanup(), StringInfoData::data, elevel, elog, END_CRIT_SECTION, ereport, errdetail_internal(), errmsg(), ERROR, ExclusiveLock, FirstOffsetNumber, FORCE_CHECK_PAGE, FreeSpaceMapVacuumRange(), FreezeLimit, LVRelStats::frozenskipped_pages, get_namespace_name(), heap_execute_freeze_tuple(), heap_page_prune(), heap_prepare_freeze_tuple(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleGetOid, HeapTupleHeaderAdvanceLatestRemovedXid(), HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleSatisfiesVacuum(), i, initStringInfo(), InvalidBuffer, InvalidTransactionId, InvalidXLogRecPtr, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet, LVRelStats::latestRemovedXid, lazy_check_needs_freeze(), lazy_cleanup_index(), lazy_record_dead_tuple(), lazy_space_alloc(), lazy_vacuum_heap(), lazy_vacuum_index(), lazy_vacuum_page(), LockBuffer(), LockBufferForCleanup(), LockRelationForExtension(), log_heap_freeze(), log_newpage_buffer(), MAIN_FORKNUM, MarkBufferDirty(), LVRelStats::max_dead_tuples, MaxHeapTuplesPerPage, MultiXactCutoff, LVRelStats::new_dead_tuples, LVRelStats::new_live_tuples, LVRelStats::new_rel_tuples, ngettext, LVRelStats::nonempty_pages, LVRelStats::num_dead_tuples, LVRelStats::num_index_scans, xl_heap_freeze_tuple::offset, OffsetNumberNext, OidIsValid, OldestXmin, PageClearAllVisible, PageGetHeapFreeSpace(), PageGetItem, PageGetItemId, PageGetLSN, PageGetMaxOffsetNumber, PageInit(), PageIsAllVisible, PageIsEmpty, PageIsNew, PageSetAllVisible, PageSetLSN, palloc(), palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), LVRelStats::pinskipped_pages, PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLES, PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, RBM_NORMAL, RelationData::rd_rel, ReadBufferExtended(), RecordPageWithFreeSpace(), LVRelStats::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), relfrozenxid, relminmxid, LVRelStats::scanned_pages, SKIP_PAGES_THRESHOLD, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdPrecedes(), LVRelStats::tupcount_pages, LVRelStats::tuples_deleted, UnlockRelationForExtension(), UnlockReleaseBuffer(), vac_estimate_reltuples(), VACOPT_DISABLE_PAGE_SKIPPING, vacuum_delay_point(), VACUUM_FSM_EVERY_PAGES, vacuum_log_cleanup_info(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, VM_ALL_FROZEN, VM_ALL_VISIBLE, and WARNING.

Referenced by lazy_vacuum_rel().
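
The header comment at source lines 532-575 below explains the visibility-map skipping policy. A standalone sketch (not PostgreSQL code) of just that control flow, with a plain bool array standing in for visibilitymap_get_status():

#include <stdbool.h>
#include <stdio.h>

#define SKIP_PAGES_THRESHOLD 32

/* vm[b] == true means block b is all-visible in the visibility map */
static void
scan_heap(const bool *vm, unsigned nblocks)
{
    unsigned next_unskippable_block = 0;
    bool     skipping_blocks;

    /* invariant: next_unskippable_block is the next block we cannot skip */
    while (next_unskippable_block < nblocks && vm[next_unskippable_block])
        next_unskippable_block++;
    skipping_blocks = (next_unskippable_block >= SKIP_PAGES_THRESHOLD);

    for (unsigned blkno = 0; blkno < nblocks; blkno++)
    {
        if (blkno == next_unskippable_block)
        {
            /* advance to the next block the VM does not let us skip */
            next_unskippable_block++;
            while (next_unskippable_block < nblocks &&
                   vm[next_unskippable_block])
                next_unskippable_block++;

            /* only skip runs long enough not to defeat OS readahead */
            skipping_blocks =
                (next_unskippable_block - blkno > SKIP_PAGES_THRESHOLD);
        }
        else if (skipping_blocks)
            continue;           /* inside a long all-visible run */

        printf("processing block %u\n", blkno);
    }
}

int
main(void)
{
    bool vm[100] = {false};

    for (int b = 10; b <= 60; b++)  /* a 51-block all-visible run */
        vm[b] = true;
    scan_heap(vm, 100);             /* blocks 10..60 are skipped */
    return 0;
}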

465 {
466  BlockNumber nblocks,
467  blkno;
468  HeapTupleData tuple;
469  char *relname;
470  TransactionId relfrozenxid = onerel->rd_rel->relfrozenxid;
471  TransactionId relminmxid = onerel->rd_rel->relminmxid;
472  BlockNumber empty_pages,
473  vacuumed_pages,
474  next_fsm_block_to_vacuum;
475  double num_tuples, /* total number of nonremovable tuples */
476  live_tuples, /* live tuples (reltuples estimate) */
477  tups_vacuumed, /* tuples cleaned up by vacuum */
478  nkeep, /* dead-but-not-removable tuples */
479  nunused; /* unused item pointers */
480  IndexBulkDeleteResult **indstats;
481  int i;
482  PGRUsage ru0;
483  Buffer vmbuffer = InvalidBuffer;
484  BlockNumber next_unskippable_block;
485  bool skipping_blocks;
486  xl_heap_freeze_tuple *frozen;
487  StringInfoData buf;
488  const int initprog_index[] = {
489  PROGRESS_VACUUM_PHASE,
490  PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
491  PROGRESS_VACUUM_MAX_DEAD_TUPLES
492  };
493  int64 initprog_val[3];
494 
495  pg_rusage_init(&ru0);
496 
497  relname = RelationGetRelationName(onerel);
498  if (aggressive)
499  ereport(elevel,
500  (errmsg("aggressively vacuuming \"%s.%s\"",
501  get_namespace_name(RelationGetNamespace(onerel)),
502  relname)));
503  else
504  ereport(elevel,
505  (errmsg("vacuuming \"%s.%s\"",
506  get_namespace_name(RelationGetNamespace(onerel)),
507  relname)));
508 
509  empty_pages = vacuumed_pages = 0;
510  next_fsm_block_to_vacuum = (BlockNumber) 0;
511  num_tuples = live_tuples = tups_vacuumed = nkeep = nunused = 0;
512 
513  indstats = (IndexBulkDeleteResult **)
514  palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
515 
516  nblocks = RelationGetNumberOfBlocks(onerel);
517  vacrelstats->rel_pages = nblocks;
518  vacrelstats->scanned_pages = 0;
519  vacrelstats->tupcount_pages = 0;
520  vacrelstats->nonempty_pages = 0;
521  vacrelstats->latestRemovedXid = InvalidTransactionId;
522 
523  lazy_space_alloc(vacrelstats, nblocks);
524  frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
525 
526  /* Report that we're scanning the heap, advertising total # of blocks */
527  initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
528  initprog_val[1] = nblocks;
529  initprog_val[2] = vacrelstats->max_dead_tuples;
530  pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
531 
532  /*
533  * Except when aggressive is set, we want to skip pages that are
534  * all-visible according to the visibility map, but only when we can skip
535  * at least SKIP_PAGES_THRESHOLD consecutive pages. Since we're reading
536  * sequentially, the OS should be doing readahead for us, so there's no
537  * gain in skipping a page now and then; that's likely to disable
538  * readahead and so be counterproductive. Also, skipping even a single
539  * page means that we can't update relfrozenxid, so we only want to do it
540  * if we can skip a goodly number of pages.
541  *
542  * When aggressive is set, we can't skip pages just because they are
543  * all-visible, but we can still skip pages that are all-frozen, since
544  * such pages do not need freezing and do not affect the value that we can
545  * safely set for relfrozenxid or relminmxid.
546  *
547  * Before entering the main loop, establish the invariant that
548  * next_unskippable_block is the next block number >= blkno that we can't
549  * skip based on the visibility map, either all-visible for a regular scan
550  * or all-frozen for an aggressive scan. We set it to nblocks if there's
551  * no such block. We also set up the skipping_blocks flag correctly at
552  * this stage.
553  *
554  * Note: The value returned by visibilitymap_get_status could be slightly
555  * out-of-date, since we make this test before reading the corresponding
556  * heap page or locking the buffer. This is OK. If we mistakenly think
557  * that the page is all-visible or all-frozen when in fact the flag's just
558  * been cleared, we might fail to vacuum the page. It's easy to see that
559  * skipping a page when aggressive is not set is not a very big deal; we
560  * might leave some dead tuples lying around, but the next vacuum will
561  * find them. But even when aggressive *is* set, it's still OK if we miss
562  * a page whose all-frozen marking has just been cleared. Any new XIDs
563  * just added to that page are necessarily newer than the GlobalXmin we
564  * computed, so they'll have no effect on the value to which we can safely
565  * set relfrozenxid. A similar argument applies for MXIDs and relminmxid.
566  *
567  * We will scan the table's last page, at least to the extent of
568  * determining whether it has tuples or not, even if it should be skipped
569  * according to the above rules; except when we've already determined that
570  * it's not worth trying to truncate the table. This avoids having
571  * lazy_truncate_heap() take access-exclusive lock on the table to attempt
572  * a truncation that just fails immediately because there are tuples in
573  * the last page. This is worth avoiding mainly because such a lock must
574  * be replayed on any hot standby, where it can be disruptive.
575  */
576  next_unskippable_block = 0;
577  if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
578  {
579  while (next_unskippable_block < nblocks)
580  {
581  uint8 vmstatus;
582 
583  vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
584  &vmbuffer);
585  if (aggressive)
586  {
587  if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
588  break;
589  }
590  else
591  {
592  if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
593  break;
594  }
595  vacuum_delay_point();
596  next_unskippable_block++;
597  }
598  }
599 
600  if (next_unskippable_block >= SKIP_PAGES_THRESHOLD)
601  skipping_blocks = true;
602  else
603  skipping_blocks = false;
604 
605  for (blkno = 0; blkno < nblocks; blkno++)
606  {
607  Buffer buf;
608  Page page;
609  OffsetNumber offnum,
610  maxoff;
611  bool tupgone,
612  hastup;
613  int prev_dead_count;
614  int nfrozen;
615  Size freespace;
616  bool all_visible_according_to_vm = false;
617  bool all_visible;
618  bool all_frozen = true; /* provided all_visible is also true */
619  bool has_dead_tuples;
620  TransactionId visibility_cutoff_xid = InvalidTransactionId;
621 
622  /* see note above about forcing scanning of last page */
623 #define FORCE_CHECK_PAGE() \
624  (blkno == nblocks - 1 && should_attempt_truncation(vacrelstats))
625 
626  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
627 
628  if (blkno == next_unskippable_block)
629  {
630  /* Time to advance next_unskippable_block */
631  next_unskippable_block++;
632  if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
633  {
634  while (next_unskippable_block < nblocks)
635  {
636  uint8 vmskipflags;
637 
638  vmskipflags = visibilitymap_get_status(onerel,
639  next_unskippable_block,
640  &vmbuffer);
641  if (aggressive)
642  {
643  if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
644  break;
645  }
646  else
647  {
648  if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
649  break;
650  }
651  vacuum_delay_point();
652  next_unskippable_block++;
653  }
654  }
655 
656  /*
657  * We know we can't skip the current block. But set up
658  * skipping_blocks to do the right thing at the following blocks.
659  */
660  if (next_unskippable_block - blkno > SKIP_PAGES_THRESHOLD)
661  skipping_blocks = true;
662  else
663  skipping_blocks = false;
664 
665  /*
666  * Normally, the fact that we can't skip this block must mean that
667  * it's not all-visible. But in an aggressive vacuum we know only
668  * that it's not all-frozen, so it might still be all-visible.
669  */
670  if (aggressive && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
671  all_visible_according_to_vm = true;
672  }
673  else
674  {
675  /*
676  * The current block is potentially skippable; if we've seen a
677  * long enough run of skippable blocks to justify skipping it, and
678  * we're not forced to check it, then go ahead and skip.
679  * Otherwise, the page must be at least all-visible if not
680  * all-frozen, so we can set all_visible_according_to_vm = true.
681  */
682  if (skipping_blocks && !FORCE_CHECK_PAGE())
683  {
684  /*
685  * Tricky, tricky. If this is in aggressive vacuum, the page
686  * must have been all-frozen at the time we checked whether it
687  * was skippable, but it might not be any more. We must be
688  * careful to count it as a skipped all-frozen page in that
689  * case, or else we'll think we can't update relfrozenxid and
690  * relminmxid. If it's not an aggressive vacuum, we don't
691  * know whether it was all-frozen, so we have to recheck; but
692  * in this case an approximate answer is OK.
693  */
694  if (aggressive || VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
695  vacrelstats->frozenskipped_pages++;
696  continue;
697  }
698  all_visible_according_to_vm = true;
699  }
700 
701  vacuum_delay_point();
702 
703  /*
704  * If we are close to overrunning the available space for dead-tuple
705  * TIDs, pause and do a cycle of vacuuming before we tackle this page.
706  */
707  if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
708  vacrelstats->num_dead_tuples > 0)
709  {
710  const int hvp_index[] = {
711  PROGRESS_VACUUM_PHASE,
712  PROGRESS_VACUUM_NUM_INDEX_VACUUMS
713  };
714  int64 hvp_val[2];
715 
716  /*
717  * Before beginning index vacuuming, we release any pin we may
718  * hold on the visibility map page. This isn't necessary for
719  * correctness, but we do it anyway to avoid holding the pin
720  * across a lengthy, unrelated operation.
721  */
722  if (BufferIsValid(vmbuffer))
723  {
724  ReleaseBuffer(vmbuffer);
725  vmbuffer = InvalidBuffer;
726  }
727 
728  /* Log cleanup info before we touch indexes */
729  vacuum_log_cleanup_info(onerel, vacrelstats);
730 
731  /* Report that we are now vacuuming indexes */
732  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
733  PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
734 
735  /* Remove index entries */
736  for (i = 0; i < nindexes; i++)
737  lazy_vacuum_index(Irel[i],
738  &indstats[i],
739  vacrelstats);
740 
741  /*
742  * Report that we are now vacuuming the heap. We also increase
743  * the number of index scans here; note that by using
744  * pgstat_progress_update_multi_param we can update both
745  * parameters atomically.
746  */
747  hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
748  hvp_val[1] = vacrelstats->num_index_scans + 1;
749  pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
750 
751  /* Remove tuples from heap */
752  lazy_vacuum_heap(onerel, vacrelstats);
753 
754  /*
755  * Forget the now-vacuumed tuples, and press on, but be careful
756  * not to reset latestRemovedXid since we want that value to be
757  * valid.
758  */
759  vacrelstats->num_dead_tuples = 0;
760  vacrelstats->num_index_scans++;
761 
762  /*
763  * Vacuum the Free Space Map to make newly-freed space visible on
764  * upper-level FSM pages. Note we have not yet processed blkno.
765  */
766  FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum, blkno);
767  next_fsm_block_to_vacuum = blkno;
768 
769  /* Report that we are once again scanning the heap */
770  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
771  PROGRESS_VACUUM_PHASE_SCAN_HEAP);
772  }
773 
774  /*
775  * Pin the visibility map page in case we need to mark the page
776  * all-visible. In most cases this will be very cheap, because we'll
777  * already have the correct page pinned anyway. However, it's
778  * possible that (a) next_unskippable_block is covered by a different
779  * VM page than the current block or (b) we released our pin and did a
780  * cycle of index vacuuming.
781  *
782  */
783  visibilitymap_pin(onerel, blkno, &vmbuffer);
784 
785  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
786  RBM_NORMAL, vac_strategy);
787 
788  /* We need buffer cleanup lock so that we can prune HOT chains. */
789  if (!ConditionalLockBufferForCleanup(buf))
790  {
791  /*
792  * If we're not performing an aggressive scan to guard against XID
793  * wraparound, and we don't want to forcibly check the page, then
794  * it's OK to skip vacuuming pages we get a lock conflict on. They
795  * will be dealt with in some future vacuum.
796  */
797  if (!aggressive && !FORCE_CHECK_PAGE())
798  {
799  ReleaseBuffer(buf);
800  vacrelstats->pinskipped_pages++;
801  continue;
802  }
803 
804  /*
805  * Read the page with share lock to see if any xids on it need to
806  * be frozen. If not we just skip the page, after updating our
807  * scan statistics. If there are some, we wait for cleanup lock.
808  *
809  * We could defer the lock request further by remembering the page
810  * and coming back to it later, or we could even register
811  * ourselves for multiple buffers and then service whichever one
812  * is received first. For now, this seems good enough.
813  *
814  * If we get here with aggressive false, then we're just forcibly
815  * checking the page, and so we don't want to insist on getting
816  * the lock; we only need to know if the page contains tuples, so
817  * that we can update nonempty_pages correctly. It's convenient
818  * to use lazy_check_needs_freeze() for both situations, though.
819  */
820  LockBuffer(buf, BUFFER_LOCK_SHARE);
821  if (!lazy_check_needs_freeze(buf, &hastup))
822  {
823  UnlockReleaseBuffer(buf);
824  vacrelstats->scanned_pages++;
825  vacrelstats->pinskipped_pages++;
826  if (hastup)
827  vacrelstats->nonempty_pages = blkno + 1;
828  continue;
829  }
830  if (!aggressive)
831  {
832  /*
833  * Here, we must not advance scanned_pages; that would amount
834  * to claiming that the page contains no freezable tuples.
835  */
836  UnlockReleaseBuffer(buf);
837  vacrelstats->pinskipped_pages++;
838  if (hastup)
839  vacrelstats->nonempty_pages = blkno + 1;
840  continue;
841  }
842  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
843  LockBufferForCleanup(buf);
844  /* drop through to normal processing */
845  }
846 
847  vacrelstats->scanned_pages++;
848  vacrelstats->tupcount_pages++;
849 
850  page = BufferGetPage(buf);
851 
852  if (PageIsNew(page))
853  {
854  /*
855  * An all-zeroes page could be left over if a backend extends the
856  * relation but crashes before initializing the page. Reclaim such
857  * pages for use.
858  *
859  * We have to be careful here because we could be looking at a
860  * page that someone has just added to the relation and not yet
861  * been able to initialize (see RelationGetBufferForTuple). To
862  * protect against that, release the buffer lock, grab the
863  * relation extension lock momentarily, and re-lock the buffer. If
864  * the page is still uninitialized by then, it must be left over
865  * from a crashed backend, and we can initialize it.
866  *
867  * We don't really need the relation lock when this is a new or
868  * temp relation, but it's probably not worth the code space to
869  * check that, since this surely isn't a critical path.
870  *
871  * Note: the comparable code in vacuum.c need not worry because
872  * it's got exclusive lock on the whole relation.
873  */
874  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
875  LockRelationForExtension(onerel, ExclusiveLock);
876  UnlockRelationForExtension(onerel, ExclusiveLock);
877  LockBufferForCleanup(buf);
878  if (PageIsNew(page))
879  {
880  ereport(WARNING,
881  (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
882  relname, blkno)));
883  PageInit(page, BufferGetPageSize(buf), 0);
884  empty_pages++;
885  }
886  freespace = PageGetHeapFreeSpace(page);
887  MarkBufferDirty(buf);
888  UnlockReleaseBuffer(buf);
889 
890  RecordPageWithFreeSpace(onerel, blkno, freespace);
891  continue;
892  }
893 
894  if (PageIsEmpty(page))
895  {
896  empty_pages++;
897  freespace = PageGetHeapFreeSpace(page);
898 
899  /* empty pages are always all-visible and all-frozen */
900  if (!PageIsAllVisible(page))
901  {
902  START_CRIT_SECTION();
903 
904  /* mark buffer dirty before writing a WAL record */
905  MarkBufferDirty(buf);
906 
907  /*
908  * It's possible that another backend has extended the heap,
909  * initialized the page, and then failed to WAL-log the page
910  * due to an ERROR. Since heap extension is not WAL-logged,
911  * recovery might try to replay our record setting the page
912  * all-visible and find that the page isn't initialized, which
913  * will cause a PANIC. To prevent that, check whether the
914  * page has been previously WAL-logged, and if not, do that
915  * now.
916  */
917  if (RelationNeedsWAL(onerel) &&
918  PageGetLSN(page) == InvalidXLogRecPtr)
919  log_newpage_buffer(buf, true);
920 
921  PageSetAllVisible(page);
922  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
923  vmbuffer, InvalidTransactionId,
924  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
925  END_CRIT_SECTION();
926  }
927 
928  UnlockReleaseBuffer(buf);
929  RecordPageWithFreeSpace(onerel, blkno, freespace);
930  continue;
931  }
932 
933  /*
934  * Prune all HOT-update chains in this page.
935  *
936  * We count tuples removed by the pruning step as removed by VACUUM.
937  */
938  tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
939  &vacrelstats->latestRemovedXid);
940 
941  /*
942  * Now scan the page to collect vacuumable items and check for tuples
943  * requiring freezing.
944  */
945  all_visible = true;
946  has_dead_tuples = false;
947  nfrozen = 0;
948  hastup = false;
949  prev_dead_count = vacrelstats->num_dead_tuples;
950  maxoff = PageGetMaxOffsetNumber(page);
951 
952  /*
953  * Note: If you change anything in the loop below, also look at
954  * heap_page_is_all_visible to see if that needs to be changed.
955  */
956  for (offnum = FirstOffsetNumber;
957  offnum <= maxoff;
958  offnum = OffsetNumberNext(offnum))
959  {
960  ItemId itemid;
961 
962  itemid = PageGetItemId(page, offnum);
963 
964  /* Unused items require no processing, but we count 'em */
965  if (!ItemIdIsUsed(itemid))
966  {
967  nunused += 1;
968  continue;
969  }
970 
971  /* Redirect items mustn't be touched */
972  if (ItemIdIsRedirected(itemid))
973  {
974  hastup = true; /* this page won't be truncatable */
975  continue;
976  }
977 
978  ItemPointerSet(&(tuple.t_self), blkno, offnum);
979 
980  /*
981  * DEAD item pointers are to be vacuumed normally; but we don't
982  * count them in tups_vacuumed, else we'd be double-counting (at
983  * least in the common case where heap_page_prune() just freed up
984  * a non-HOT tuple).
985  */
986  if (ItemIdIsDead(itemid))
987  {
988  lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
989  all_visible = false;
990  continue;
991  }
992 
993  Assert(ItemIdIsNormal(itemid));
994 
995  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
996  tuple.t_len = ItemIdGetLength(itemid);
997  tuple.t_tableOid = RelationGetRelid(onerel);
998 
999  tupgone = false;
1000 
1001  /*
1002  * The criteria for counting a tuple as live in this block need to
1003  * match what analyze.c's acquire_sample_rows() does, otherwise
1004  * VACUUM and ANALYZE may produce wildly different reltuples
1005  * values, e.g. when there are many recently-dead tuples.
1006  *
1007  * The logic here is a bit simpler than acquire_sample_rows(), as
1008  * VACUUM can't run inside a transaction block, which makes some
1009  * cases impossible (e.g. in-progress insert from the same
1010  * transaction).
1011  */
1012  switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
1013  {
1014  case HEAPTUPLE_DEAD:
1015 
1016  /*
1017  * Ordinarily, DEAD tuples would have been removed by
1018  * heap_page_prune(), but it's possible that the tuple
1019  * state changed since heap_page_prune() looked. In
1020  * particular an INSERT_IN_PROGRESS tuple could have
1021  * changed to DEAD if the inserter aborted. So this
1022  * cannot be considered an error condition.
1023  *
1024  * If the tuple is HOT-updated then it must only be
1025  * removed by a prune operation; so we keep it just as if
1026  * it were RECENTLY_DEAD. Also, if it's a heap-only
1027  * tuple, we choose to keep it, because it'll be a lot
1028  * cheaper to get rid of it in the next pruning pass than
1029  * to treat it like an indexed tuple.
1030  *
1031  * If this were to happen for a tuple that actually needed
1032  * to be deleted, we'd be in trouble, because it'd
1033  * possibly leave a tuple below the relation's xmin
1034  * horizon alive. heap_prepare_freeze_tuple() is prepared
1035  * to detect that case and abort the transaction,
1036  * preventing corruption.
1037  */
1038  if (HeapTupleIsHotUpdated(&tuple) ||
1039  HeapTupleIsHeapOnly(&tuple))
1040  nkeep += 1;
1041  else
1042  tupgone = true; /* we can delete the tuple */
1043  all_visible = false;
1044  break;
1045  case HEAPTUPLE_LIVE:
1046  /* Tuple is good --- but let's do some validity checks */
1047  if (onerel->rd_rel->relhasoids &&
1048  !OidIsValid(HeapTupleGetOid(&tuple)))
1049  elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
1050  relname, blkno, offnum);
1051 
1052  /*
1053  * Count it as live. Not only is this natural, but it's
1054  * also what acquire_sample_rows() does.
1055  */
1056  live_tuples += 1;
1057 
1058  /*
1059  * Is the tuple definitely visible to all transactions?
1060  *
1061  * NB: Like with per-tuple hint bits, we can't set the
1062  * PD_ALL_VISIBLE flag if the inserter committed
1063  * asynchronously. See SetHintBits for more info. Check
1064  * that the tuple is hinted xmin-committed because of
1065  * that.
1066  */
1067  if (all_visible)
1068  {
1069  TransactionId xmin;
1070 
1071  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
1072  {
1073  all_visible = false;
1074  break;
1075  }
1076 
1077  /*
1078  * The inserter definitely committed. But is it old
1079  * enough that everyone sees it as committed?
1080  */
1081  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
1082  if (!TransactionIdPrecedes(xmin, OldestXmin))
1083  {
1084  all_visible = false;
1085  break;
1086  }
1087 
1088  /* Track newest xmin on page. */
1089  if (TransactionIdFollows(xmin, visibility_cutoff_xid))
1090  visibility_cutoff_xid = xmin;
1091  }
1092  break;
1093  case HEAPTUPLE_RECENTLY_DEAD:
1094 
1095  /*
1096  * If tuple is recently deleted then we must not remove it
1097  * from relation.
1098  */
1099  nkeep += 1;
1100  all_visible = false;
1101  break;
1102  case HEAPTUPLE_INSERT_IN_PROGRESS:
1103 
1104  /*
1105  * This is an expected case during concurrent vacuum.
1106  *
1107  * We do not count these rows as live, because we expect
1108  * the inserting transaction to update the counters at
1109  * commit, and we assume that will happen only after we
1110  * report our results. This assumption is a bit shaky,
1111  * but it is what acquire_sample_rows() does, so be
1112  * consistent.
1113  */
1114  all_visible = false;
1115  break;
1116  case HEAPTUPLE_DELETE_IN_PROGRESS:
1117  /* This is an expected case during concurrent vacuum */
1118  all_visible = false;
1119 
1120  /*
1121  * Count such rows as live. As above, we assume the
1122  * deleting transaction will commit and update the
1123  * counters after we report.
1124  */
1125  live_tuples += 1;
1126  break;
1127  default:
1128  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1129  break;
1130  }
1131 
1132  if (tupgone)
1133  {
1134  lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
1135  HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
1136  &vacrelstats->latestRemovedXid);
1137  tups_vacuumed += 1;
1138  has_dead_tuples = true;
1139  }
1140  else
1141  {
1142  bool tuple_totally_frozen;
1143 
1144  num_tuples += 1;
1145  hastup = true;
1146 
1147  /*
1148  * Each non-removable tuple must be checked to see if it needs
1149  * freezing. Note we already have exclusive buffer lock.
1150  */
1151  if (heap_prepare_freeze_tuple(tuple.t_data,
1152  relfrozenxid, relminmxid,
1153  FreezeLimit, MultiXactCutoff,
1154  &frozen[nfrozen],
1155  &tuple_totally_frozen))
1156  frozen[nfrozen++].offset = offnum;
1157 
1158  if (!tuple_totally_frozen)
1159  all_frozen = false;
1160  }
1161  } /* scan along page */
1162 
1163  /*
1164  * If we froze any tuples, mark the buffer dirty, and write a WAL
1165  * record recording the changes. We must log the changes to be
1166  * crash-safe against future truncation of CLOG.
1167  */
1168  if (nfrozen > 0)
1169  {
1170  START_CRIT_SECTION();
1171 
1172  MarkBufferDirty(buf);
1173 
1174  /* execute collected freezes */
1175  for (i = 0; i < nfrozen; i++)
1176  {
1177  ItemId itemid;
1178  HeapTupleHeader htup;
1179 
1180  itemid = PageGetItemId(page, frozen[i].offset);
1181  htup = (HeapTupleHeader) PageGetItem(page, itemid);
1182 
1183  heap_execute_freeze_tuple(htup, &frozen[i]);
1184  }
1185 
1186  /* Now WAL-log freezing if necessary */
1187  if (RelationNeedsWAL(onerel))
1188  {
1189  XLogRecPtr recptr;
1190 
1191  recptr = log_heap_freeze(onerel, buf, FreezeLimit,
1192  frozen, nfrozen);
1193  PageSetLSN(page, recptr);
1194  }
1195 
1196  END_CRIT_SECTION();
1197  }
1198 
1199  /*
1200  * If there are no indexes then we can vacuum the page right now
1201  * instead of doing a second scan.
1202  */
1203  if (nindexes == 0 &&
1204  vacrelstats->num_dead_tuples > 0)
1205  {
1206  /* Remove tuples from heap */
1207  lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
1208  has_dead_tuples = false;
1209 
1210  /*
1211  * Forget the now-vacuumed tuples, and press on, but be careful
1212  * not to reset latestRemovedXid since we want that value to be
1213  * valid.
1214  */
1215  vacrelstats->num_dead_tuples = 0;
1216  vacuumed_pages++;
1217 
1218  /*
1219  * Periodically do incremental FSM vacuuming to make newly-freed
1220  * space visible on upper FSM pages. Note: although we've cleaned
1221  * the current block, we haven't yet updated its FSM entry (that
1222  * happens further down), so passing end == blkno is correct.
1223  */
1224  if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1225  {
1226  FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum,
1227  blkno);
1228  next_fsm_block_to_vacuum = blkno;
1229  }
1230  }
1231 
1232  freespace = PageGetHeapFreeSpace(page);
1233 
1234  /* mark page all-visible, if appropriate */
1235  if (all_visible && !all_visible_according_to_vm)
1236  {
1237  uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
1238 
1239  if (all_frozen)
1240  flags |= VISIBILITYMAP_ALL_FROZEN;
1241 
1242  /*
1243  * It should never be the case that the visibility map page is set
1244  * while the page-level bit is clear, but the reverse is allowed
1245  * (if checksums are not enabled). Regardless, set the both bits
1246  * so that we get back in sync.
1247  *
1248  * NB: If the heap page is all-visible but the VM bit is not set,
1249  * we don't need to dirty the heap page. However, if checksums
1250  * are enabled, we do need to make sure that the heap page is
1251  * dirtied before passing it to visibilitymap_set(), because it
1252  * may be logged. Given that this situation should only happen in
1253  * rare cases after a crash, it is not worth optimizing.
1254  */
1255  PageSetAllVisible(page);
1256  MarkBufferDirty(buf);
1257  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1258  vmbuffer, visibility_cutoff_xid, flags);
1259  }
1260 
1261  /*
1262  * As of PostgreSQL 9.2, the visibility map bit should never be set if
1263  * the page-level bit is clear. However, it's possible that the bit
1264  * got cleared after we checked it and before we took the buffer
1265  * content lock, so we must recheck before jumping to the conclusion
1266  * that something bad has happened.
1267  */
1268  else if (all_visible_according_to_vm && !PageIsAllVisible(page)
1269  && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
1270  {
1271  elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
1272  relname, blkno);
1273  visibilitymap_clear(onerel, blkno, vmbuffer,
1274  VISIBILITYMAP_VALID_BITS);
1275  }
1276 
1277  /*
1278  * It's possible for the value returned by GetOldestXmin() to move
1279  * backwards, so it's not wrong for us to see tuples that appear to
1280  * not be visible to everyone yet, while PD_ALL_VISIBLE is already
1281  * set. The real safe xmin value never moves backwards, but
1282  * GetOldestXmin() is conservative and sometimes returns a value
1283  * that's unnecessarily small, so if we see that contradiction it just
1284  * means that the tuples that we think are not visible to everyone yet
1285  * actually are, and the PD_ALL_VISIBLE flag is correct.
1286  *
1287  * There should never be dead tuples on a page with PD_ALL_VISIBLE
1288  * set, however.
1289  */
1290  else if (PageIsAllVisible(page) && has_dead_tuples)
1291  {
1292  elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
1293  relname, blkno);
1294  PageClearAllVisible(page);
1295  MarkBufferDirty(buf);
1296  visibilitymap_clear(onerel, blkno, vmbuffer,
1297  VISIBILITYMAP_VALID_BITS);
1298  }
1299 
1300  /*
1301  * If the all-visible page is turned out to be all-frozen but not
1302  * marked, we should so mark it. Note that all_frozen is only valid
1303  * if all_visible is true, so we must check both.
1304  */
1305  else if (all_visible_according_to_vm && all_visible && all_frozen &&
1306  !VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
1307  {
1308  /*
1309  * We can pass InvalidTransactionId as the cutoff XID here,
1310  * because setting the all-frozen bit doesn't cause recovery
1311  * conflicts.
1312  */
1313  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1314  vmbuffer, InvalidTransactionId,
1315  VISIBILITYMAP_ALL_FROZEN);
1316  }
1317 
1318  UnlockReleaseBuffer(buf);
1319 
1320  /* Remember the location of the last page with nonremovable tuples */
1321  if (hastup)
1322  vacrelstats->nonempty_pages = blkno + 1;
1323 
1324  /*
1325  * If we remembered any tuples for deletion, then the page will be
1326  * visited again by lazy_vacuum_heap, which will compute and record
1327  * its post-compaction free space. If not, then we're done with this
1328  * page, so remember its free space as-is. (This path will always be
1329  * taken if there are no indexes.)
1330  */
1331  if (vacrelstats->num_dead_tuples == prev_dead_count)
1332  RecordPageWithFreeSpace(onerel, blkno, freespace);
1333  }
1334 
1335  /* report that everything is scanned and vacuumed */
1336  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1337 
1338  pfree(frozen);
1339 
1340  /* save stats for use later */
1341  vacrelstats->tuples_deleted = tups_vacuumed;
1342  vacrelstats->new_dead_tuples = nkeep;
1343 
1344  /* now we can compute the new value for pg_class.reltuples */
1345  vacrelstats->new_live_tuples = vac_estimate_reltuples(onerel,
1346  nblocks,
1347  vacrelstats->tupcount_pages,
1348  live_tuples);
1349 
1350  /* also compute total number of surviving heap entries */
1351  vacrelstats->new_rel_tuples =
1352  vacrelstats->new_live_tuples + vacrelstats->new_dead_tuples;
1353 
1354  /*
1355  * Release any remaining pin on visibility map page.
1356  */
1357  if (BufferIsValid(vmbuffer))
1358  {
1359  ReleaseBuffer(vmbuffer);
1360  vmbuffer = InvalidBuffer;
1361  }
1362 
1363  /* If any tuples need to be deleted, perform final vacuum cycle */
1364  /* XXX put a threshold on min number of tuples here? */
1365  if (vacrelstats->num_dead_tuples > 0)
1366  {
1367  const int hvp_index[] = {
1368  PROGRESS_VACUUM_PHASE,
1369  PROGRESS_VACUUM_NUM_INDEX_VACUUMS
1370  };
1371  int64 hvp_val[2];
1372 
1373  /* Log cleanup info before we touch indexes */
1374  vacuum_log_cleanup_info(onerel, vacrelstats);
1375 
1376  /* Report that we are now vacuuming indexes */
1377  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1378  PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
1379 
1380  /* Remove index entries */
1381  for (i = 0; i < nindexes; i++)
1382  lazy_vacuum_index(Irel[i],
1383  &indstats[i],
1384  vacrelstats);
1385 
1386  /* Report that we are now vacuuming the heap */
1387  hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
1388  hvp_val[1] = vacrelstats->num_index_scans + 1;
1389  pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
1390 
1391  /* Remove tuples from heap */
1394  lazy_vacuum_heap(onerel, vacrelstats);
1395  vacrelstats->num_index_scans++;
1396  }
1397 
1398  /*
1399  * Vacuum the remainder of the Free Space Map. We must do this whether or
1400  * not there were indexes.
1401  */
1402  if (blkno > next_fsm_block_to_vacuum)
1403  FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum, blkno);
1404 
1405  /* report all blocks vacuumed; and that we're cleaning up */
1406  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1407  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1408  PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);
1409 
1410  /* Do post-vacuum cleanup and statistics update for each index */
1411  for (i = 0; i < nindexes; i++)
1412  lazy_cleanup_index(Irel[i], indstats[i], vacrelstats);
1413 
1414  /* If no indexes, make log report that lazy_vacuum_heap would've made */
1415  if (vacuumed_pages)
1416  ereport(elevel,
1417  (errmsg("\"%s\": removed %.0f row versions in %u pages",
1418  RelationGetRelationName(onerel),
1419  tups_vacuumed, vacuumed_pages)));
1420 
1421  /*
1422  * This is pretty messy, but we split it up so that we can skip emitting
1423  * individual parts of the message when not applicable.
1424  */
1425  initStringInfo(&buf);
1426  appendStringInfo(&buf,
1427  _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
1428  nkeep, OldestXmin);
1429  appendStringInfo(&buf, _("There were %.0f unused item pointers.\n"),
1430  nunused);
1431  appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
1432  "Skipped %u pages due to buffer pins, ",
1433  vacrelstats->pinskipped_pages),
1434  vacrelstats->pinskipped_pages);
1435  appendStringInfo(&buf, ngettext("%u frozen page.\n",
1436  "%u frozen pages.\n",
1437  vacrelstats->frozenskipped_pages),
1438  vacrelstats->frozenskipped_pages);
1439  appendStringInfo(&buf, ngettext("%u page is entirely empty.\n",
1440  "%u pages are entirely empty.\n",
1441  empty_pages),
1442  empty_pages);
1443  appendStringInfo(&buf, _("%s."), pg_rusage_show(&ru0));
1444 
1445  ereport(elevel,
1446  (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
1447  RelationGetRelationName(onerel),
1448  tups_vacuumed, num_tuples,
1449  vacrelstats->scanned_pages, nblocks),
1450  errdetail_internal("%s", buf.data)));
1451  pfree(buf.data);
1452 }
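The report-building idiom above is worth noting: the message is accumulated piece by piece in a StringInfo, choosing singular or plural wording per counter via ngettext(), and emitted once through errdetail_internal(). A minimal sketch of the same pattern, assuming it runs inside a backend where the palloc-based StringInfo machinery is available; report_counts() and its arguments are hypothetical:

#include "postgres.h"
#include "lib/stringinfo.h"

/* Hypothetical helper showing the accumulate-then-emit pattern used above. */
static void
report_counts(const char *relname, double nkeep, double nunused)
{
	StringInfoData buf;

	initStringInfo(&buf);
	appendStringInfo(&buf, _("%.0f dead row versions cannot be removed yet.\n"),
					 nkeep);
	appendStringInfo(&buf, _("There were %.0f unused item pointers.\n"),
					 nunused);

	ereport(INFO,
			(errmsg("\"%s\": scan complete", relname),
			 errdetail_internal("%s", buf.data)));

	pfree(buf.data);			/* the StringInfo's buffer is palloc'd */
}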

◆ lazy_space_alloc()

static void lazy_space_alloc ( LVRelStats *  vacrelstats,
BlockNumber  relblocks 
)
static

Definition at line 2078 of file vacuumlazy.c.

References autovacuum_work_mem, LVRelStats::dead_tuples, LVRelStats::hasindex, IsAutoVacuumWorkerProcess(), LAZY_ALLOC_TUPLES, maintenance_work_mem, Max, LVRelStats::max_dead_tuples, MaxAllocSize, MaxHeapTuplesPerPage, Min, LVRelStats::num_dead_tuples, and palloc().

Referenced by lazy_scan_heap().

2079 {
2080  long maxtuples;
2081  int vac_work_mem = IsAutoVacuumWorkerProcess() &&
2082  autovacuum_work_mem != -1 ?
2083  autovacuum_work_mem : maintenance_work_mem;
2084 
2085  if (vacrelstats->hasindex)
2086  {
2087  maxtuples = (vac_work_mem * 1024L) / sizeof(ItemPointerData);
2088  maxtuples = Min(maxtuples, INT_MAX);
2089  maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
2090 
2091  /* curious coding here to ensure the multiplication can't overflow */
2092  if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
2093  maxtuples = relblocks * LAZY_ALLOC_TUPLES;
2094 
2095  /* stay sane if small maintenance_work_mem */
2096  maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
2097  }
2098  else
2099  {
2100  maxtuples = MaxHeapTuplesPerPage;
2101  }
2102 
2103  vacrelstats->num_dead_tuples = 0;
2104  vacrelstats->max_dead_tuples = (int) maxtuples;
2105  vacrelstats->dead_tuples = (ItemPointer)
2106  palloc(maxtuples * sizeof(ItemPointerData));
2107 }
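The sizing logic above is a chain of clamps: the memory budget first, then the hard allocation limits, then the number of tuples the relation could possibly yield. A self-contained sketch of the same arithmetic; MAX_TUPLES_PER_PAGE, MAX_ALLOC, and ITEMPTR_SIZE are stand-in values for MaxHeapTuplesPerPage, MaxAllocSize, and sizeof(ItemPointerData), not the real definitions:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TUPLES_PER_PAGE 291			/* stand-in for MaxHeapTuplesPerPage */
#define MAX_ALLOC           0x3fffffff	/* stand-in for MaxAllocSize */
#define ITEMPTR_SIZE        6			/* stand-in for sizeof(ItemPointerData) */

static long
max_dead_tuples(long work_mem_kb, uint32_t relblocks, int hasindex)
{
	long		maxtuples;

	if (!hasindex)
		return MAX_TUPLES_PER_PAGE;		/* one page's worth suffices */

	maxtuples = (work_mem_kb * 1024L) / ITEMPTR_SIZE;
	if (maxtuples > INT_MAX)
		maxtuples = INT_MAX;
	if (maxtuples > MAX_ALLOC / ITEMPTR_SIZE)
		maxtuples = MAX_ALLOC / ITEMPTR_SIZE;

	/* dividing first keeps the comparison free of overflow */
	if ((uint32_t) (maxtuples / MAX_TUPLES_PER_PAGE) > relblocks)
		maxtuples = (long) relblocks * MAX_TUPLES_PER_PAGE;

	/* stay sane if the memory budget is tiny */
	if (maxtuples < MAX_TUPLES_PER_PAGE)
		maxtuples = MAX_TUPLES_PER_PAGE;

	return maxtuples;
}

int
main(void)
{
	printf("%ld\n", max_dead_tuples(65536, 1000, 1));	/* 64MB budget, 1000 pages */
	return 0;
}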

◆ lazy_tid_reaped()

static bool lazy_tid_reaped ( ItemPointer  itemptr,
void *  state 
)
static

Definition at line 2138 of file vacuumlazy.c.

References LVRelStats::dead_tuples, LVRelStats::num_dead_tuples, and vac_cmp_itemptr().

Referenced by lazy_vacuum_index().

2139 {
2140  LVRelStats *vacrelstats = (LVRelStats *) state;
2141  ItemPointer res;
2142 
2143  res = (ItemPointer) bsearch((void *) itemptr,
2144  (void *) vacrelstats->dead_tuples,
2145  vacrelstats->num_dead_tuples,
2146  sizeof(ItemPointerData),
2147  vac_cmp_itemptr);
2148 
2149  return (res != NULL);
2150 }
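lazy_tid_reaped() relies on dead_tuples having been accumulated in heap order by the forward scan, which is what makes the bsearch() against vac_cmp_itemptr() (documented below) valid. A self-contained illustration of the same sorted-array membership test, using a simplified TID struct rather than the real ItemPointerData:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef struct { uint32_t blk; uint16_t off; } Tid;	/* simplified ItemPointerData */

/* Compare block number first, then offset -- the same ordering as vac_cmp_itemptr() */
static int
tid_cmp(const void *left, const void *right)
{
	const Tid  *l = left;
	const Tid  *r = right;

	if (l->blk != r->blk)
		return l->blk < r->blk ? -1 : 1;
	if (l->off != r->off)
		return l->off < r->off ? -1 : 1;
	return 0;
}

int
main(void)
{
	Tid			dead[] = {{7, 3}, {2, 1}, {7, 1}, {2, 9}};
	Tid			probe = {7, 1};

	/* VACUUM gets this ordering for free from its forward heap scan;
	 * here we must sort explicitly before searching */
	qsort(dead, 4, sizeof(Tid), tid_cmp);

	if (bsearch(&probe, dead, 4, sizeof(Tid), tid_cmp))
		printf("(7,1) is dead -- its index entry would be reaped\n");
	return 0;
}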

◆ lazy_truncate_heap()

static void lazy_truncate_heap ( Relation  onerel,
LVRelStats *  vacrelstats 
)
static

Definition at line 1808 of file vacuumlazy.c.

References AccessExclusiveLock, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), elevel, ereport, errdetail_internal(), errmsg(), LVRelStats::lock_waiter_detected, LVRelStats::nonempty_pages, LVRelStats::old_rel_pages, LVRelStats::pages_removed, pg_rusage_init(), pg_rusage_show(), pg_usleep(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelStats::rel_pages, RelationGetNumberOfBlocks, RelationGetRelationName, RelationTruncate(), UnlockRelation(), VACUUM_TRUNCATE_LOCK_TIMEOUT, and VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL.

Referenced by lazy_vacuum_rel().

1809 {
1810  BlockNumber old_rel_pages = vacrelstats->rel_pages;
1811  BlockNumber new_rel_pages;
1812  PGRUsage ru0;
1813  int lock_retry;
1814 
1815  pg_rusage_init(&ru0);
1816 
1817  /* Report that we are now truncating */
1818  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1819  PROGRESS_VACUUM_PHASE_TRUNCATE);
1820 
1821  /*
1822  * Loop until no more truncating can be done.
1823  */
1824  do
1825  {
1826  /*
1827  * We need full exclusive lock on the relation in order to do
1828  * truncation. If we can't get it, give up rather than waiting --- we
1829  * don't want to block other backends, and we don't want to deadlock
1830  * (which is quite possible considering we already hold a lower-grade
1831  * lock).
1832  */
1833  vacrelstats->lock_waiter_detected = false;
1834  lock_retry = 0;
1835  while (true)
1836  {
1837  if (ConditionalLockRelation(onerel, AccessExclusiveLock))
1838  break;
1839 
1840  /*
1841  * Check for interrupts while trying to (re-)acquire the exclusive
1842  * lock.
1843  */
1844  CHECK_FOR_INTERRUPTS();
1845 
1846  if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
1847  VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
1848  {
1849  /*
1850  * We failed to establish the lock in the specified number of
1851  * retries. This means we give up truncating.
1852  */
1853  vacrelstats->lock_waiter_detected = true;
1854  ereport(elevel,
1855  (errmsg("\"%s\": stopping truncate due to conflicting lock request",
1856  RelationGetRelationName(onerel))));
1857  return;
1858  }
1859 
1860  pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL * 1000L);
1861  }
1862 
1863  /*
1864  * Now that we have exclusive lock, look to see if the rel has grown
1865  * whilst we were vacuuming with non-exclusive lock. If so, give up;
1866  * the newly added pages presumably contain non-deletable tuples.
1867  */
1868  new_rel_pages = RelationGetNumberOfBlocks(onerel);
1869  if (new_rel_pages != old_rel_pages)
1870  {
1871  /*
1872  * Note: we intentionally don't update vacrelstats->rel_pages with
1873  * the new rel size here. If we did, it would amount to assuming
1874  * that the new pages are empty, which is unlikely. Leaving the
1875  * numbers alone amounts to assuming that the new pages have the
1876  * same tuple density as existing ones, which is less unlikely.
1877  */
1878  UnlockRelation(onerel, AccessExclusiveLock);
1879  return;
1880  }
1881 
1882  /*
1883  * Scan backwards from the end to verify that the end pages actually
1884  * contain no tuples. This is *necessary*, not optional, because
1885  * other backends could have added tuples to these pages whilst we
1886  * were vacuuming.
1887  */
1888  new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
1889 
1890  if (new_rel_pages >= old_rel_pages)
1891  {
1892  /* can't do anything after all */
1893  UnlockRelation(onerel, AccessExclusiveLock);
1894  return;
1895  }
1896 
1897  /*
1898  * Okay to truncate.
1899  */
1900  RelationTruncate(onerel, new_rel_pages);
1901 
1902  /*
1903  * We can release the exclusive lock as soon as we have truncated.
1904  * Other backends can't safely access the relation until they have
1905  * processed the smgr invalidation that smgrtruncate sent out ... but
1906  * that should happen as part of standard invalidation processing once
1907  * they acquire lock on the relation.
1908  */
1909  UnlockRelation(onerel, AccessExclusiveLock);
1910 
1911  /*
1912  * Update statistics. Here, it *is* correct to adjust rel_pages
1913  * without also touching reltuples, since the tuple count wasn't
1914  * changed by the truncation.
1915  */
1916  vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
1917  vacrelstats->rel_pages = new_rel_pages;
1918 
1919  ereport(elevel,
1920  (errmsg("\"%s\": truncated %u to %u pages",
1921  RelationGetRelationName(onerel),
1922  old_rel_pages, new_rel_pages),
1923  errdetail_internal("%s",
1924  pg_rusage_show(&ru0))));
1925  old_rel_pages = new_rel_pages;
1926  } while (new_rel_pages > vacrelstats->nonempty_pages &&
1927  vacrelstats->lock_waiter_detected);
1928 }
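The lock-acquisition loop above is a generic conditional-acquire/sleep/retry pattern with an overall deadline: never block in the lock manager, just poll and give up after a bounded number of tries. A stripped-down, runnable sketch under stated assumptions -- try_exclusive_lock() is a stub standing in for ConditionalLockRelation(), using the same 50 ms / 5000 ms constants:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define LOCK_WAIT_INTERVAL_MS 50	/* cf. VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL */
#define LOCK_TIMEOUT_MS       5000	/* cf. VACUUM_TRUNCATE_LOCK_TIMEOUT */

static int	attempts;

/* Stub standing in for ConditionalLockRelation(): succeeds on the third try. */
static bool
try_exclusive_lock(void)
{
	return ++attempts >= 3;
}

/* Poll for the lock without ever blocking, giving up after roughly
 * LOCK_TIMEOUT_MS -- the same shape as the loop above. */
static bool
acquire_with_deadline(void)
{
	int			lock_retry = 0;

	while (true)
	{
		if (try_exclusive_lock())
			return true;
		if (++lock_retry > (LOCK_TIMEOUT_MS / LOCK_WAIT_INTERVAL_MS))
			return false;		/* caller would skip truncation */
		usleep(LOCK_WAIT_INTERVAL_MS * 1000);
	}
}

int
main(void)
{
	printf(acquire_with_deadline() ? "locked after %d tries\n"
		   : "gave up (%d tries)\n", attempts);
	return 0;
}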

◆ lazy_vacuum_heap()

static void lazy_vacuum_heap ( Relation  onerel,
LVRelStats *  vacrelstats 
)
static

Definition at line 1467 of file vacuumlazy.c.

References buf, BufferGetPage, BufferIsValid, ConditionalLockBufferForCleanup(), LVRelStats::dead_tuples, elevel, ereport, errdetail_internal(), errmsg(), InvalidBuffer, ItemPointerGetBlockNumber, lazy_vacuum_page(), MAIN_FORKNUM, LVRelStats::num_dead_tuples, PageGetHeapFreeSpace(), pg_rusage_init(), pg_rusage_show(), RBM_NORMAL, ReadBufferExtended(), RecordPageWithFreeSpace(), RelationGetRelationName, ReleaseBuffer(), UnlockReleaseBuffer(), and vacuum_delay_point().

Referenced by lazy_scan_heap().

1468 {
1469  int tupindex;
1470  int npages;
1471  PGRUsage ru0;
1472  Buffer vmbuffer = InvalidBuffer;
1473 
1474  pg_rusage_init(&ru0);
1475  npages = 0;
1476 
1477  tupindex = 0;
1478  while (tupindex < vacrelstats->num_dead_tuples)
1479  {
1480  BlockNumber tblk;
1481  Buffer buf;
1482  Page page;
1483  Size freespace;
1484 
1485  vacuum_delay_point();
1486 
1487  tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1488  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
1489  vac_strategy);
1490  if (!ConditionalLockBufferForCleanup(buf))
1491  {
1492  ReleaseBuffer(buf);
1493  ++tupindex;
1494  continue;
1495  }
1496  tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
1497  &vmbuffer);
1498 
1499  /* Now that we've compacted the page, record its available space */
1500  page = BufferGetPage(buf);
1501  freespace = PageGetHeapFreeSpace(page);
1502 
1503  UnlockReleaseBuffer(buf);
1504  RecordPageWithFreeSpace(onerel, tblk, freespace);
1505  npages++;
1506  }
1507 
1508  if (BufferIsValid(vmbuffer))
1509  {
1510  ReleaseBuffer(vmbuffer);
1511  vmbuffer = InvalidBuffer;
1512  }
1513 
1514  ereport(elevel,
1515  (errmsg("\"%s\": removed %d row versions in %d pages",
1516  RelationGetRelationName(onerel),
1517  tupindex, npages),
1518  errdetail_internal("%s", pg_rusage_show(&ru0))));
1519 }
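lazy_vacuum_heap() walks the block-sorted dead_tuples array and lets lazy_vacuum_page() consume the whole run of TIDs for one block before the next buffer is read, so each heap page is visited exactly once. The run-detection in isolation, sketched over the same simplified Tid struct used in the lazy_tid_reaped() example above:

#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t blk; uint16_t off; } Tid;

/* Consume all TIDs for the block starting at tupindex; return the index
 * just past that run (the analogue of lazy_vacuum_page()'s return value). */
static int
vacuum_one_block(const Tid *dead, int ndead, int tupindex)
{
	uint32_t	blkno = dead[tupindex].blk;

	for (; tupindex < ndead; tupindex++)
	{
		if (dead[tupindex].blk != blkno)
			break;				/* past end of tuples for this block */
		printf("  clearing (%u,%u)\n",
			   (unsigned) dead[tupindex].blk, (unsigned) dead[tupindex].off);
	}
	return tupindex;
}

int
main(void)
{
	Tid			dead[] = {{2, 1}, {2, 9}, {7, 1}, {7, 3}};	/* already in heap order */
	int			i = 0, n = 4;

	while (i < n)
	{
		printf("visit block %u\n", (unsigned) dead[i].blk);	/* one buffer read per block */
		i = vacuum_one_block(dead, n, i);
	}
	return 0;
}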

◆ lazy_vacuum_index()

static void lazy_vacuum_index ( Relation  indrel,
IndexBulkDeleteResult **  stats,
LVRelStats *  vacrelstats 
)
static

Definition at line 1682 of file vacuumlazy.c.

References IndexVacuumInfo::analyze_only, elevel, ereport, errdetail_internal(), errmsg(), IndexVacuumInfo::estimated_count, IndexVacuumInfo::index, index_bulk_delete(), lazy_tid_reaped(), IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, LVRelStats::old_live_tuples, pg_rusage_init(), pg_rusage_show(), RelationGetRelationName, IndexVacuumInfo::strategy, and vac_strategy.

Referenced by lazy_scan_heap().

1685 {
1686  IndexVacuumInfo ivinfo;
1687  PGRUsage ru0;
1688 
1689  pg_rusage_init(&ru0);
1690 
1691  ivinfo.index = indrel;
1692  ivinfo.analyze_only = false;
1693  ivinfo.estimated_count = true;
1694  ivinfo.message_level = elevel;
1695  /* We can only provide an approximate value of num_heap_tuples here */
1696  ivinfo.num_heap_tuples = vacrelstats->old_live_tuples;
1697  ivinfo.strategy = vac_strategy;
1698 
1699  /* Do bulk deletion */
1700  *stats = index_bulk_delete(&ivinfo, *stats,
1701  lazy_tid_reaped, (void *) vacrelstats);
1702 
1703  ereport(elevel,
1704  (errmsg("scanned index \"%s\" to remove %d row versions",
1705  RelationGetRelationName(indrel),
1706  vacrelstats->num_dead_tuples),
1707  errdetail_internal("%s", pg_rusage_show(&ru0))));
1708 }

◆ lazy_vacuum_page()

static int lazy_vacuum_page ( Relation  onerel,
BlockNumber  blkno,
Buffer  buffer,
int  tupindex,
LVRelStats *  vacrelstats,
Buffer *  vmbuffer 
)
static

Definition at line 1532 of file vacuumlazy.c.

References Assert, BufferGetPage, BufferIsValid, LVRelStats::dead_tuples, END_CRIT_SECTION, heap_page_is_all_visible(), InvalidXLogRecPtr, ItemIdSetUnused, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LVRelStats::latestRemovedXid, log_heap_clean(), MarkBufferDirty(), MaxOffsetNumber, LVRelStats::num_dead_tuples, PageGetItemId, PageIsAllVisible, PageRepairFragmentation(), PageSetAllVisible, PageSetLSN, pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, RelationNeedsWAL, START_CRIT_SECTION, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_get_status(), and visibilitymap_set().

Referenced by lazy_scan_heap(), and lazy_vacuum_heap().

1534 {
1535  Page page = BufferGetPage(buffer);
1536  OffsetNumber unused[MaxOffsetNumber];
1537  int uncnt = 0;
1538  TransactionId visibility_cutoff_xid;
1539  bool all_frozen;
1540  bool all_frozen;
1541 
1542  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1543 
1544  START_CRIT_SECTION();
1545  for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
1546  {
1547  BlockNumber tblk;
1548  OffsetNumber toff;
1549  ItemId itemid;
1550 
1551  tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1552  if (tblk != blkno)
1553  break; /* past end of tuples for this block */
1554  toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
1555  itemid = PageGetItemId(page, toff);
1556  ItemIdSetUnused(itemid);
1557  unused[uncnt++] = toff;
1558  }
1559 
1560  PageRepairFragmentation(page);
1561 
1562  /*
1563  * Mark buffer dirty before we write WAL.
1564  */
1565  MarkBufferDirty(buffer);
1566 
1567  /* XLOG stuff */
1568  if (RelationNeedsWAL(onerel))
1569  {
1570  XLogRecPtr recptr;
1571 
1572  recptr = log_heap_clean(onerel, buffer,
1573  NULL, 0, NULL, 0,
1574  unused, uncnt,
1575  vacrelstats->latestRemovedXid);
1576  PageSetLSN(page, recptr);
1577  }
1578 
1579  /*
1580  * End critical section, so we safely can do visibility tests (which
1581  * possibly need to perform IO and allocate memory!). If we crash now the
1582  * page (including the corresponding vm bit) might not be marked all
1583  * visible, but that's fine. A later vacuum will fix that.
1584  */
1585  END_CRIT_SECTION();
1586 
1587  /*
1588  * Now that we have removed the dead tuples from the page, once again
1589  * check if the page has become all-visible. The page is already marked
1590  * dirty, exclusively locked, and, if needed, a full page image has been
1591  * emitted in the log_heap_clean() above.
1592  */
1593  if (heap_page_is_all_visible(onerel, buffer, &visibility_cutoff_xid,
1594  &all_frozen))
1595  PageSetAllVisible(page);
1596 
1597  /*
1598  * All the changes to the heap page have been done. If the all-visible
1599  * flag is now set, also set the VM all-visible bit (and, if possible, the
1600  * all-frozen bit) unless this has already been done previously.
1601  */
1602  if (PageIsAllVisible(page))
1603  {
1604  uint8 vm_status = visibilitymap_get_status(onerel, blkno, vmbuffer);
1605  uint8 flags = 0;
1606 
1607  /* Figure out which VM bits still need to be set */
1608  if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
1609  flags |= VISIBILITYMAP_ALL_VISIBLE;
1610  if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0 && all_frozen)
1611  flags |= VISIBILITYMAP_ALL_FROZEN;
1612 
1613  Assert(BufferIsValid(*vmbuffer));
1614  if (flags != 0)
1615  visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr,
1616  *vmbuffer, visibility_cutoff_xid, flags);
1617  }
1618 
1619  return tupindex;
1620 }
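The tail of lazy_vacuum_page() computes which visibility-map bits still need to be raised by comparing the VM's current status word against what the page now deserves. That bit arithmetic in isolation; the constants mirror visibilitymap.h's VISIBILITYMAP_ALL_VISIBLE and VISIBILITYMAP_ALL_FROZEN but are local stand-ins here:

#include <stdint.h>
#include <stdio.h>

#define ALL_VISIBLE 0x01		/* mirrors VISIBILITYMAP_ALL_VISIBLE */
#define ALL_FROZEN  0x02		/* mirrors VISIBILITYMAP_ALL_FROZEN */

/* Return the bits that must be newly set, given the VM's current status
 * and whether the page proved all-frozen; 0 means no VM update is needed. */
static uint8_t
vm_bits_to_set(uint8_t vm_status, int all_frozen)
{
	uint8_t		flags = 0;

	if ((vm_status & ALL_VISIBLE) == 0)
		flags |= ALL_VISIBLE;
	if ((vm_status & ALL_FROZEN) == 0 && all_frozen)
		flags |= ALL_FROZEN;
	return flags;
}

int
main(void)
{
	/* page already all-visible in the VM, now found to be all-frozen too */
	printf("flags = 0x%02x\n", vm_bits_to_set(ALL_VISIBLE, 1));	/* prints 0x02 */
	return 0;
}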

◆ lazy_vacuum_rel()

void lazy_vacuum_rel ( Relation  onerel,
int  options,
VacuumParams *  params,
BufferAccessStrategy  bstrategy 
)

Definition at line 190 of file vacuumlazy.c.

References _, appendStringInfo(), Assert, buf, StringInfoData::data, DEBUG2, elevel, ereport, errmsg_internal(), VacuumParams::freeze_min_age, VacuumParams::freeze_table_age, FreezeLimit, LVRelStats::frozenskipped_pages, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), LVRelStats::hasindex, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, IsAutoVacuumWorkerProcess(), lazy_scan_heap(), lazy_truncate_heap(), LVRelStats::lock_waiter_detected, LOG, VacuumParams::log_min_duration, VacuumParams::multixact_freeze_min_age, VacuumParams::multixact_freeze_table_age, MultiXactCutoff, MultiXactIdPrecedesOrEquals(), MyDatabaseId, LVRelStats::new_dead_tuples, LVRelStats::new_live_tuples, LVRelStats::new_rel_tuples, NoLock, LVRelStats::num_index_scans, LVRelStats::old_live_tuples, LVRelStats::old_rel_pages, OldestXmin, LVRelStats::pages_removed, palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), LVRelStats::pinskipped_pages, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, RelationData::rd_rel, LVRelStats::rel_pages, RelationGetNamespace, RelationGetRelationName, RelationGetRelid, RowExclusiveLock, LVRelStats::scanned_pages, should_attempt_truncation(), TimestampDifference(), TimestampDifferenceExceeds(), TransactionIdPrecedesOrEquals(), LVRelStats::tupcount_pages, LVRelStats::tuples_deleted, vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_VERBOSE, vacuum_set_xid_limits(), VacuumPageDirty, VacuumPageHit, VacuumPageMiss, and visibilitymap_count().

Referenced by vacuum_rel().

192 {
193  LVRelStats *vacrelstats;
194  Relation *Irel;
195  int nindexes;
196  PGRUsage ru0;
197  TimestampTz starttime = 0;
198  long secs;
199  int usecs;
200  double read_rate,
201  write_rate;
202  bool aggressive; /* should we scan all unfrozen pages? */
203  bool scanned_all_unfrozen; /* actually scanned all such pages? */
204  TransactionId xidFullScanLimit;
205  MultiXactId mxactFullScanLimit;
206  BlockNumber new_rel_pages;
207  BlockNumber new_rel_allvisible;
208  double new_live_tuples;
209  TransactionId new_frozen_xid;
210  MultiXactId new_min_multi;
211 
212  Assert(params != NULL);
213 
214  /* measure elapsed time iff autovacuum logging requires it */
215  if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
216  {
217  pg_rusage_init(&ru0);
218  starttime = GetCurrentTimestamp();
219  }
220 
221  if (options & VACOPT_VERBOSE)
222  elevel = INFO;
223  else
224  elevel = DEBUG2;
225 
226  pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
227  RelationGetRelid(onerel));
228 
229  vac_strategy = bstrategy;
230 
231  vacuum_set_xid_limits(onerel,
232  params->freeze_min_age,
233  params->freeze_table_age,
234  params->multixact_freeze_min_age,
235  params->multixact_freeze_table_age,
236  &OldestXmin, &FreezeLimit, &xidFullScanLimit,
237  &MultiXactCutoff, &mxactFullScanLimit);
238 
239  /*
240  * We request an aggressive scan if the table's frozen Xid is now older
241  * than or equal to the requested Xid full-table scan limit; or if the
242  * table's minimum MultiXactId is older than or equal to the requested
243  * mxid full-table scan limit; or if DISABLE_PAGE_SKIPPING was specified.
244  */
245  aggressive = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
246  xidFullScanLimit);
247  aggressive |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
248  mxactFullScanLimit);
249  if (options & VACOPT_DISABLE_PAGE_SKIPPING)
250  aggressive = true;
251 
252  vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
253 
254  vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
255  vacrelstats->old_live_tuples = onerel->rd_rel->reltuples;
256  vacrelstats->num_index_scans = 0;
257  vacrelstats->pages_removed = 0;
258  vacrelstats->lock_waiter_detected = false;
259 
260  /* Open all indexes of the relation */
261  vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
262  vacrelstats->hasindex = (nindexes > 0);
263 
264  /* Do the vacuuming */
265  lazy_scan_heap(onerel, options, vacrelstats, Irel, nindexes, aggressive);
266 
267  /* Done with indexes */
268  vac_close_indexes(nindexes, Irel, NoLock);
269 
270  /*
271  * Compute whether we actually scanned the all unfrozen pages. If we did,
272  * we can adjust relfrozenxid and relminmxid.
273  *
274  * NB: We need to check this before truncating the relation, because that
275  * will change ->rel_pages.
276  */
277  if ((vacrelstats->scanned_pages + vacrelstats->frozenskipped_pages)
278  < vacrelstats->rel_pages)
279  {
280  Assert(!aggressive);
281  scanned_all_unfrozen = false;
282  }
283  else
284  scanned_all_unfrozen = true;
285 
286  /*
287  * Optionally truncate the relation.
288  */
289  if (should_attempt_truncation(vacrelstats))
290  lazy_truncate_heap(onerel, vacrelstats);
291 
292  /* Report that we are now doing final cleanup */
293  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
294  PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
295 
296  /*
297  * Update statistics in pg_class.
298  *
299  * A corner case here is that if we scanned no pages at all because every
300  * page is all-visible, we should not update relpages/reltuples, because
301  * we have no new information to contribute. In particular this keeps us
302  * from replacing relpages=reltuples=0 (which means "unknown tuple
303  * density") with nonzero relpages and reltuples=0 (which means "zero
304  * tuple density") unless there's some actual evidence for the latter.
305  *
306  * It's important that we use tupcount_pages and not scanned_pages for the
307  * check described above; scanned_pages counts pages where we could not
308  * get cleanup lock, and which were processed only for frozenxid purposes.
309  *
310  * We do update relallvisible even in the corner case, since if the table
311  * is all-visible we'd definitely like to know that. But clamp the value
312  * to be not more than what we're setting relpages to.
313  *
314  * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
315  * since then we don't know for certain that all tuples have a newer xmin.
316  */
317  new_rel_pages = vacrelstats->rel_pages;
318  new_live_tuples = vacrelstats->new_live_tuples;
319  if (vacrelstats->tupcount_pages == 0 && new_rel_pages > 0)
320  {
321  new_rel_pages = vacrelstats->old_rel_pages;
322  new_live_tuples = vacrelstats->old_live_tuples;
323  }
324 
325  visibilitymap_count(onerel, &new_rel_allvisible, NULL);
326  if (new_rel_allvisible > new_rel_pages)
327  new_rel_allvisible = new_rel_pages;
328 
329  new_frozen_xid = scanned_all_unfrozen ? FreezeLimit : InvalidTransactionId;
330  new_min_multi = scanned_all_unfrozen ? MultiXactCutoff : InvalidMultiXactId;
331 
332  vac_update_relstats(onerel,
333  new_rel_pages,
334  new_live_tuples,
335  new_rel_allvisible,
336  vacrelstats->hasindex,
337  new_frozen_xid,
338  new_min_multi,
339  false);
340 
341  /* report results to the stats collector, too */
342  pgstat_report_vacuum(RelationGetRelid(onerel),
343  onerel->rd_rel->relisshared,
344  new_live_tuples,
345  vacrelstats->new_dead_tuples);
346  pgstat_progress_end_command();
347 
348  /* and log the action if appropriate */
349  if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
350  {
351  TimestampTz endtime = GetCurrentTimestamp();
352 
353  if (params->log_min_duration == 0 ||
354  TimestampDifferenceExceeds(starttime, endtime,
355  params->log_min_duration))
356  {
357  StringInfoData buf;
358  char *msgfmt;
359 
360  TimestampDifference(starttime, endtime, &secs, &usecs);
361 
362  read_rate = 0;
363  write_rate = 0;
364  if ((secs > 0) || (usecs > 0))
365  {
366  read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
367  (secs + usecs / 1000000.0);
368  write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
369  (secs + usecs / 1000000.0);
370  }
371 
372  /*
373  * This is pretty messy, but we split it up so that we can skip
374  * emitting individual parts of the message when not applicable.
375  */
376  initStringInfo(&buf);
377  if (aggressive)
378  msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
379  else
380  msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
381  appendStringInfo(&buf, msgfmt,
382  get_database_name(MyDatabaseId),
383  get_namespace_name(RelationGetNamespace(onerel)),
384  RelationGetRelationName(onerel),
385  vacrelstats->num_index_scans);
386  appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
387  vacrelstats->pages_removed,
388  vacrelstats->rel_pages,
389  vacrelstats->pinskipped_pages,
390  vacrelstats->frozenskipped_pages);
391  appendStringInfo(&buf,
392  _("tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, oldest xmin: %u\n"),
393  vacrelstats->tuples_deleted,
394  vacrelstats->new_rel_tuples,
395  vacrelstats->new_dead_tuples,
396  OldestXmin);
397  appendStringInfo(&buf,
398  _("buffer usage: %d hits, %d misses, %d dirtied\n"),
399  VacuumPageHit,
400  VacuumPageMiss,
401  VacuumPageDirty);
402  appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
403  read_rate, write_rate);
404  appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
405 
406  ereport(LOG,
407  (errmsg_internal("%s", buf.data)));
408  pfree(buf.data);
409  }
410  }
411 }
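The autovacuum log's read/write rates are plain arithmetic over the buffer-usage counters: pages missed (or dirtied) times the block size, divided by elapsed time, scaled to MB/s. A standalone rendering of that computation, assuming the default 8192-byte BLCKSZ and placeholder counter values:

#include <stdio.h>

#define BLCKSZ 8192				/* assumed default page size */

int
main(void)
{
	long		page_misses = 1200;	/* placeholder for VacuumPageMiss */
	long		page_dirtied = 900;	/* placeholder for VacuumPageDirty */
	long		secs = 2;
	int			usecs = 500000;
	double		elapsed = secs + usecs / 1000000.0;
	double		read_rate = 0, write_rate = 0;

	if (elapsed > 0)
	{
		read_rate = (double) BLCKSZ * page_misses / (1024 * 1024) / elapsed;
		write_rate = (double) BLCKSZ * page_dirtied / (1024 * 1024) / elapsed;
	}
	printf("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n",
		   read_rate, write_rate);
	return 0;
}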

◆ should_attempt_truncation()

static bool should_attempt_truncation ( LVRelStats *  vacrelstats )
static

Definition at line 1790 of file vacuumlazy.c.

References LVRelStats::nonempty_pages, old_snapshot_threshold, LVRelStats::rel_pages, REL_TRUNCATE_FRACTION, and REL_TRUNCATE_MINIMUM.

Referenced by lazy_vacuum_rel().

1791 {
1792  BlockNumber possibly_freeable;
1793 
1794  possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
1795  if (possibly_freeable > 0 &&
1796  (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
1797  possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
1798  old_snapshot_threshold < 0)
1799  return true;
1800  else
1801  return false;
1802 }
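The truncation heuristic fires only when the reclaimable tail is at least 1000 pages (REL_TRUNCATE_MINIMUM) or 1/16 of the table (REL_TRUNCATE_FRACTION), so small tails never pay the exclusive-lock cost. The predicate in isolation, minus the old_snapshot_threshold guard; the constants are local stand-ins for the macros:

#include <stdio.h>
#include <stdint.h>

#define TRUNCATE_MINIMUM  1000	/* cf. REL_TRUNCATE_MINIMUM */
#define TRUNCATE_FRACTION 16	/* cf. REL_TRUNCATE_FRACTION */

static int
worth_truncating(uint32_t rel_pages, uint32_t nonempty_pages)
{
	uint32_t	possibly_freeable = rel_pages - nonempty_pages;

	return possibly_freeable > 0 &&
		(possibly_freeable >= TRUNCATE_MINIMUM ||
		 possibly_freeable >= rel_pages / TRUNCATE_FRACTION);
}

int
main(void)
{
	printf("%d\n", worth_truncating(10000, 9900));	/* 100 free pages: 0, not worth it */
	printf("%d\n", worth_truncating(10000, 8000));	/* 2000 free pages: 1 */
	return 0;
}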

◆ vac_cmp_itemptr()

static int vac_cmp_itemptr ( const void *  left,
const void *  right 
)
static

Definition at line 2156 of file vacuumlazy.c.

References ItemPointerGetBlockNumber, and ItemPointerGetOffsetNumber.

Referenced by lazy_tid_reaped().

2157 {
2158  BlockNumber lblk,
2159  rblk;
2160  OffsetNumber loff,
2161  roff;
2162 
2163  lblk = ItemPointerGetBlockNumber((ItemPointer) left);
2164  rblk = ItemPointerGetBlockNumber((ItemPointer) right);
2165 
2166  if (lblk < rblk)
2167  return -1;
2168  if (lblk > rblk)
2169  return 1;
2170 
2171  loff = ItemPointerGetOffsetNumber((ItemPointer) left);
2172  roff = ItemPointerGetOffsetNumber((ItemPointer) right);
2173 
2174  if (loff < roff)
2175  return -1;
2176  if (loff > roff)
2177  return 1;
2178 
2179  return 0;
2180 }

◆ vacuum_log_cleanup_info()

static void vacuum_log_cleanup_info ( Relation  rel,
LVRelStats *  vacrelstats 
)
static

Definition at line 430 of file vacuumlazy.c.

References LVRelStats::latestRemovedXid, log_heap_cleanup_info(), RelationData::rd_node, RelationNeedsWAL, TransactionIdIsValid, and XLogIsNeeded.

Referenced by lazy_scan_heap().

431 {
432  /*
433  * Skip this for relations for which no WAL is to be written, or if we're
434  * not trying to support archive recovery.
435  */
436  if (!RelationNeedsWAL(rel) || !XLogIsNeeded())
437  return;
438 
439  /*
440  * No need to write the record at all unless it contains a valid value
441  */
442  if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
443  (void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
444 }

Variable Documentation

◆ elevel

int elevel = -1
static

Definition at line 144 of file vacuumlazy.c.

◆ FreezeLimit

TransactionId FreezeLimit
static

Definition at line 147 of file vacuumlazy.c.

Referenced by lazy_check_needs_freeze(), lazy_scan_heap(), and lazy_vacuum_rel().

◆ MultiXactCutoff

MultiXactId MultiXactCutoff
static

Definition at line 148 of file vacuumlazy.c.

◆ OldestXmin

TransactionId OldestXmin
static

Definition at line 146 of file vacuumlazy.c.

◆ vac_strategy

BufferAccessStrategy vac_strategy
static

Definition at line 150 of file vacuumlazy.c.

Referenced by lazy_cleanup_index(), and lazy_vacuum_index().