PostgreSQL Source Code (git master)
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xlog.h"
#include "catalog/catalog.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"
#include "utils/tqual.h"
Include dependency graph for vacuumlazy.c: (graph not reproduced here)


Data Structures

struct  LVRelStats
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define LAZY_ALLOC_TUPLES   MaxHeapTuplesPerPage
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define FORCE_CHECK_PAGE()   (blkno == nblocks - 1 && should_attempt_truncation(vacrelstats))
 

Typedefs

typedef struct LVRelStats LVRelStats
 

Functions

static void lazy_scan_heap (Relation onerel, int options, LVRelStats *vacrelstats, Relation *Irel, int nindexes, bool aggressive)
 
static void lazy_vacuum_heap (Relation onerel, LVRelStats *vacrelstats)
 
static bool lazy_check_needs_freeze (Buffer buf, bool *hastup)
 
static void lazy_vacuum_index (Relation indrel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats)
 
static void lazy_cleanup_index (Relation indrel, IndexBulkDeleteResult *stats, LVRelStats *vacrelstats)
 
static int lazy_vacuum_page (Relation onerel, BlockNumber blkno, Buffer buffer, int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
 
static bool should_attempt_truncation (LVRelStats *vacrelstats)
 
static void lazy_truncate_heap (Relation onerel, LVRelStats *vacrelstats)
 
static BlockNumber count_nondeletable_pages (Relation onerel, LVRelStats *vacrelstats)
 
static void lazy_space_alloc (LVRelStats *vacrelstats, BlockNumber relblocks)
 
static void lazy_record_dead_tuple (LVRelStats *vacrelstats, ItemPointer itemptr)
 
static bool lazy_tid_reaped (ItemPointer itemptr, void *state)
 
static int vac_cmp_itemptr (const void *left, const void *right)
 
static bool heap_page_is_all_visible (Relation rel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
 
void lazy_vacuum_rel (Relation onerel, int options, VacuumParams *params, BufferAccessStrategy bstrategy)
 
static void vacuum_log_cleanup_info (Relation rel, LVRelStats *vacrelstats)
 

Variables

static int elevel = -1
 
static TransactionId OldestXmin
 
static TransactionId FreezeLimit
 
static MultiXactId MultiXactCutoff
 
static BufferAccessStrategy vac_strategy
 

Macro Definition Documentation

#define FORCE_CHECK_PAGE()   (blkno == nblocks - 1 && should_attempt_truncation(vacrelstats))

Definition at line 608 of file vacuumlazy.c.

Referenced by lazy_scan_heap().

#define LAZY_ALLOC_TUPLES   MaxHeapTuplesPerPage

Definition at line 93 of file vacuumlazy.c.

Referenced by lazy_space_alloc().
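
LAZY_ALLOC_TUPLES caps the number of dead-tuple slots reserved per heap block. An editor's sketch of the sizing arithmetic in lazy_space_alloc(), which budgets the TID array out of maintenance_work_mem and then clamps it (surrounding declarations and the no-index branch are omitted here):

    long maxtuples = (maintenance_work_mem * 1024L) / sizeof(ItemPointerData);

    maxtuples = Min(maxtuples, INT_MAX);
    maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));

    /* No point reserving more than LAZY_ALLOC_TUPLES slots per heap block;
     * dividing first keeps the multiplication from overflowing. */
    if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
        maxtuples = relblocks * LAZY_ALLOC_TUPLES;

    /* Stay sane if maintenance_work_mem is very small. */
    maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);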

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 105 of file vacuumlazy.c.

Referenced by count_nondeletable_pages().

#define REL_TRUNCATE_FRACTION   16

Definition at line 75 of file vacuumlazy.c.

Referenced by should_attempt_truncation().

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 74 of file vacuumlazy.c.

Referenced by should_attempt_truncation().
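
REL_TRUNCATE_MINIMUM and REL_TRUNCATE_FRACTION jointly decide whether the empty tail of the relation is large enough to justify taking AccessExclusiveLock for truncation. An editor's sketch of the test in should_attempt_truncation() (simplified; the real function also consults old_snapshot_threshold):

    BlockNumber possibly_freeable =
        vacrelstats->rel_pages - vacrelstats->nonempty_pages;

    /* Truncate only when at least REL_TRUNCATE_MINIMUM (1000) pages, or
     * 1/REL_TRUNCATE_FRACTION (1/16th) of the table, could be given back. */
    if (possibly_freeable > 0 &&
        (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
         possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION))
        return true;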

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 99 of file vacuumlazy.c.

Referenced by lazy_scan_heap().

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 84 of file vacuumlazy.c.

Referenced by count_nondeletable_pages().

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 86 of file vacuumlazy.c.

Referenced by lazy_truncate_heap().

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 85 of file vacuumlazy.c.

Referenced by lazy_truncate_heap().
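
The three VACUUM_TRUNCATE_LOCK_* intervals drive the polling loop with which lazy_truncate_heap() tries to take AccessExclusiveLock without stalling other backends. An editor's sketch of that loop (error reporting trimmed):

    int lock_retry = 0;

    while (!ConditionalLockRelation(onerel, AccessExclusiveLock))
    {
        CHECK_FOR_INTERRUPTS();

        /* Give up after VACUUM_TRUNCATE_LOCK_TIMEOUT (5000 ms) of polling. */
        if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
                            VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
        {
            vacrelstats->lock_waiter_detected = true;
            return;             /* skip truncation this time around */
        }

        pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL * 1000L);  /* 50 ms */
    }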

Typedef Documentation

typedef struct LVRelStats LVRelStats

Function Documentation

static BlockNumber count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)

Definition at line 1844 of file vacuumlazy.c.

References AccessExclusiveLock, buf, BUFFER_LOCK_SHARE, BufferGetPage, CHECK_FOR_INTERRUPTS, elevel, ereport, errmsg(), FirstOffsetNumber, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LVRelStats::lock_waiter_detected, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelStats::nonempty_pages, OffsetNumberNext, PageGetItemId, PageGetMaxOffsetNumber, PageIsEmpty, PageIsNew, PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelStats::rel_pages, RelationGetRelationName, StaticAssertStmt, UnlockReleaseBuffer(), and VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL.

Referenced by lazy_truncate_heap().

1845 {
1846  BlockNumber blkno;
1847  BlockNumber prefetchedUntil;
1848  instr_time starttime;
1849 
1850  /* Initialize the starttime if we check for conflicting lock requests */
1851  INSTR_TIME_SET_CURRENT(starttime);
1852 
1853  /*
1854  * Start checking blocks at what we believe relation end to be and move
1855  * backwards. (Strange coding of loop control is needed because blkno is
1856  * unsigned.) To make the scan faster, we prefetch a few blocks at a time
1857  * in forward direction, so that OS-level readahead can kick in.
1858  */
1859  blkno = vacrelstats->rel_pages;
1860  StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
1861  "prefetch size must be power of 2");
1862  prefetchedUntil = InvalidBlockNumber;
1863  while (blkno > vacrelstats->nonempty_pages)
1864  {
1865  Buffer buf;
1866  Page page;
1867  OffsetNumber offnum,
1868  maxoff;
1869  bool hastup;
1870 
1871  /*
1872  * Check if another process requests a lock on our relation. We are
1873  * holding an AccessExclusiveLock here, so they will be waiting. We
1874  * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
1875  * only check if that interval has elapsed once every 32 blocks to
1876  * keep the number of system calls and actual shared lock table
1877  * lookups to a minimum.
1878  */
1879  if ((blkno % 32) == 0)
1880  {
1881  instr_time currenttime;
1882  instr_time elapsed;
1883 
1884  INSTR_TIME_SET_CURRENT(currenttime);
1885  elapsed = currenttime;
1886  INSTR_TIME_SUBTRACT(elapsed, starttime);
1887  if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
1888  >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
1889  {
1890  if (LockHasWaitersRelation(onerel, AccessExclusiveLock))
1891  {
1892  ereport(elevel,
1893  (errmsg("\"%s\": suspending truncate due to conflicting lock request",
1894  RelationGetRelationName(onerel))));
1895 
1896  vacrelstats->lock_waiter_detected = true;
1897  return blkno;
1898  }
1899  starttime = currenttime;
1900  }
1901  }
1902 
1903  /*
1904  * We don't insert a vacuum delay point here, because we have an
1905  * exclusive lock on the table which we want to hold for as short a
1906  * time as possible. We still need to check for interrupts however.
1907  */
1908  CHECK_FOR_INTERRUPTS();
1909 
1910  blkno--;
1911 
1912  /* If we haven't prefetched this lot yet, do so now. */
1913  if (prefetchedUntil > blkno)
1914  {
1915  BlockNumber prefetchStart;
1916  BlockNumber pblkno;
1917 
1918  prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
1919  for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
1920  {
1921  PrefetchBuffer(onerel, MAIN_FORKNUM, pblkno);
1922  CHECK_FOR_INTERRUPTS();
1923  }
1924  prefetchedUntil = prefetchStart;
1925  }
1926 
1927  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
1928  RBM_NORMAL, vac_strategy);
1929 
1930  /* In this phase we only need shared access to the buffer */
1931  LockBuffer(buf, BUFFER_LOCK_SHARE);
1932 
1933  page = BufferGetPage(buf);
1934 
1935  if (PageIsNew(page) || PageIsEmpty(page))
1936  {
1937  /* PageIsNew probably shouldn't happen... */
1938  UnlockReleaseBuffer(buf);
1939  continue;
1940  }
1941 
1942  hastup = false;
1943  maxoff = PageGetMaxOffsetNumber(page);
1944  for (offnum = FirstOffsetNumber;
1945  offnum <= maxoff;
1946  offnum = OffsetNumberNext(offnum))
1947  {
1948  ItemId itemid;
1949 
1950  itemid = PageGetItemId(page, offnum);
1951 
1952  /*
1953  * Note: any non-unused item should be taken as a reason to keep
1954  * this page. We formerly thought that DEAD tuples could be
1955  * thrown away, but that's not so, because we'd not have cleaned
1956  * out their index entries.
1957  */
1958  if (ItemIdIsUsed(itemid))
1959  {
1960  hastup = true;
1961  break; /* can stop scanning */
1962  }
1963  } /* scan along page */
1964 
1965  UnlockReleaseBuffer(buf);
1966 
1967  /* Done scanning if we found a tuple here */
1968  if (hastup)
1969  return blkno + 1;
1970  }
1971 
1972  /*
1973  * If we fall out of the loop, all the previously-thought-to-be-empty
1974  * pages still are; we need not bother to look at the last known-nonempty
1975  * page.
1976  */
1977  return vacrelstats->nonempty_pages;
1978 }
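
The StaticAssertStmt at line 1860 guards the bit trick used for prefetchStart: blkno & ~(PREFETCH_SIZE - 1) rounds blkno down to a PREFETCH_SIZE boundary, which is valid only when PREFETCH_SIZE is a power of two. A worked example with illustrative values:

    /* With PREFETCH_SIZE = 32, PREFETCH_SIZE - 1 = 31 = 0b11111, so the
     * mask ~31 clears the low five bits of blkno. */
    BlockNumber blkno = 70;                                   /* 0b1000110 */
    BlockNumber prefetchStart = blkno & ~(PREFETCH_SIZE - 1); /* 64 = 0b1000000 */

    /* The loop above then issues PrefetchBuffer() for blocks 64..70 in
     * forward order, before the backward scan reads them. */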
static bool heap_page_is_all_visible(Relation rel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)

Definition at line 2097 of file vacuumlazy.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, elog, ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleSatisfiesVacuum(), InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet, OffsetNumberNext, OldestXmin, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), and TransactionIdPrecedes().

Referenced by lazy_vacuum_page().

2100 {
2101  Page page = BufferGetPage(buf);
2102  BlockNumber blockno = BufferGetBlockNumber(buf);
2103  OffsetNumber offnum,
2104  maxoff;
2105  bool all_visible = true;
2106 
2107  *visibility_cutoff_xid = InvalidTransactionId;
2108  *all_frozen = true;
2109 
2110  /*
2111  * This is a stripped down version of the line pointer scan in
2112  * lazy_scan_heap(). So if you change anything here, also check that code.
2113  */
2114  maxoff = PageGetMaxOffsetNumber(page);
2115  for (offnum = FirstOffsetNumber;
2116  offnum <= maxoff && all_visible;
2117  offnum = OffsetNumberNext(offnum))
2118  {
2119  ItemId itemid;
2120  HeapTupleData tuple;
2121 
2122  itemid = PageGetItemId(page, offnum);
2123 
2124  /* Unused or redirect line pointers are of no interest */
2125  if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
2126  continue;
2127 
2128  ItemPointerSet(&(tuple.t_self), blockno, offnum);
2129 
2130  /*
2131  * Dead line pointers can have index pointers pointing to them. So
2132  * they can't be treated as visible
2133  */
2134  if (ItemIdIsDead(itemid))
2135  {
2136  all_visible = false;
2137  *all_frozen = false;
2138  break;
2139  }
2140 
2141  Assert(ItemIdIsNormal(itemid));
2142 
2143  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2144  tuple.t_len = ItemIdGetLength(itemid);
2145  tuple.t_tableOid = RelationGetRelid(rel);
2146 
2147  switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
2148  {
2149  case HEAPTUPLE_LIVE:
2150  {
2151  TransactionId xmin;
2152 
2153  /* Check comments in lazy_scan_heap. */
2154  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
2155  {
2156  all_visible = false;
2157  *all_frozen = false;
2158  break;
2159  }
2160 
2161  /*
2162  * The inserter definitely committed. But is it old enough
2163  * that everyone sees it as committed?
2164  */
2165  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
2166  if (!TransactionIdPrecedes(xmin, OldestXmin))
2167  {
2168  all_visible = false;
2169  *all_frozen = false;
2170  break;
2171  }
2172 
2173  /* Track newest xmin on page. */
2174  if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
2175  *visibility_cutoff_xid = xmin;
2176 
2177  /* Check whether this tuple is already frozen or not */
2178  if (all_visible && *all_frozen &&
2179  heap_tuple_needs_eventual_freeze(tuple.t_data))
2180  *all_frozen = false;
2181  }
2182  break;
2183 
2184  case HEAPTUPLE_DEAD:
2185  case HEAPTUPLE_RECENTLY_DEAD:
2186  case HEAPTUPLE_INSERT_IN_PROGRESS:
2187  case HEAPTUPLE_DELETE_IN_PROGRESS:
2188  {
2189  all_visible = false;
2190  *all_frozen = false;
2191  break;
2192  }
2193  default:
2194  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2195  break;
2196  }
2197  } /* scan along page */
2198 
2199  return all_visible;
2200 }
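
An editor's sketch of how the sole caller, lazy_vacuum_page(), consumes the return value and the two output parameters (condensed; the real caller also rechecks which bits are already set in the visibility map):

    TransactionId visibility_cutoff_xid;
    bool          all_frozen;

    if (heap_page_is_all_visible(onerel, buffer,
                                 &visibility_cutoff_xid, &all_frozen))
    {
        uint8 flags = VISIBILITYMAP_ALL_VISIBLE;

        if (all_frozen)
            flags |= VISIBILITYMAP_ALL_FROZEN;

        PageSetAllVisible(page);
        visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr,
                          *vmbuffer, visibility_cutoff_xid, flags);
    }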
static bool lazy_check_needs_freeze(Buffer buf, bool *hastup)

Definition at line 1545 of file vacuumlazy.c.

References BufferGetPage, FirstOffsetNumber, FreezeLimit, heap_tuple_needs_freeze(), ItemIdIsNormal, ItemIdIsUsed, MultiXactCutoff, OffsetNumberNext, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageIsEmpty, and PageIsNew.

Referenced by lazy_scan_heap().

1546 {
1547  Page page = BufferGetPage(buf);
1548  OffsetNumber offnum,
1549  maxoff;
1550  HeapTupleHeader tupleheader;
1551 
1552  *hastup = false;
1553 
1554  /* If we hit an uninitialized page, we want to force vacuuming it. */
1555  if (PageIsNew(page))
1556  return true;
1557 
1558  /* Quick out for ordinary empty page. */
1559  if (PageIsEmpty(page))
1560  return false;
1561 
1562  maxoff = PageGetMaxOffsetNumber(page);
1563  for (offnum = FirstOffsetNumber;
1564  offnum <= maxoff;
1565  offnum = OffsetNumberNext(offnum))
1566  {
1567  ItemId itemid;
1568 
1569  itemid = PageGetItemId(page, offnum);
1570 
1571  /* this should match hastup test in count_nondeletable_pages() */
1572  if (ItemIdIsUsed(itemid))
1573  *hastup = true;
1574 
1575  /* dead and redirect items never need freezing */
1576  if (!ItemIdIsNormal(itemid))
1577  continue;
1578 
1579  tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
1580 
1581  if (heap_tuple_needs_freeze(tupleheader, FreezeLimit,
1582  MultiXactCutoff, buf))
1583  return true;
1584  } /* scan along page */
1585 
1586  return false;
1587 }
static void lazy_cleanup_index(Relation indrel, IndexBulkDeleteResult *stats, LVRelStats *vacrelstats)
Definition at line 1628 of file vacuumlazy.c.

References IndexVacuumInfo::analyze_only, elevel, ereport, errdetail(), errmsg(), IndexVacuumInfo::estimated_count, IndexBulkDeleteResult::estimated_count, IndexVacuumInfo::index, index_vacuum_cleanup(), InvalidMultiXactId, InvalidTransactionId, IndexVacuumInfo::message_level, LVRelStats::new_rel_tuples, IndexVacuumInfo::num_heap_tuples, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, pfree(), pg_rusage_init(), pg_rusage_show(), LVRelStats::rel_pages, RelationGetRelationName, IndexVacuumInfo::strategy, LVRelStats::tupcount_pages, IndexBulkDeleteResult::tuples_removed, vac_strategy, and vac_update_relstats().

Referenced by lazy_scan_heap().

1631 {
1632  IndexVacuumInfo ivinfo;
1633  PGRUsage ru0;
1634 
1635  pg_rusage_init(&ru0);
1636 
1637  ivinfo.index = indrel;
1638  ivinfo.analyze_only = false;
1639  ivinfo.estimated_count = (vacrelstats->tupcount_pages < vacrelstats->rel_pages);
1640  ivinfo.message_level = elevel;
1641  ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples;
1642  ivinfo.strategy = vac_strategy;
1643 
1644  stats = index_vacuum_cleanup(&ivinfo, stats);
1645 
1646  if (!stats)
1647  return;
1648 
1649  /*
1650  * Now update statistics in pg_class, but only if the index says the count
1651  * is accurate.
1652  */
1653  if (!stats->estimated_count)
1654  vac_update_relstats(indrel,
1655  stats->num_pages,
1656  stats->num_index_tuples,
1657  0,
1658  false,
1659  InvalidTransactionId,
1660  InvalidMultiXactId,
1661  false);
1662 
1663  ereport(elevel,
1664  (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
1665  RelationGetRelationName(indrel),
1666  stats->num_index_tuples,
1667  stats->num_pages),
1668  errdetail("%.0f index row versions were removed.\n"
1669  "%u index pages have been deleted, %u are currently reusable.\n"
1670  "%s.",
1671  stats->tuples_removed,
1672  stats->pages_deleted, stats->pages_free,
1673  pg_rusage_show(&ru0))));
1674 
1675  pfree(stats);
1676 }
static void lazy_record_dead_tuple(LVRelStats *vacrelstats, ItemPointer itemptr)

Definition at line 2021 of file vacuumlazy.c.

References LVRelStats::dead_tuples, LVRelStats::max_dead_tuples, LVRelStats::num_dead_tuples, pgstat_progress_update_param(), and PROGRESS_VACUUM_NUM_DEAD_TUPLES.

Referenced by lazy_scan_heap().

2023 {
2024  /*
2025  * The array shouldn't overflow under normal behavior, but perhaps it
2026  * could if we are given a really small maintenance_work_mem. In that
2027  * case, just forget the last few tuples (we'll get 'em next time).
2028  */
2029  if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples)
2030  {
2031  vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
2032  vacrelstats->num_dead_tuples++;
2033  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
2034  vacrelstats->num_dead_tuples);
2035  }
2036 }
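
Because TIDs are appended during the forward heap scan, dead_tuples stays sorted in TID order, which is what allows lazy_tid_reaped(), the IndexBulkDeleteCallback used during index vacuuming, to probe the array with bsearch(). An editor's sketch, close to the real function:

    static bool
    lazy_tid_reaped(ItemPointer itemptr, void *state)
    {
        LVRelStats *vacrelstats = (LVRelStats *) state;
        ItemPointer res;

        /* Valid only because dead_tuples was filled in ascending TID order. */
        res = (ItemPointer) bsearch((void *) itemptr,
                                    (void *) vacrelstats->dead_tuples,
                                    vacrelstats->num_dead_tuples,
                                    sizeof(ItemPointerData),
                                    vac_cmp_itemptr);
        return (res != NULL);
    }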
static void lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, Relation *Irel, int nindexes, bool aggressive)

Definition at line 459 of file vacuumlazy.c.

References _, appendStringInfo(), Assert, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferGetPageSize, BufferIsValid, ConditionalLockBufferForCleanup(), StringInfoData::data, elevel, elog, END_CRIT_SECTION, ereport, errdetail_internal(), errmsg(), ERROR, ExclusiveLock, FirstOffsetNumber, FORCE_CHECK_PAGE, FreezeLimit, LVRelStats::frozenskipped_pages, get_namespace_name(), heap_execute_freeze_tuple(), heap_page_prune(), heap_prepare_freeze_tuple(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleGetOid, HeapTupleHeaderAdvanceLatestRemovedXid(), HeapTupleHeaderGetXmin, HeapTupleHeaderXminCommitted, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleSatisfiesVacuum(), i, initStringInfo(), InvalidBuffer, InvalidTransactionId, InvalidXLogRecPtr, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet, LVRelStats::latestRemovedXid, lazy_check_needs_freeze(), lazy_cleanup_index(), lazy_record_dead_tuple(), lazy_space_alloc(), lazy_vacuum_heap(), lazy_vacuum_index(), lazy_vacuum_page(), LockBuffer(), LockBufferForCleanup(), LockRelationForExtension(), log_heap_freeze(), log_newpage_buffer(), MAIN_FORKNUM, MarkBufferDirty(), LVRelStats::max_dead_tuples, MaxHeapTuplesPerPage, MultiXactCutoff, LVRelStats::new_dead_tuples, LVRelStats::new_rel_tuples, ngettext, LVRelStats::nonempty_pages, LVRelStats::num_dead_tuples, LVRelStats::num_index_scans, xl_heap_freeze_tuple::offset, OffsetNumberNext, OidIsValid, OldestXmin, PageClearAllVisible, PageGetHeapFreeSpace(), PageGetItem, PageGetItemId, PageGetLSN, PageGetMaxOffsetNumber, PageInit(), PageIsAllVisible, PageIsEmpty, PageIsNew, PageSetAllVisible, PageSetLSN, palloc(), palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), LVRelStats::pinskipped_pages, PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLES, PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, RBM_NORMAL, RelationData::rd_rel, ReadBufferExtended(), RecordPageWithFreeSpace(), LVRelStats::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), LVRelStats::scanned_pages, LVRelStats::scanned_tuples, SKIP_PAGES_THRESHOLD, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdPrecedes(), LVRelStats::tupcount_pages, LVRelStats::tuples_deleted, UnlockRelationForExtension(), UnlockReleaseBuffer(), vac_estimate_reltuples(), VACOPT_DISABLE_PAGE_SKIPPING, vacuum_delay_point(), vacuum_log_cleanup_info(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, VM_ALL_FROZEN, VM_ALL_VISIBLE, and WARNING.

Referenced by lazy_vacuum_rel().

461 {
462  BlockNumber nblocks,
463  blkno;
464  HeapTupleData tuple;
465  char *relname;
466  BlockNumber empty_pages,
467  vacuumed_pages;
468  double num_tuples,
469  tups_vacuumed,
470  nkeep,
471  nunused;
472  IndexBulkDeleteResult **indstats;
473  int i;
474  PGRUsage ru0;
475  Buffer vmbuffer = InvalidBuffer;
476  BlockNumber next_unskippable_block;
477  bool skipping_blocks;
478  xl_heap_freeze_tuple *frozen;
479  StringInfoData buf;
480  const int initprog_index[] = {
481  PROGRESS_VACUUM_PHASE,
482  PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
483  PROGRESS_VACUUM_MAX_DEAD_TUPLES
484  };
485  int64 initprog_val[3];
486 
487  pg_rusage_init(&ru0);
488 
489  relname = RelationGetRelationName(onerel);
490  ereport(elevel,
491  (errmsg("vacuuming \"%s.%s\"",
492  get_namespace_name(RelationGetNamespace(onerel)),
493  relname)));
494 
495  empty_pages = vacuumed_pages = 0;
496  num_tuples = tups_vacuumed = nkeep = nunused = 0;
497 
498  indstats = (IndexBulkDeleteResult **)
499  palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
500 
501  nblocks = RelationGetNumberOfBlocks(onerel);
502  vacrelstats->rel_pages = nblocks;
503  vacrelstats->scanned_pages = 0;
504  vacrelstats->tupcount_pages = 0;
505  vacrelstats->nonempty_pages = 0;
506  vacrelstats->latestRemovedXid = InvalidTransactionId;
507 
508  lazy_space_alloc(vacrelstats, nblocks);
509  frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
510 
511  /* Report that we're scanning the heap, advertising total # of blocks */
512  initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
513  initprog_val[1] = nblocks;
514  initprog_val[2] = vacrelstats->max_dead_tuples;
515  pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
516 
517  /*
518  * Except when aggressive is set, we want to skip pages that are
519  * all-visible according to the visibility map, but only when we can skip
520  * at least SKIP_PAGES_THRESHOLD consecutive pages. Since we're reading
521  * sequentially, the OS should be doing readahead for us, so there's no
522  * gain in skipping a page now and then; that's likely to disable
523  * readahead and so be counterproductive. Also, skipping even a single
524  * page means that we can't update relfrozenxid, so we only want to do it
525  * if we can skip a goodly number of pages.
526  *
527  * When aggressive is set, we can't skip pages just because they are
528  * all-visible, but we can still skip pages that are all-frozen, since
529  * such pages do not need freezing and do not affect the value that we can
530  * safely set for relfrozenxid or relminmxid.
531  *
532  * Before entering the main loop, establish the invariant that
533  * next_unskippable_block is the next block number >= blkno that we
534  * can't skip based on the visibility map, either all-visible for a
535  * regular scan or all-frozen for an aggressive scan. We set it to
536  * nblocks if there's no such block. We also set up the skipping_blocks
537  * flag correctly at this stage.
538  *
539  * Note: The value returned by visibilitymap_get_status could be slightly
540  * out-of-date, since we make this test before reading the corresponding
541  * heap page or locking the buffer. This is OK. If we mistakenly think
542  * that the page is all-visible or all-frozen when in fact the flag's just
543  * been cleared, we might fail to vacuum the page. It's easy to see that
544  * skipping a page when aggressive is not set is not a very big deal; we
545  * might leave some dead tuples lying around, but the next vacuum will
546  * find them. But even when aggressive *is* set, it's still OK if we miss
547  * a page whose all-frozen marking has just been cleared. Any new XIDs
548  * just added to that page are necessarily newer than the GlobalXmin we
549  * computed, so they'll have no effect on the value to which we can safely
550  * set relfrozenxid. A similar argument applies for MXIDs and relminmxid.
551  *
552  * We will scan the table's last page, at least to the extent of
553  * determining whether it has tuples or not, even if it should be skipped
554  * according to the above rules; except when we've already determined that
555  * it's not worth trying to truncate the table. This avoids having
556  * lazy_truncate_heap() take access-exclusive lock on the table to attempt
557  * a truncation that just fails immediately because there are tuples in
558  * the last page. This is worth avoiding mainly because such a lock must
559  * be replayed on any hot standby, where it can be disruptive.
560  */
561  next_unskippable_block = 0;
562  if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
563  {
564  while (next_unskippable_block < nblocks)
565  {
566  uint8 vmstatus;
567 
568  vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
569  &vmbuffer);
570  if (aggressive)
571  {
572  if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
573  break;
574  }
575  else
576  {
577  if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
578  break;
579  }
580  vacuum_delay_point();
581  next_unskippable_block++;
582  }
583  }
584 
585  if (next_unskippable_block >= SKIP_PAGES_THRESHOLD)
586  skipping_blocks = true;
587  else
588  skipping_blocks = false;
589 
590  for (blkno = 0; blkno < nblocks; blkno++)
591  {
592  Buffer buf;
593  Page page;
594  OffsetNumber offnum,
595  maxoff;
596  bool tupgone,
597  hastup;
598  int prev_dead_count;
599  int nfrozen;
600  Size freespace;
601  bool all_visible_according_to_vm = false;
602  bool all_visible;
603  bool all_frozen = true; /* provided all_visible is also true */
604  bool has_dead_tuples;
605  TransactionId visibility_cutoff_xid = InvalidTransactionId;
606 
607  /* see note above about forcing scanning of last page */
608 #define FORCE_CHECK_PAGE() \
609  (blkno == nblocks - 1 && should_attempt_truncation(vacrelstats))
610 
611  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
612 
613  if (blkno == next_unskippable_block)
614  {
615  /* Time to advance next_unskippable_block */
616  next_unskippable_block++;
617  if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
618  {
619  while (next_unskippable_block < nblocks)
620  {
621  uint8 vmskipflags;
622 
623  vmskipflags = visibilitymap_get_status(onerel,
624  next_unskippable_block,
625  &vmbuffer);
626  if (aggressive)
627  {
628  if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
629  break;
630  }
631  else
632  {
633  if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
634  break;
635  }
636  vacuum_delay_point();
637  next_unskippable_block++;
638  }
639  }
640 
641  /*
642  * We know we can't skip the current block. But set up
643  * skipping_all_visible_blocks to do the right thing at the
644  * following blocks.
645  */
646  if (next_unskippable_block - blkno > SKIP_PAGES_THRESHOLD)
647  skipping_blocks = true;
648  else
649  skipping_blocks = false;
650 
651  /*
652  * Normally, the fact that we can't skip this block must mean that
653  * it's not all-visible. But in an aggressive vacuum we know only
654  * that it's not all-frozen, so it might still be all-visible.
655  */
656  if (aggressive && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
657  all_visible_according_to_vm = true;
658  }
659  else
660  {
661  /*
662  * The current block is potentially skippable; if we've seen a
663  * long enough run of skippable blocks to justify skipping it, and
664  * we're not forced to check it, then go ahead and skip.
665  * Otherwise, the page must be at least all-visible if not
666  * all-frozen, so we can set all_visible_according_to_vm = true.
667  */
668  if (skipping_blocks && !FORCE_CHECK_PAGE())
669  {
670  /*
671  * Tricky, tricky. If this is in aggressive vacuum, the page
672  * must have been all-frozen at the time we checked whether it
673  * was skippable, but it might not be any more. We must be
674  * careful to count it as a skipped all-frozen page in that
675  * case, or else we'll think we can't update relfrozenxid and
676  * relminmxid. If it's not an aggressive vacuum, we don't
677  * know whether it was all-frozen, so we have to recheck; but
678  * in this case an approximate answer is OK.
679  */
680  if (aggressive || VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
681  vacrelstats->frozenskipped_pages++;
682  continue;
683  }
684  all_visible_according_to_vm = true;
685  }
686 
687  vacuum_delay_point();
688 
689  /*
690  * If we are close to overrunning the available space for dead-tuple
691  * TIDs, pause and do a cycle of vacuuming before we tackle this page.
692  */
693  if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
694  vacrelstats->num_dead_tuples > 0)
695  {
696  const int hvp_index[] = {
697  PROGRESS_VACUUM_PHASE,
698  PROGRESS_VACUUM_NUM_INDEX_VACUUMS
699  };
700  int64 hvp_val[2];
701 
702  /*
703  * Before beginning index vacuuming, we release any pin we may
704  * hold on the visibility map page. This isn't necessary for
705  * correctness, but we do it anyway to avoid holding the pin
706  * across a lengthy, unrelated operation.
707  */
708  if (BufferIsValid(vmbuffer))
709  {
710  ReleaseBuffer(vmbuffer);
711  vmbuffer = InvalidBuffer;
712  }
713 
714  /* Log cleanup info before we touch indexes */
715  vacuum_log_cleanup_info(onerel, vacrelstats);
716 
717  /* Report that we are now vacuuming indexes */
718  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
719  PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
720 
721  /* Remove index entries */
722  for (i = 0; i < nindexes; i++)
723  lazy_vacuum_index(Irel[i],
724  &indstats[i],
725  vacrelstats);
726 
727  /*
728  * Report that we are now vacuuming the heap. We also increase
729  * the number of index scans here; note that by using
730  * pgstat_progress_update_multi_param we can update both
731  * parameters atomically.
732  */
733  hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
734  hvp_val[1] = vacrelstats->num_index_scans + 1;
735  pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
736 
737  /* Remove tuples from heap */
738  lazy_vacuum_heap(onerel, vacrelstats);
739 
740  /*
741  * Forget the now-vacuumed tuples, and press on, but be careful
742  * not to reset latestRemovedXid since we want that value to be
743  * valid.
744  */
745  vacrelstats->num_dead_tuples = 0;
746  vacrelstats->num_index_scans++;
747 
748  /* Report that we are once again scanning the heap */
749  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
750  PROGRESS_VACUUM_PHASE_SCAN_HEAP);
751  }
752 
753  /*
754  * Pin the visibility map page in case we need to mark the page
755  * all-visible. In most cases this will be very cheap, because we'll
756  * already have the correct page pinned anyway. However, it's
757  * possible that (a) next_unskippable_block is covered by a different
758  * VM page than the current block or (b) we released our pin and did a
759  * cycle of index vacuuming.
760  *
761  */
762  visibilitymap_pin(onerel, blkno, &vmbuffer);
763 
764  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
765  RBM_NORMAL, vac_strategy);
766 
767  /* We need buffer cleanup lock so that we can prune HOT chains. */
768  if (!ConditionalLockBufferForCleanup(buf))
769  {
770  /*
771  * If we're not performing an aggressive scan to guard against XID
772  * wraparound, and we don't want to forcibly check the page, then
773  * it's OK to skip vacuuming pages we get a lock conflict on. They
774  * will be dealt with in some future vacuum.
775  */
776  if (!aggressive && !FORCE_CHECK_PAGE())
777  {
778  ReleaseBuffer(buf);
779  vacrelstats->pinskipped_pages++;
780  continue;
781  }
782 
783  /*
784  * Read the page with share lock to see if any xids on it need to
785  * be frozen. If not we just skip the page, after updating our
786  * scan statistics. If there are some, we wait for cleanup lock.
787  *
788  * We could defer the lock request further by remembering the page
789  * and coming back to it later, or we could even register
790  * ourselves for multiple buffers and then service whichever one
791  * is received first. For now, this seems good enough.
792  *
793  * If we get here with aggressive false, then we're just forcibly
794  * checking the page, and so we don't want to insist on getting
795  * the lock; we only need to know if the page contains tuples, so
796  * that we can update nonempty_pages correctly. It's convenient
797  * to use lazy_check_needs_freeze() for both situations, though.
798  */
799  LockBuffer(buf, BUFFER_LOCK_SHARE);
800  if (!lazy_check_needs_freeze(buf, &hastup))
801  {
802  UnlockReleaseBuffer(buf);
803  vacrelstats->scanned_pages++;
804  vacrelstats->pinskipped_pages++;
805  if (hastup)
806  vacrelstats->nonempty_pages = blkno + 1;
807  continue;
808  }
809  if (!aggressive)
810  {
811  /*
812  * Here, we must not advance scanned_pages; that would amount
813  * to claiming that the page contains no freezable tuples.
814  */
815  UnlockReleaseBuffer(buf);
816  vacrelstats->pinskipped_pages++;
817  if (hastup)
818  vacrelstats->nonempty_pages = blkno + 1;
819  continue;
820  }
821  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
822  LockBufferForCleanup(buf);
823  /* drop through to normal processing */
824  }
825 
826  vacrelstats->scanned_pages++;
827  vacrelstats->tupcount_pages++;
828 
829  page = BufferGetPage(buf);
830 
831  if (PageIsNew(page))
832  {
833  /*
834  * An all-zeroes page could be left over if a backend extends the
835  * relation but crashes before initializing the page. Reclaim such
836  * pages for use.
837  *
838  * We have to be careful here because we could be looking at a
839  * page that someone has just added to the relation and not yet
840  * been able to initialize (see RelationGetBufferForTuple). To
841  * protect against that, release the buffer lock, grab the
842  * relation extension lock momentarily, and re-lock the buffer. If
843  * the page is still uninitialized by then, it must be left over
844  * from a crashed backend, and we can initialize it.
845  *
846  * We don't really need the relation lock when this is a new or
847  * temp relation, but it's probably not worth the code space to
848  * check that, since this surely isn't a critical path.
849  *
850  * Note: the comparable code in vacuum.c need not worry because
851  * it's got exclusive lock on the whole relation.
852  */
853  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
854  LockRelationForExtension(onerel, ExclusiveLock);
855  UnlockRelationForExtension(onerel, ExclusiveLock);
856  LockBufferForCleanup(buf);
857  if (PageIsNew(page))
858  {
859  ereport(WARNING,
860  (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
861  relname, blkno)));
862  PageInit(page, BufferGetPageSize(buf), 0);
863  empty_pages++;
864  }
865  freespace = PageGetHeapFreeSpace(page);
866  MarkBufferDirty(buf);
867  UnlockReleaseBuffer(buf);
868 
869  RecordPageWithFreeSpace(onerel, blkno, freespace);
870  continue;
871  }
872 
873  if (PageIsEmpty(page))
874  {
875  empty_pages++;
876  freespace = PageGetHeapFreeSpace(page);
877 
878  /* empty pages are always all-visible and all-frozen */
879  if (!PageIsAllVisible(page))
880  {
881  START_CRIT_SECTION();
882 
883  /* mark buffer dirty before writing a WAL record */
884  MarkBufferDirty(buf);
885 
886  /*
887  * It's possible that another backend has extended the heap,
888  * initialized the page, and then failed to WAL-log the page
889  * due to an ERROR. Since heap extension is not WAL-logged,
890  * recovery might try to replay our record setting the page
891  * all-visible and find that the page isn't initialized, which
892  * will cause a PANIC. To prevent that, check whether the
893  * page has been previously WAL-logged, and if not, do that
894  * now.
895  */
896  if (RelationNeedsWAL(onerel) &&
897  PageGetLSN(page) == InvalidXLogRecPtr)
898  log_newpage_buffer(buf, true);
899 
900  PageSetAllVisible(page);
901  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
902  vmbuffer, InvalidTransactionId,
903  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
904  END_CRIT_SECTION();
905  }
906 
907  UnlockReleaseBuffer(buf);
908  RecordPageWithFreeSpace(onerel, blkno, freespace);
909  continue;
910  }
911 
912  /*
913  * Prune all HOT-update chains in this page.
914  *
915  * We count tuples removed by the pruning step as removed by VACUUM.
916  */
917  tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
918  &vacrelstats->latestRemovedXid);
919 
920  /*
921  * Now scan the page to collect vacuumable items and check for tuples
922  * requiring freezing.
923  */
924  all_visible = true;
925  has_dead_tuples = false;
926  nfrozen = 0;
927  hastup = false;
928  prev_dead_count = vacrelstats->num_dead_tuples;
929  maxoff = PageGetMaxOffsetNumber(page);
930 
931  /*
932  * Note: If you change anything in the loop below, also look at
933  * heap_page_is_all_visible to see if that needs to be changed.
934  */
935  for (offnum = FirstOffsetNumber;
936  offnum <= maxoff;
937  offnum = OffsetNumberNext(offnum))
938  {
939  ItemId itemid;
940 
941  itemid = PageGetItemId(page, offnum);
942 
943  /* Unused items require no processing, but we count 'em */
944  if (!ItemIdIsUsed(itemid))
945  {
946  nunused += 1;
947  continue;
948  }
949 
950  /* Redirect items mustn't be touched */
951  if (ItemIdIsRedirected(itemid))
952  {
953  hastup = true; /* this page won't be truncatable */
954  continue;
955  }
956 
957  ItemPointerSet(&(tuple.t_self), blkno, offnum);
958 
959  /*
960  * DEAD item pointers are to be vacuumed normally; but we don't
961  * count them in tups_vacuumed, else we'd be double-counting (at
962  * least in the common case where heap_page_prune() just freed up
963  * a non-HOT tuple).
964  */
965  if (ItemIdIsDead(itemid))
966  {
967  lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
968  all_visible = false;
969  continue;
970  }
971 
972  Assert(ItemIdIsNormal(itemid));
973 
974  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
975  tuple.t_len = ItemIdGetLength(itemid);
976  tuple.t_tableOid = RelationGetRelid(onerel);
977 
978  tupgone = false;
979 
980  switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
981  {
982  case HEAPTUPLE_DEAD:
983 
984  /*
985  * Ordinarily, DEAD tuples would have been removed by
986  * heap_page_prune(), but it's possible that the tuple
987  * state changed since heap_page_prune() looked. In
988  * particular an INSERT_IN_PROGRESS tuple could have
989  * changed to DEAD if the inserter aborted. So this
990  * cannot be considered an error condition.
991  *
992  * If the tuple is HOT-updated then it must only be
993  * removed by a prune operation; so we keep it just as if
994  * it were RECENTLY_DEAD. Also, if it's a heap-only
995  * tuple, we choose to keep it, because it'll be a lot
996  * cheaper to get rid of it in the next pruning pass than
997  * to treat it like an indexed tuple.
998  */
999  if (HeapTupleIsHotUpdated(&tuple) ||
1000  HeapTupleIsHeapOnly(&tuple))
1001  nkeep += 1;
1002  else
1003  tupgone = true; /* we can delete the tuple */
1004  all_visible = false;
1005  break;
1006  case HEAPTUPLE_LIVE:
1007  /* Tuple is good --- but let's do some validity checks */
1008  if (onerel->rd_rel->relhasoids &&
1009  !OidIsValid(HeapTupleGetOid(&tuple)))
1010  elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
1011  relname, blkno, offnum);
1012 
1013  /*
1014  * Is the tuple definitely visible to all transactions?
1015  *
1016  * NB: Like with per-tuple hint bits, we can't set the
1017  * PD_ALL_VISIBLE flag if the inserter committed
1018  * asynchronously. See SetHintBits for more info. Check
1019  * that the tuple is hinted xmin-committed because of
1020  * that.
1021  */
1022  if (all_visible)
1023  {
1024  TransactionId xmin;
1025 
1026  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
1027  {
1028  all_visible = false;
1029  break;
1030  }
1031 
1032  /*
1033  * The inserter definitely committed. But is it old
1034  * enough that everyone sees it as committed?
1035  */
1036  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
1037  if (!TransactionIdPrecedes(xmin, OldestXmin))
1038  {
1039  all_visible = false;
1040  break;
1041  }
1042 
1043  /* Track newest xmin on page. */
1044  if (TransactionIdFollows(xmin, visibility_cutoff_xid))
1045  visibility_cutoff_xid = xmin;
1046  }
1047  break;
1048  case HEAPTUPLE_RECENTLY_DEAD:
1049 
1050  /*
1051  * If tuple is recently deleted then we must not remove it
1052  * from relation.
1053  */
1054  nkeep += 1;
1055  all_visible = false;
1056  break;
1057  case HEAPTUPLE_INSERT_IN_PROGRESS:
1058  /* This is an expected case during concurrent vacuum */
1059  all_visible = false;
1060  break;
1061  case HEAPTUPLE_DELETE_IN_PROGRESS:
1062  /* This is an expected case during concurrent vacuum */
1063  all_visible = false;
1064  break;
1065  default:
1066  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1067  break;
1068  }
1069 
1070  if (tupgone)
1071  {
1072  lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
1073  HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
1074  &vacrelstats->latestRemovedXid);
1075  tups_vacuumed += 1;
1076  has_dead_tuples = true;
1077  }
1078  else
1079  {
1080  bool tuple_totally_frozen;
1081 
1082  num_tuples += 1;
1083  hastup = true;
1084 
1085  /*
1086  * Each non-removable tuple must be checked to see if it needs
1087  * freezing. Note we already have exclusive buffer lock.
1088  */
1089  if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
1090  MultiXactCutoff, &frozen[nfrozen],
1091  &tuple_totally_frozen))
1092  frozen[nfrozen++].offset = offnum;
1093 
1094  if (!tuple_totally_frozen)
1095  all_frozen = false;
1096  }
1097  } /* scan along page */
1098 
1099  /*
1100  * If we froze any tuples, mark the buffer dirty, and write a WAL
1101  * record recording the changes. We must log the changes to be
1102  * crash-safe against future truncation of CLOG.
1103  */
1104  if (nfrozen > 0)
1105  {
1106  START_CRIT_SECTION();
1107 
1108  MarkBufferDirty(buf);
1109 
1110  /* execute collected freezes */
1111  for (i = 0; i < nfrozen; i++)
1112  {
1113  ItemId itemid;
1114  HeapTupleHeader htup;
1115 
1116  itemid = PageGetItemId(page, frozen[i].offset);
1117  htup = (HeapTupleHeader) PageGetItem(page, itemid);
1118 
1119  heap_execute_freeze_tuple(htup, &frozen[i]);
1120  }
1121 
1122  /* Now WAL-log freezing if necessary */
1123  if (RelationNeedsWAL(onerel))
1124  {
1125  XLogRecPtr recptr;
1126 
1127  recptr = log_heap_freeze(onerel, buf, FreezeLimit,
1128  frozen, nfrozen);
1129  PageSetLSN(page, recptr);
1130  }
1131 
1132  END_CRIT_SECTION();
1133  }
1134 
1135  /*
1136  * If there are no indexes then we can vacuum the page right now
1137  * instead of doing a second scan.
1138  */
1139  if (nindexes == 0 &&
1140  vacrelstats->num_dead_tuples > 0)
1141  {
1142  /* Remove tuples from heap */
1143  lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
1144  has_dead_tuples = false;
1145 
1146  /*
1147  * Forget the now-vacuumed tuples, and press on, but be careful
1148  * not to reset latestRemovedXid since we want that value to be
1149  * valid.
1150  */
1151  vacrelstats->num_dead_tuples = 0;
1152  vacuumed_pages++;
1153  }
1154 
1155  freespace = PageGetHeapFreeSpace(page);
1156 
1157  /* mark page all-visible, if appropriate */
1158  if (all_visible && !all_visible_according_to_vm)
1159  {
1160  uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
1161 
1162  if (all_frozen)
1163  flags |= VISIBILITYMAP_ALL_FROZEN;
1164 
1165  /*
1166  * It should never be the case that the visibility map page is set
1167  * while the page-level bit is clear, but the reverse is allowed
1168  * (if checksums are not enabled). Regardless, set the both bits
1169  * so that we get back in sync.
1170  *
1171  * NB: If the heap page is all-visible but the VM bit is not set,
1172  * we don't need to dirty the heap page. However, if checksums
1173  * are enabled, we do need to make sure that the heap page is
1174  * dirtied before passing it to visibilitymap_set(), because it
1175  * may be logged. Given that this situation should only happen in
1176  * rare cases after a crash, it is not worth optimizing.
1177  */
1178  PageSetAllVisible(page);
1179  MarkBufferDirty(buf);
1180  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1181  vmbuffer, visibility_cutoff_xid, flags);
1182  }
1183 
1184  /*
1185  * As of PostgreSQL 9.2, the visibility map bit should never be set if
1186  * the page-level bit is clear. However, it's possible that the bit
1187  * got cleared after we checked it and before we took the buffer
1188  * content lock, so we must recheck before jumping to the conclusion
1189  * that something bad has happened.
1190  */
1191  else if (all_visible_according_to_vm && !PageIsAllVisible(page)
1192  && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
1193  {
1194  elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
1195  relname, blkno);
1196  visibilitymap_clear(onerel, blkno, vmbuffer,
1197  VISIBILITYMAP_VALID_BITS);
1198  }
1199 
1200  /*
1201  * It's possible for the value returned by GetOldestXmin() to move
1202  * backwards, so it's not wrong for us to see tuples that appear to
1203  * not be visible to everyone yet, while PD_ALL_VISIBLE is already
1204  * set. The real safe xmin value never moves backwards, but
1205  * GetOldestXmin() is conservative and sometimes returns a value
1206  * that's unnecessarily small, so if we see that contradiction it just
1207  * means that the tuples that we think are not visible to everyone yet
1208  * actually are, and the PD_ALL_VISIBLE flag is correct.
1209  *
1210  * There should never be dead tuples on a page with PD_ALL_VISIBLE
1211  * set, however.
1212  */
1213  else if (PageIsAllVisible(page) && has_dead_tuples)
1214  {
1215  elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
1216  relname, blkno);
1217  PageClearAllVisible(page);
1218  MarkBufferDirty(buf);
1219  visibilitymap_clear(onerel, blkno, vmbuffer,
1220  VISIBILITYMAP_VALID_BITS);
1221  }
1222 
1223  /*
1224  * If the all-visible page is turned out to be all-frozen but not
1225  * marked, we should so mark it. Note that all_frozen is only valid
1226  * if all_visible is true, so we must check both.
1227  */
1228  else if (all_visible_according_to_vm && all_visible && all_frozen &&
1229  !VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
1230  {
1231  /*
1232  * We can pass InvalidTransactionId as the cutoff XID here,
1233  * because setting the all-frozen bit doesn't cause recovery
1234  * conflicts.
1235  */
1236  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1237  vmbuffer, InvalidTransactionId,
1238  VISIBILITYMAP_ALL_FROZEN);
1239  }
1240 
1241  UnlockReleaseBuffer(buf);
1242 
1243  /* Remember the location of the last page with nonremovable tuples */
1244  if (hastup)
1245  vacrelstats->nonempty_pages = blkno + 1;
1246 
1247  /*
1248  * If we remembered any tuples for deletion, then the page will be
1249  * visited again by lazy_vacuum_heap, which will compute and record
1250  * its post-compaction free space. If not, then we're done with this
1251  * page, so remember its free space as-is. (This path will always be
1252  * taken if there are no indexes.)
1253  */
1254  if (vacrelstats->num_dead_tuples == prev_dead_count)
1255  RecordPageWithFreeSpace(onerel, blkno, freespace);
1256  }
1257 
1258  /* report that everything is scanned and vacuumed */
1259  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1260 
1261  pfree(frozen);
1262 
1263  /* save stats for use later */
1264  vacrelstats->scanned_tuples = num_tuples;
1265  vacrelstats->tuples_deleted = tups_vacuumed;
1266  vacrelstats->new_dead_tuples = nkeep;
1267 
1268  /* now we can compute the new value for pg_class.reltuples */
1269  vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
1270  nblocks,
1271  vacrelstats->tupcount_pages,
1272  num_tuples);
1273 
1274  /*
1275  * Release any remaining pin on visibility map page.
1276  */
1277  if (BufferIsValid(vmbuffer))
1278  {
1279  ReleaseBuffer(vmbuffer);
1280  vmbuffer = InvalidBuffer;
1281  }
1282 
1283  /* If any tuples need to be deleted, perform final vacuum cycle */
1284  /* XXX put a threshold on min number of tuples here? */
1285  if (vacrelstats->num_dead_tuples > 0)
1286  {
1287  const int hvp_index[] = {
1288  PROGRESS_VACUUM_PHASE,
1289  PROGRESS_VACUUM_NUM_INDEX_VACUUMS
1290  };
1291  int64 hvp_val[2];
1292 
1293  /* Log cleanup info before we touch indexes */
1294  vacuum_log_cleanup_info(onerel, vacrelstats);
1295 
1296  /* Report that we are now vacuuming indexes */
1297  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1298  PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
1299 
1300  /* Remove index entries */
1301  for (i = 0; i < nindexes; i++)
1302  lazy_vacuum_index(Irel[i],
1303  &indstats[i],
1304  vacrelstats);
1305 
1306  /* Report that we are now vacuuming the heap */
1307  hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
1308  hvp_val[1] = vacrelstats->num_index_scans + 1;
1309  pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
1310 
1311  /* Remove tuples from heap */
1312  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1313  PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
1314  lazy_vacuum_heap(onerel, vacrelstats);
1315  vacrelstats->num_index_scans++;
1316  }
1317 
1318  /* report all blocks vacuumed; and that we're cleaning up */
1319  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1320  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1321  PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);
1322 
1323  /* Do post-vacuum cleanup and statistics update for each index */
1324  for (i = 0; i < nindexes; i++)
1325  lazy_cleanup_index(Irel[i], indstats[i], vacrelstats);
1326 
1327  /* If no indexes, make log report that lazy_vacuum_heap would've made */
1328  if (vacuumed_pages)
1329  ereport(elevel,
1330  (errmsg("\"%s\": removed %.0f row versions in %u pages",
1331  RelationGetRelationName(onerel),
1332  tups_vacuumed, vacuumed_pages)));
1333 
1334  /*
1335  * This is pretty messy, but we split it up so that we can skip emitting
1336  * individual parts of the message when not applicable.
1337  */
1338  initStringInfo(&buf);
1339  appendStringInfo(&buf,
1340  _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
1341  nkeep, OldestXmin);
1342  appendStringInfo(&buf, _("There were %.0f unused item pointers.\n"),
1343  nunused);
1344  appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
1345  "Skipped %u pages due to buffer pins, ",
1346  vacrelstats->pinskipped_pages),
1347  vacrelstats->pinskipped_pages);
1348  appendStringInfo(&buf, ngettext("%u frozen page.\n",
1349  "%u frozen pages.\n",
1350  vacrelstats->frozenskipped_pages),
1351  vacrelstats->frozenskipped_pages);
1352  appendStringInfo(&buf, ngettext("%u page is entirely empty.\n",
1353  "%u pages are entirely empty.\n",
1354  empty_pages),
1355  empty_pages);
1356  appendStringInfo(&buf, _("%s."),
1357  pg_rusage_show(&ru0));
1358 
1359  ereport(elevel,
1360  (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
1361  RelationGetRelationName(onerel),
1362  tups_vacuumed, num_tuples,
1363  vacrelstats->scanned_pages, nblocks),
1364  errdetail_internal("%s", buf.data)));
1365  pfree(buf.data);
1366 }
static void lazy_space_alloc (LVRelStats *vacrelstats, BlockNumber relblocks)

Definition at line 1986 of file vacuumlazy.c.

References autovacuum_work_mem, LVRelStats::dead_tuples, LVRelStats::hasindex, IsAutoVacuumWorkerProcess(), LAZY_ALLOC_TUPLES, maintenance_work_mem, Max, LVRelStats::max_dead_tuples, MaxAllocSize, MaxHeapTuplesPerPage, Min, LVRelStats::num_dead_tuples, and palloc().

Referenced by lazy_scan_heap().

1986 lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
1987 {
1988  long maxtuples;
1989  int vac_work_mem = IsAutoVacuumWorkerProcess() &&
1990  autovacuum_work_mem != -1 ?
1991  autovacuum_work_mem : maintenance_work_mem;
1992 
1993  if (vacrelstats->hasindex)
1994  {
1995  maxtuples = (vac_work_mem * 1024L) / sizeof(ItemPointerData);
1996  maxtuples = Min(maxtuples, INT_MAX);
1997  maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
1998 
1999  /* curious coding here to ensure the multiplication can't overflow */
2000  if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
2001  maxtuples = relblocks * LAZY_ALLOC_TUPLES;
2002 
2003  /* stay sane if small maintenance_work_mem */
2004  maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
2005  }
2006  else
2007  {
2008  maxtuples = MaxHeapTuplesPerPage;
2009  }
2010 
2011  vacrelstats->num_dead_tuples = 0;
2012  vacrelstats->max_dead_tuples = (int) maxtuples;
2013  vacrelstats->dead_tuples = (ItemPointer)
2014  palloc(maxtuples * sizeof(ItemPointerData));
2015 }
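
To make the sizing rule concrete, here is a small standalone sketch. The constants are assumptions for the common configuration: 8 kB pages give MaxHeapTuplesPerPage = 291, an ItemPointerData is 6 bytes, and maintenance_work_mem defaults to 64 MB (the setting is in kB, hence the * 1024 above):

#include <stdio.h>

#define TID_BYTES        6      /* sizeof(ItemPointerData) */
#define TUPLES_PER_PAGE  291    /* MaxHeapTuplesPerPage with 8 kB blocks */

int
main(void)
{
    long vac_work_mem = 65536;  /* maintenance_work_mem in kB (64 MB default) */
    long relblocks = 1000;      /* a small, 8 MB table */
    long maxtuples = vac_work_mem * 1024 / TID_BYTES;    /* ~11.2 million */

    /* same clamp as above: never allocate more than the table could need */
    if (maxtuples / TUPLES_PER_PAGE > relblocks)
        maxtuples = relblocks * TUPLES_PER_PAGE;

    printf("dead-tuple array: %ld TIDs, %ld kB\n",
           maxtuples, maxtuples * TID_BYTES / 1024);     /* 291000 TIDs, 1705 kB */
    return 0;
}

For small tables the table-size clamp wins, so the array stays far below maintenance_work_mem; for large tables the memory cap wins, and filling the array is what forces an extra index-vacuum cycle.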
static bool lazy_tid_reaped (ItemPointer itemptr, void *state)

Definition at line 2046 of file vacuumlazy.c.

References LVRelStats::dead_tuples, NULL, LVRelStats::num_dead_tuples, and vac_cmp_itemptr().

Referenced by lazy_vacuum_index().

2046 lazy_tid_reaped(ItemPointer itemptr, void *state)
2047 {
2048  LVRelStats *vacrelstats = (LVRelStats *) state;
2049  ItemPointer res;
2050 
2051  res = (ItemPointer) bsearch((void *) itemptr,
2052  (void *) vacrelstats->dead_tuples,
2053  vacrelstats->num_dead_tuples,
2054  sizeof(ItemPointerData),
2055  vac_cmp_itemptr);
2056 
2057  return (res != NULL);
2058 }
static void lazy_truncate_heap (Relation onerel, LVRelStats *vacrelstats)

Definition at line 1716 of file vacuumlazy.c.

References AccessExclusiveLock, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), elevel, ereport, errdetail(), errmsg(), LVRelStats::lock_waiter_detected, LVRelStats::nonempty_pages, LVRelStats::pages_removed, pg_rusage_init(), pg_rusage_show(), pg_usleep(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelStats::rel_pages, RelationGetNumberOfBlocks, RelationGetRelationName, RelationTruncate(), UnlockRelation(), VACUUM_TRUNCATE_LOCK_TIMEOUT, and VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL.

Referenced by lazy_vacuum_rel().

1716 lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
1717 {
1718  BlockNumber old_rel_pages = vacrelstats->rel_pages;
1719  BlockNumber new_rel_pages;
1720  PGRUsage ru0;
1721  int lock_retry;
1722 
1723  pg_rusage_init(&ru0);
1724 
1725  /* Report that we are now truncating */
1726  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1727  PROGRESS_VACUUM_PHASE_TRUNCATE);
1728 
1729  /*
1730  * Loop until no more truncating can be done.
1731  */
1732  do
1733  {
1734  /*
1735  * We need full exclusive lock on the relation in order to do
1736  * truncation. If we can't get it, give up rather than waiting --- we
1737  * don't want to block other backends, and we don't want to deadlock
1738  * (which is quite possible considering we already hold a lower-grade
1739  * lock).
1740  */
1741  vacrelstats->lock_waiter_detected = false;
1742  lock_retry = 0;
1743  while (true)
1744  {
1745  if (ConditionalLockRelation(onerel, AccessExclusiveLock))
1746  break;
1747 
1748  /*
1749  * Check for interrupts while trying to (re-)acquire the exclusive
1750  * lock.
1751  */
1752  CHECK_FOR_INTERRUPTS();
1753 
1754  if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
1755  VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
1756  {
1757  /*
1758  * We failed to establish the lock in the specified number of
1759  * retries. This means we give up truncating.
1760  */
1761  vacrelstats->lock_waiter_detected = true;
1762  ereport(elevel,
1763  (errmsg("\"%s\": stopping truncate due to conflicting lock request",
1764  RelationGetRelationName(onerel))));
1765  return;
1766  }
1767 
1768  pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL * 1000L);
1769  }
1770 
1771  /*
1772  * Now that we have exclusive lock, look to see if the rel has grown
1773  * whilst we were vacuuming with non-exclusive lock. If so, give up;
1774  * the newly added pages presumably contain non-deletable tuples.
1775  */
1776  new_rel_pages = RelationGetNumberOfBlocks(onerel);
1777  if (new_rel_pages != old_rel_pages)
1778  {
1779  /*
1780  * Note: we intentionally don't update vacrelstats->rel_pages with
1781  * the new rel size here. If we did, it would amount to assuming
1782  * that the new pages are empty, which is unlikely. Leaving the
1783  * numbers alone amounts to assuming that the new pages have the
1784  * same tuple density as existing ones, which is less unlikely.
1785  */
1786  UnlockRelation(onerel, AccessExclusiveLock);
1787  return;
1788  }
1789 
1790  /*
1791  * Scan backwards from the end to verify that the end pages actually
1792  * contain no tuples. This is *necessary*, not optional, because
1793  * other backends could have added tuples to these pages whilst we
1794  * were vacuuming.
1795  */
1796  new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
1797 
1798  if (new_rel_pages >= old_rel_pages)
1799  {
1800  /* can't do anything after all */
1801  UnlockRelation(onerel, AccessExclusiveLock);
1802  return;
1803  }
1804 
1805  /*
1806  * Okay to truncate.
1807  */
1808  RelationTruncate(onerel, new_rel_pages);
1809 
1810  /*
1811  * We can release the exclusive lock as soon as we have truncated.
1812  * Other backends can't safely access the relation until they have
1813  * processed the smgr invalidation that smgrtruncate sent out ... but
1814  * that should happen as part of standard invalidation processing once
1815  * they acquire lock on the relation.
1816  */
1817  UnlockRelation(onerel, AccessExclusiveLock);
1818 
1819  /*
1820  * Update statistics. Here, it *is* correct to adjust rel_pages
1821  * without also touching reltuples, since the tuple count wasn't
1822  * changed by the truncation.
1823  */
1824  vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
1825  vacrelstats->rel_pages = new_rel_pages;
1826 
1827  ereport(elevel,
1828  (errmsg("\"%s\": truncated %u to %u pages",
1829  RelationGetRelationName(onerel),
1830  old_rel_pages, new_rel_pages),
1831  errdetail("%s.",
1832  pg_rusage_show(&ru0))));
1833  old_rel_pages = new_rel_pages;
1834  } while (new_rel_pages > vacrelstats->nonempty_pages &&
1835  vacrelstats->lock_waiter_detected);
1836 }
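
With the macro values defined in this file, the loop above makes at most VACUUM_TRUNCATE_LOCK_TIMEOUT / VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL = 5000 / 50 = 100 lock attempts, sleeping 50 ms between them, before giving up. A self-contained sketch of the same polling pattern; try_lock() is a hypothetical stand-in for ConditionalLockRelation() that here succeeds on the third try:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define LOCK_TIMEOUT_MS       5000   /* VACUUM_TRUNCATE_LOCK_TIMEOUT */
#define LOCK_WAIT_INTERVAL_MS 50     /* VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL */

/* hypothetical stand-in for ConditionalLockRelation() */
static bool
try_lock(void)
{
    static int calls = 0;
    return ++calls >= 3;
}

int
main(void)
{
    int lock_retry = 0;

    while (!try_lock())
    {
        if (++lock_retry > LOCK_TIMEOUT_MS / LOCK_WAIT_INTERVAL_MS)
        {
            puts("giving up: conflicting lock request");
            return 1;
        }
        usleep(LOCK_WAIT_INTERVAL_MS * 1000);   /* sleep, as pg_usleep() does */
    }
    puts("exclusive lock acquired");
    return 0;
}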
static void lazy_vacuum_heap (Relation onerel, LVRelStats *vacrelstats)

Definition at line 1381 of file vacuumlazy.c.

References buf, BufferGetPage, BufferIsValid, ConditionalLockBufferForCleanup(), LVRelStats::dead_tuples, elevel, ereport, errdetail(), errmsg(), InvalidBuffer, ItemPointerGetBlockNumber, lazy_vacuum_page(), MAIN_FORKNUM, PageGetHeapFreeSpace(), pg_rusage_init(), pg_rusage_show(), RBM_NORMAL, ReadBufferExtended(), RecordPageWithFreeSpace(), RelationGetRelationName, ReleaseBuffer(), UnlockReleaseBuffer(), and vacuum_delay_point().

Referenced by lazy_scan_heap().

1381 lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
1382 {
1383  int tupindex;
1384  int npages;
1385  PGRUsage ru0;
1386  Buffer vmbuffer = InvalidBuffer;
1387 
1388  pg_rusage_init(&ru0);
1389  npages = 0;
1390 
1391  tupindex = 0;
1392  while (tupindex < vacrelstats->num_dead_tuples)
1393  {
1394  BlockNumber tblk;
1395  Buffer buf;
1396  Page page;
1397  Size freespace;
1398 
1399  vacuum_delay_point();
1400 
1401  tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1402  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
1403  vac_strategy);
1404  if (!ConditionalLockBufferForCleanup(buf))
1405  {
1406  ReleaseBuffer(buf);
1407  ++tupindex;
1408  continue;
1409  }
1410  tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
1411  &vmbuffer);
1412 
1413  /* Now that we've compacted the page, record its available space */
1414  page = BufferGetPage(buf);
1415  freespace = PageGetHeapFreeSpace(page);
1416 
1417  UnlockReleaseBuffer(buf);
1418  RecordPageWithFreeSpace(onerel, tblk, freespace);
1419  npages++;
1420  }
1421 
1422  if (BufferIsValid(vmbuffer))
1423  {
1424  ReleaseBuffer(vmbuffer);
1425  vmbuffer = InvalidBuffer;
1426  }
1427 
1428  ereport(elevel,
1429  (errmsg("\"%s\": removed %d row versions in %d pages",
1430  RelationGetRelationName(onerel),
1431  tupindex, npages),
1432  errdetail("%s.",
1433  pg_rusage_show(&ru0))));
1434 }
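
Because dead_tuples is kept in heap order, this loop touches each affected block exactly once: lazy_vacuum_page() consumes every TID for the current block and returns the index of the first TID belonging to a later block. A minimal standalone sketch of that traversal, with a plain struct standing in for ItemPointerData:

#include <stdio.h>

typedef struct { unsigned blk; unsigned off; } Tid;

int
main(void)
{
    /* sorted the way lazy_record_dead_tuple() fills the array */
    Tid dead[] = {{2, 1}, {2, 5}, {2, 9}, {7, 3}, {7, 4}, {11, 2}};
    int ndead = 6;
    int tupindex = 0;

    while (tupindex < ndead)
    {
        unsigned tblk = dead[tupindex].blk;
        int ntuples = 0;

        /* consume all TIDs for this block, as lazy_vacuum_page() does */
        while (tupindex < ndead && dead[tupindex].blk == tblk)
        {
            ntuples++;
            tupindex++;
        }
        printf("block %u: remove %d item pointers\n", tblk, ntuples);
    }
    return 0;
}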
static void lazy_vacuum_index (Relation indrel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats)

Definition at line 1597 of file vacuumlazy.c.

References IndexVacuumInfo::analyze_only, elevel, ereport, errdetail(), errmsg(), IndexVacuumInfo::estimated_count, IndexVacuumInfo::index, index_bulk_delete(), lazy_tid_reaped(), IndexVacuumInfo::message_level, LVRelStats::num_dead_tuples, IndexVacuumInfo::num_heap_tuples, LVRelStats::old_rel_tuples, pg_rusage_init(), pg_rusage_show(), RelationGetRelationName, IndexVacuumInfo::strategy, and vac_strategy.

Referenced by lazy_scan_heap().

1597 lazy_vacuum_index(Relation indrel,
1598  IndexBulkDeleteResult **stats,
1599  LVRelStats *vacrelstats)
1600 {
1601  IndexVacuumInfo ivinfo;
1602  PGRUsage ru0;
1603 
1604  pg_rusage_init(&ru0);
1605 
1606  ivinfo.index = indrel;
1607  ivinfo.analyze_only = false;
1608  ivinfo.estimated_count = true;
1609  ivinfo.message_level = elevel;
1610  ivinfo.num_heap_tuples = vacrelstats->old_rel_tuples;
1611  ivinfo.strategy = vac_strategy;
1612 
1613  /* Do bulk deletion */
1614  *stats = index_bulk_delete(&ivinfo, *stats,
1615  lazy_tid_reaped, (void *) vacrelstats);
1616 
1617  ereport(elevel,
1618  (errmsg("scanned index \"%s\" to remove %d row versions",
1619  RelationGetRelationName(indrel),
1620  vacrelstats->num_dead_tuples),
1621  errdetail("%s.", pg_rusage_show(&ru0))));
1622 }
static int lazy_vacuum_page (Relation onerel, BlockNumber blkno, Buffer buffer, int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)

Definition at line 1447 of file vacuumlazy.c.

References Assert, BufferGetPage, BufferIsValid, LVRelStats::dead_tuples, END_CRIT_SECTION, heap_page_is_all_visible(), InvalidXLogRecPtr, ItemIdSetUnused, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LVRelStats::latestRemovedXid, log_heap_clean(), MarkBufferDirty(), MaxOffsetNumber, NULL, LVRelStats::num_dead_tuples, PageGetItemId, PageIsAllVisible, PageRepairFragmentation(), PageSetAllVisible, PageSetLSN, pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, RelationNeedsWAL, START_CRIT_SECTION, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_get_status(), and visibilitymap_set().

Referenced by lazy_scan_heap(), and lazy_vacuum_heap().

1447 lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
1448  int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
1449 {
1450  Page page = BufferGetPage(buffer);
1451  OffsetNumber unused[MaxOffsetNumber];
1452  int uncnt = 0;
1453  TransactionId visibility_cutoff_xid;
1454  bool all_frozen;
1455 
1456  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1457 
1458  START_CRIT_SECTION();
1459 
1460  for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
1461  {
1462  BlockNumber tblk;
1463  OffsetNumber toff;
1464  ItemId itemid;
1465 
1466  tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1467  if (tblk != blkno)
1468  break; /* past end of tuples for this block */
1469  toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
1470  itemid = PageGetItemId(page, toff);
1471  ItemIdSetUnused(itemid);
1472  unused[uncnt++] = toff;
1473  }
1474 
1475  PageRepairFragmentation(page);
1476 
1477  /*
1478  * Mark buffer dirty before we write WAL.
1479  */
1480  MarkBufferDirty(buffer);
1481 
1482  /* XLOG stuff */
1483  if (RelationNeedsWAL(onerel))
1484  {
1485  XLogRecPtr recptr;
1486 
1487  recptr = log_heap_clean(onerel, buffer,
1488  NULL, 0, NULL, 0,
1489  unused, uncnt,
1490  vacrelstats->latestRemovedXid);
1491  PageSetLSN(page, recptr);
1492  }
1493 
1494  /*
1495  * End critical section, so we safely can do visibility tests (which
1496  * possibly need to perform IO and allocate memory!). If we crash now the
1497  * page (including the corresponding vm bit) might not be marked all
1498  * visible, but that's fine. A later vacuum will fix that.
1499  */
1500  END_CRIT_SECTION();
1501 
1502  /*
1503  * Now that we have removed the dead tuples from the page, once again
1504  * check if the page has become all-visible. The page is already marked
1505  * dirty, exclusively locked, and, if needed, a full page image has been
1506  * emitted in the log_heap_clean() above.
1507  */
1508  if (heap_page_is_all_visible(onerel, buffer, &visibility_cutoff_xid,
1509  &all_frozen))
1510  PageSetAllVisible(page);
1511 
1512  /*
1513  * All the changes to the heap page have been done. If the all-visible
1514  * flag is now set, also set the VM all-visible bit (and, if possible, the
1515  * all-frozen bit) unless this has already been done previously.
1516  */
1517  if (PageIsAllVisible(page))
1518  {
1519  uint8 vm_status = visibilitymap_get_status(onerel, blkno, vmbuffer);
1520  uint8 flags = 0;
1521 
1522  /* Set the VM all-frozen bit to flag, if needed */
1523  if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
1524  flags |= VISIBILITYMAP_ALL_VISIBLE;
1525  if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0 && all_frozen)
1526  flags |= VISIBILITYMAP_ALL_FROZEN;
1527 
1528  Assert(BufferIsValid(*vmbuffer));
1529  if (flags != 0)
1530  visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr,
1531  *vmbuffer, visibility_cutoff_xid, flags);
1532  }
1533 
1534  return tupindex;
1535 }
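
The flag logic at lines 1519-1531 sets only the visibility-map bits that are not already set, and both bits can go to the map in a single visibilitymap_set() call. A standalone sketch of that bit arithmetic (0x01 and 0x02 are the bit values visibilitymap.h assigns to all-visible and all-frozen):

#include <stdio.h>

#define ALL_VISIBLE 0x01   /* VISIBILITYMAP_ALL_VISIBLE */
#define ALL_FROZEN  0x02   /* VISIBILITYMAP_ALL_FROZEN */

int
main(void)
{
    unsigned char vm_status = 0;   /* what visibilitymap_get_status() returned */
    int all_frozen = 1;            /* result from heap_page_is_all_visible() */
    unsigned char flags = 0;

    if ((vm_status & ALL_VISIBLE) == 0)
        flags |= ALL_VISIBLE;
    if ((vm_status & ALL_FROZEN) == 0 && all_frozen)
        flags |= ALL_FROZEN;

    if (flags != 0)
        printf("set VM bits 0x%x in one visibilitymap_set() call\n", flags);
    return 0;
}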
void lazy_vacuum_rel (Relation onerel, int options, VacuumParams *params, BufferAccessStrategy bstrategy)

Definition at line 183 of file vacuumlazy.c.

References _, appendStringInfo(), Assert, buf, StringInfoData::data, DEBUG2, elevel, ereport, errmsg_internal(), FreeSpaceMapVacuum(), VacuumParams::freeze_min_age, VacuumParams::freeze_table_age, FreezeLimit, LVRelStats::frozenskipped_pages, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), LVRelStats::hasindex, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, IsAutoVacuumWorkerProcess(), lazy_scan_heap(), lazy_truncate_heap(), LVRelStats::lock_waiter_detected, LOG, VacuumParams::log_min_duration, VacuumParams::multixact_freeze_min_age, VacuumParams::multixact_freeze_table_age, MultiXactCutoff, MultiXactIdPrecedesOrEquals(), MyDatabaseId, LVRelStats::new_dead_tuples, LVRelStats::new_rel_tuples, NoLock, NULL, LVRelStats::num_index_scans, LVRelStats::old_rel_pages, LVRelStats::old_rel_tuples, OldestXmin, LVRelStats::pages_removed, palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), LVRelStats::pinskipped_pages, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, RelationData::rd_rel, LVRelStats::rel_pages, RelationGetNamespace, RelationGetRelationName, RelationGetRelid, RowExclusiveLock, LVRelStats::scanned_pages, should_attempt_truncation(), TimestampDifference(), TimestampDifferenceExceeds(), TransactionIdPrecedesOrEquals(), LVRelStats::tupcount_pages, LVRelStats::tuples_deleted, vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_VERBOSE, vacuum_set_xid_limits(), VacuumPageDirty, VacuumPageHit, VacuumPageMiss, and visibilitymap_count().

Referenced by vacuum_rel().

183 lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
184  BufferAccessStrategy bstrategy)
185 {
186  LVRelStats *vacrelstats;
187  Relation *Irel;
188  int nindexes;
189  PGRUsage ru0;
190  TimestampTz starttime = 0;
191  long secs;
192  int usecs;
193  double read_rate,
194  write_rate;
195  bool aggressive; /* should we scan all unfrozen pages? */
196  bool scanned_all_unfrozen; /* actually scanned all such pages? */
197  TransactionId xidFullScanLimit;
198  MultiXactId mxactFullScanLimit;
199  BlockNumber new_rel_pages;
200  double new_rel_tuples;
201  BlockNumber new_rel_allvisible;
202  double new_live_tuples;
203  TransactionId new_frozen_xid;
204  MultiXactId new_min_multi;
205 
206  Assert(params != NULL);
207 
208  /* measure elapsed time iff autovacuum logging requires it */
209  if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
210  {
211  pg_rusage_init(&ru0);
212  starttime = GetCurrentTimestamp();
213  }
214 
215  if (options & VACOPT_VERBOSE)
216  elevel = INFO;
217  else
218  elevel = DEBUG2;
219 
220  pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
221  RelationGetRelid(onerel));
222 
223  vac_strategy = bstrategy;
224 
225  vacuum_set_xid_limits(onerel,
226  params->freeze_min_age,
227  params->freeze_table_age,
228  params->multixact_freeze_min_age,
229  params->multixact_freeze_table_age,
230  &OldestXmin, &FreezeLimit, &xidFullScanLimit,
231  &MultiXactCutoff, &mxactFullScanLimit);
232 
233  /*
234  * We request an aggressive scan if the table's frozen Xid is now older
235  * than or equal to the requested Xid full-table scan limit; or if the
236  * table's minimum MultiXactId is older than or equal to the requested
237  * mxid full-table scan limit; or if DISABLE_PAGE_SKIPPING was specified.
238  */
239  aggressive = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
240  xidFullScanLimit);
241  aggressive |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
242  mxactFullScanLimit);
243  if (options & VACOPT_DISABLE_PAGE_SKIPPING)
244  aggressive = true;
245 
246  vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
247 
248  vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
249  vacrelstats->old_rel_tuples = onerel->rd_rel->reltuples;
250  vacrelstats->num_index_scans = 0;
251  vacrelstats->pages_removed = 0;
252  vacrelstats->lock_waiter_detected = false;
253 
254  /* Open all indexes of the relation */
255  vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
256  vacrelstats->hasindex = (nindexes > 0);
257 
258  /* Do the vacuuming */
259  lazy_scan_heap(onerel, options, vacrelstats, Irel, nindexes, aggressive);
260 
261  /* Done with indexes */
262  vac_close_indexes(nindexes, Irel, NoLock);
263 
264  /*
265  * Compute whether we actually scanned the all unfrozen pages. If we did,
266  * we can adjust relfrozenxid and relminmxid.
267  *
268  * NB: We need to check this before truncating the relation, because that
269  * will change ->rel_pages.
270  */
271  if ((vacrelstats->scanned_pages + vacrelstats->frozenskipped_pages)
272  < vacrelstats->rel_pages)
273  {
274  Assert(!aggressive);
275  scanned_all_unfrozen = false;
276  }
277  else
278  scanned_all_unfrozen = true;
279 
280  /*
281  * Optionally truncate the relation.
282  */
283  if (should_attempt_truncation(vacrelstats))
284  lazy_truncate_heap(onerel, vacrelstats);
285 
286  /* Report that we are now doing final cleanup */
287  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
288  PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
289 
290  /* Vacuum the Free Space Map */
291  FreeSpaceMapVacuum(onerel);
292 
293  /*
294  * Update statistics in pg_class.
295  *
296  * A corner case here is that if we scanned no pages at all because every
297  * page is all-visible, we should not update relpages/reltuples, because
298  * we have no new information to contribute. In particular this keeps us
299  * from replacing relpages=reltuples=0 (which means "unknown tuple
300  * density") with nonzero relpages and reltuples=0 (which means "zero
301  * tuple density") unless there's some actual evidence for the latter.
302  *
303  * It's important that we use tupcount_pages and not scanned_pages for the
304  * check described above; scanned_pages counts pages where we could not
305  * get cleanup lock, and which were processed only for frozenxid purposes.
306  *
307  * We do update relallvisible even in the corner case, since if the table
308  * is all-visible we'd definitely like to know that. But clamp the value
309  * to be not more than what we're setting relpages to.
310  *
311  * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
312  * since then we don't know for certain that all tuples have a newer xmin.
313  */
314  new_rel_pages = vacrelstats->rel_pages;
315  new_rel_tuples = vacrelstats->new_rel_tuples;
316  if (vacrelstats->tupcount_pages == 0 && new_rel_pages > 0)
317  {
318  new_rel_pages = vacrelstats->old_rel_pages;
319  new_rel_tuples = vacrelstats->old_rel_tuples;
320  }
321 
322  visibilitymap_count(onerel, &new_rel_allvisible, NULL);
323  if (new_rel_allvisible > new_rel_pages)
324  new_rel_allvisible = new_rel_pages;
325 
326  new_frozen_xid = scanned_all_unfrozen ? FreezeLimit : InvalidTransactionId;
327  new_min_multi = scanned_all_unfrozen ? MultiXactCutoff : InvalidMultiXactId;
328 
329  vac_update_relstats(onerel,
330  new_rel_pages,
331  new_rel_tuples,
332  new_rel_allvisible,
333  vacrelstats->hasindex,
334  new_frozen_xid,
335  new_min_multi,
336  false);
337 
338  /* report results to the stats collector, too */
339  new_live_tuples = new_rel_tuples - vacrelstats->new_dead_tuples;
340  if (new_live_tuples < 0)
341  new_live_tuples = 0; /* just in case */
342 
343  pgstat_report_vacuum(RelationGetRelid(onerel),
344  onerel->rd_rel->relisshared,
345  new_live_tuples,
346  vacrelstats->new_dead_tuples);
347  pgstat_progress_end_command();
348 
349  /* and log the action if appropriate */
350  if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
351  {
352  TimestampTz endtime = GetCurrentTimestamp();
353 
354  if (params->log_min_duration == 0 ||
355  TimestampDifferenceExceeds(starttime, endtime,
356  params->log_min_duration))
357  {
358  StringInfoData buf;
359 
360  TimestampDifference(starttime, endtime, &secs, &usecs);
361 
362  read_rate = 0;
363  write_rate = 0;
364  if ((secs > 0) || (usecs > 0))
365  {
366  read_rate = (double) BLCKSZ *VacuumPageMiss / (1024 * 1024) /
367  (secs + usecs / 1000000.0);
368  write_rate = (double) BLCKSZ *VacuumPageDirty / (1024 * 1024) /
369  (secs + usecs / 1000000.0);
370  }
371 
372  /*
373  * This is pretty messy, but we split it up so that we can skip
374  * emitting individual parts of the message when not applicable.
375  */
376  initStringInfo(&buf);
377  appendStringInfo(&buf, _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n"),
378  get_database_name(MyDatabaseId),
379  get_namespace_name(RelationGetNamespace(onerel)),
380  RelationGetRelationName(onerel),
381  vacrelstats->num_index_scans);
382  appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
383  vacrelstats->pages_removed,
384  vacrelstats->rel_pages,
385  vacrelstats->pinskipped_pages,
386  vacrelstats->frozenskipped_pages);
387  appendStringInfo(&buf,
388  _("tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, oldest xmin: %u\n"),
389  vacrelstats->tuples_deleted,
390  vacrelstats->new_rel_tuples,
391  vacrelstats->new_dead_tuples,
392  OldestXmin);
393  appendStringInfo(&buf,
394  _("buffer usage: %d hits, %d misses, %d dirtied\n"),
395  VacuumPageHit,
396  VacuumPageMiss,
397  VacuumPageDirty);
398  appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
399  read_rate, write_rate);
400  appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
401 
402  ereport(LOG,
403  (errmsg_internal("%s", buf.data)));
404  pfree(buf.data);
405  }
406  }
407 }
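
To make the logging arithmetic at lines 362-370 concrete: with 8 kB blocks, 12800 page misses and 6400 page dirties over a 10-second vacuum work out to 10 MB/s read and 5 MB/s write. A standalone check of those numbers (sample values are invented):

#include <stdio.h>

int
main(void)
{
    double secs = 10.0, usecs = 0.0;
    long   page_miss = 12800, page_dirty = 6400;   /* VacuumPageMiss/Dirty */
    double elapsed = secs + usecs / 1000000.0;
    double read_rate  = (double) 8192 * page_miss  / (1024 * 1024) / elapsed;
    double write_rate = (double) 8192 * page_dirty / (1024 * 1024) / elapsed;

    printf("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n",
           read_rate, write_rate);                 /* 10.000 and 5.000 */
    return 0;
}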
static bool should_attempt_truncation (LVRelStats *vacrelstats)

Definition at line 1698 of file vacuumlazy.c.

References LVRelStats::nonempty_pages, old_snapshot_threshold, LVRelStats::rel_pages, REL_TRUNCATE_FRACTION, and REL_TRUNCATE_MINIMUM.

Referenced by lazy_vacuum_rel().

1698 should_attempt_truncation(LVRelStats *vacrelstats)
1699 {
1700  BlockNumber possibly_freeable;
1701 
1702  possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
1703  if (possibly_freeable > 0 &&
1704  (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
1705  possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
1706  old_snapshot_threshold < 0)
1707  return true;
1708  else
1709  return false;
1710 }
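
A worked example of this heuristic, using the REL_TRUNCATE_MINIMUM (1000) and REL_TRUNCATE_FRACTION (16) values defined in this file and assuming old_snapshot_threshold is disabled (-1):

#include <stdbool.h>
#include <stdio.h>

#define REL_TRUNCATE_MINIMUM  1000
#define REL_TRUNCATE_FRACTION 16

static bool
worth_truncating(unsigned rel_pages, unsigned nonempty_pages)
{
    unsigned possibly_freeable = rel_pages - nonempty_pages;

    return possibly_freeable > 0 &&
        (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
         possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);
}

int
main(void)
{
    printf("%d\n", worth_truncating(8192, 7800));  /* 0: 392 < 512 and < 1000 */
    printf("%d\n", worth_truncating(8192, 7600));  /* 1: 592 >= 8192/16 = 512 */
    return 0;
}

The two thresholds keep VACUUM from taking an AccessExclusiveLock when the tail space it could give back is negligible.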
static int vac_cmp_itemptr (const void *left, const void *right)

Definition at line 2064 of file vacuumlazy.c.

References ItemPointerGetBlockNumber, and ItemPointerGetOffsetNumber.

Referenced by lazy_tid_reaped().

2064 vac_cmp_itemptr(const void *left, const void *right)
2065 {
2066  BlockNumber lblk,
2067  rblk;
2068  OffsetNumber loff,
2069  roff;
2070 
2071  lblk = ItemPointerGetBlockNumber((ItemPointer) left);
2072  rblk = ItemPointerGetBlockNumber((ItemPointer) right);
2073 
2074  if (lblk < rblk)
2075  return -1;
2076  if (lblk > rblk)
2077  return 1;
2078 
2079  loff = ItemPointerGetOffsetNumber((ItemPointer) left);
2080  roff = ItemPointerGetOffsetNumber((ItemPointer) right);
2081 
2082  if (loff < roff)
2083  return -1;
2084  if (loff > roff)
2085  return 1;
2086 
2087  return 0;
2088 }
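
This block-major, offset-minor ordering is exactly what lets lazy_tid_reaped() run bsearch() over the dead_tuples array, which lazy_scan_heap() fills in physical heap order and therefore never needs to sort. A minimal standalone sketch of that contract, with a plain struct standing in for ItemPointerData:

#include <stdio.h>
#include <stdlib.h>

typedef struct { unsigned blk; unsigned off; } Tid;

/* same ordering as vac_cmp_itemptr(): block first, then offset */
static int
tid_cmp(const void *left, const void *right)
{
    const Tid *l = left, *r = right;

    if (l->blk != r->blk)
        return l->blk < r->blk ? -1 : 1;
    if (l->off != r->off)
        return l->off < r->off ? -1 : 1;
    return 0;
}

int
main(void)
{
    /* dead TIDs as lazy_scan_heap() records them: already in heap order */
    Tid dead[] = {{1, 3}, {1, 7}, {4, 2}, {9, 1}};
    Tid probe = {4, 2};
    Tid *hit = bsearch(&probe, dead, 4, sizeof(Tid), tid_cmp);

    printf("probe (4,2) is %s\n", hit ? "dead" : "live");
    return 0;
}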
static void vacuum_log_cleanup_info (Relation rel, LVRelStats *vacrelstats)

Definition at line 426 of file vacuumlazy.c.

References LVRelStats::latestRemovedXid, log_heap_cleanup_info(), RelationData::rd_node, RelationNeedsWAL, TransactionIdIsValid, and XLogIsNeeded.

Referenced by lazy_scan_heap().

426 vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
427 {
428  /*
429  * Skip this for relations for which no WAL is to be written, or if we're
430  * not trying to support archive recovery.
431  */
432  if (!RelationNeedsWAL(rel) || !XLogIsNeeded())
433  return;
434 
435  /*
436  * No need to write the record at all unless it contains a valid value
437  */
438  if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
439  (void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
440 }

Variable Documentation

static TransactionId FreezeLimit

Definition at line 140 of file vacuumlazy.c.

Referenced by lazy_check_needs_freeze(), lazy_scan_heap(), and lazy_vacuum_rel().

static MultiXactId MultiXactCutoff

Definition at line 141 of file vacuumlazy.c.

Referenced by lazy_scan_heap(), and lazy_vacuum_rel().

static BufferAccessStrategy vac_strategy

Definition at line 143 of file vacuumlazy.c.

Referenced by lazy_cleanup_index(), and lazy_vacuum_index().