vacuumlazy.c
1 /*-------------------------------------------------------------------------
2  *
3  * vacuumlazy.c
4  * Concurrent ("lazy") vacuuming.
5  *
6  *
7  * The major space usage for LAZY VACUUM is storage for the array of dead tuple
8  * TIDs. We want to ensure we can vacuum even the very largest relations with
9  * finite memory space usage. To do that, we set upper bounds on the number of
10  * tuples we will keep track of at once.
11  *
12  * We are willing to use at most maintenance_work_mem (or perhaps
13  * autovacuum_work_mem) memory space to keep track of dead tuples. We
14  * initially allocate an array of TIDs of that size, with an upper limit that
15  * depends on table size (this limit ensures we don't allocate a huge area
16  * uselessly for vacuuming small tables). If the array threatens to overflow,
17  * we suspend the heap scan phase and perform a pass of index cleanup and page
18  * compaction, then resume the heap scan with an empty TID array.
19  *
20  * If we're processing a table with no indexes, we can just vacuum each page
21  * as we go; there's no need to save up multiple tuples to minimize the number
22  * of index scans performed. So we don't use maintenance_work_mem memory for
23  * the TID array, just enough to hold as many heap tuples as fit on one page.
24  *
25  *
26  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
27  * Portions Copyright (c) 1994, Regents of the University of California
28  *
29  *
30  * IDENTIFICATION
31  * src/backend/commands/vacuumlazy.c
32  *
33  *-------------------------------------------------------------------------
34  */
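/*
 * Rough illustration (a sketch, not verbatim code from this file) of the
 * memory bound described above.  lazy_space_alloc(), defined later in this
 * file, sizes the dead-tuple array along these lines, assuming the
 * applicable work-mem setting is expressed in kilobytes:
 *
 *     maxtuples = (maintenance_work_mem * 1024L) / sizeof(ItemPointerData);
 *     maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
 *     maxtuples = Min(maxtuples, (long) relblocks * LAZY_ALLOC_TUPLES);
 *     maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
 *
 * With the default 64MB setting and a 6-byte ItemPointerData, that allows
 * roughly 11 million dead TIDs before an index-vacuum pass is forced.
 */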
35 #include "postgres.h"
36 
37 #include <math.h>
38 
39 #include "access/genam.h"
40 #include "access/heapam.h"
41 #include "access/heapam_xlog.h"
42 #include "access/htup_details.h"
43 #include "access/multixact.h"
44 #include "access/transam.h"
45 #include "access/visibilitymap.h"
46 #include "access/xlog.h"
47 #include "catalog/catalog.h"
48 #include "catalog/storage.h"
49 #include "commands/dbcommands.h"
50 #include "commands/progress.h"
51 #include "commands/vacuum.h"
52 #include "miscadmin.h"
53 #include "pgstat.h"
54 #include "portability/instr_time.h"
55 #include "postmaster/autovacuum.h"
56 #include "storage/bufmgr.h"
57 #include "storage/freespace.h"
58 #include "storage/lmgr.h"
59 #include "utils/lsyscache.h"
60 #include "utils/memutils.h"
61 #include "utils/pg_rusage.h"
62 #include "utils/timestamp.h"
63 #include "utils/tqual.h"
64 
65 
66 /*
67  * Space/time tradeoff parameters: do these need to be user-tunable?
68  *
69  * To consider truncating the relation, we want there to be at least
70  * REL_TRUNCATE_MINIMUM or (relsize / REL_TRUNCATE_FRACTION) (whichever
71  * is less) potentially-freeable pages.
72  */
73 #define REL_TRUNCATE_MINIMUM 1000
74 #define REL_TRUNCATE_FRACTION 16
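/*
 * Worked example: a 100000-page relation qualifies once at least
 * Min(REL_TRUNCATE_MINIMUM, 100000 / REL_TRUNCATE_FRACTION) = 1000
 * potentially-freeable pages sit at its end, while a 4000-page relation
 * needs only 4000 / 16 = 250 such pages (see should_attempt_truncation()
 * below).
 */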
75 
76 /*
77  * Timing parameters for truncate locking heuristics.
78  *
79  * These were not exposed as user tunable GUC values because it didn't seem
80  * that the potential for improvement was great enough to merit the cost of
81  * supporting them.
82  */
83 #define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL 20 /* ms */
84 #define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50 /* ms */
85 #define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000 /* ms */
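/*
 * Taken together, lazy_truncate_heap() below retries the exclusive lock
 * every VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL ms and gives up after
 * VACUUM_TRUNCATE_LOCK_TIMEOUT / VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL = 100
 * attempts, i.e. about five seconds of waiting.
 */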
86 
87 /*
88  * Guesstimation of number of dead tuples per page. This is used to
89  * provide an upper limit to memory allocated when vacuuming small
90  * tables.
91  */
92 #define LAZY_ALLOC_TUPLES MaxHeapTuplesPerPage
93 
94 /*
95  * Before we consider skipping a page that's marked as clean in
96  * the visibility map, we must've seen at least this many clean pages.
97  */
98 #define SKIP_PAGES_THRESHOLD ((BlockNumber) 32)
99 
100 /*
101  * Size of the prefetch window for lazy vacuum backwards truncation scan.
102  * Needs to be a power of 2.
103  */
104 #define PREFETCH_SIZE ((BlockNumber) 32)
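/*
 * The power-of-2 requirement lets the backwards truncation scan
 * (count_nondeletable_pages, later in this file) find the start of the
 * current prefetch window with a simple mask, roughly
 * blkno & ~(PREFETCH_SIZE - 1), instead of a division.
 */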
105 
106 typedef struct LVRelStats
107 {
108  /* hasindex = true means two-pass strategy; false means one-pass */
109  bool hasindex;
110  /* Overall statistics about rel */
111  BlockNumber old_rel_pages; /* previous value of pg_class.relpages */
112  BlockNumber rel_pages; /* total number of pages */
113  BlockNumber scanned_pages; /* number of pages we examined */
114  BlockNumber pinskipped_pages; /* # of pages we skipped due to a pin */
115  BlockNumber frozenskipped_pages; /* # of frozen pages we skipped */
116  BlockNumber tupcount_pages; /* pages whose tuples we counted */
117  double scanned_tuples; /* counts only tuples on tupcount_pages */
118  double old_rel_tuples; /* previous value of pg_class.reltuples */
119  double new_rel_tuples; /* new estimated total # of tuples */
120  double new_dead_tuples; /* new estimated total # of dead tuples */
121  BlockNumber pages_removed;
122  double tuples_deleted;
123  BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
124  /* List of TIDs of tuples we intend to delete */
125  /* NB: this list is ordered by TID address */
126  int num_dead_tuples; /* current # of entries */
127  int max_dead_tuples; /* # slots allocated in array */
128  ItemPointer dead_tuples; /* array of ItemPointerData */
129  int num_index_scans;
130  TransactionId latestRemovedXid;
131  bool lock_waiter_detected;
132 } LVRelStats;
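/*
 * A sketch (not verbatim) of how the dead_tuples array is used: TIDs are
 * appended in heap order by lazy_record_dead_tuple(), so the array stays
 * sorted by TID, and lazy_tid_reaped() -- the index_bulk_delete() callback
 * defined later in this file -- can probe it with a binary search:
 *
 *     vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
 *     vacrelstats->num_dead_tuples++;
 *     ...
 *     res = (ItemPointer) bsearch(itemptr,
 *                                 vacrelstats->dead_tuples,
 *                                 vacrelstats->num_dead_tuples,
 *                                 sizeof(ItemPointerData),
 *                                 vac_cmp_itemptr);
 *     return (res != NULL);
 */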
133 
134 
135 /* A few variables that don't seem worth passing around as parameters */
136 static int elevel = -1;
137 
138 static TransactionId OldestXmin;
139 static TransactionId FreezeLimit;
140 static MultiXactId MultiXactCutoff;
141 
142 static BufferAccessStrategy vac_strategy;
143 
144 
145 /* non-export function prototypes */
146 static void lazy_scan_heap(Relation onerel, int options,
147  LVRelStats *vacrelstats, Relation *Irel, int nindexes,
148  bool aggressive);
149 static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
150 static bool lazy_check_needs_freeze(Buffer buf, bool *hastup);
151 static void lazy_vacuum_index(Relation indrel,
152  IndexBulkDeleteResult **stats,
153  LVRelStats *vacrelstats);
154 static void lazy_cleanup_index(Relation indrel,
155  IndexBulkDeleteResult *stats,
156  LVRelStats *vacrelstats);
157 static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
158  int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
159 static bool should_attempt_truncation(LVRelStats *vacrelstats);
160 static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
161 static BlockNumber count_nondeletable_pages(Relation onerel,
162  LVRelStats *vacrelstats);
163 static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
164 static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
165  ItemPointer itemptr);
166 static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
167 static int vac_cmp_itemptr(const void *left, const void *right);
168 static bool heap_page_is_all_visible(Relation rel, Buffer buf,
169  TransactionId *visibility_cutoff_xid, bool *all_frozen);
170 
171 
172 /*
173  * lazy_vacuum_rel() -- perform LAZY VACUUM for one heap relation
174  *
175  * This routine vacuums a single heap, cleans out its indexes, and
176  * updates its relpages and reltuples statistics.
177  *
178  * At entry, we have already established a transaction and opened
179  * and locked the relation.
180  */
181 void
182 lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
183  BufferAccessStrategy bstrategy)
184 {
185  LVRelStats *vacrelstats;
186  Relation *Irel;
187  int nindexes;
188  PGRUsage ru0;
189  TimestampTz starttime = 0;
190  long secs;
191  int usecs;
192  double read_rate,
193  write_rate;
194  bool aggressive; /* should we scan all unfrozen pages? */
195  bool scanned_all_unfrozen; /* actually scanned all such pages? */
196  TransactionId xidFullScanLimit;
197  MultiXactId mxactFullScanLimit;
198  BlockNumber new_rel_pages;
199  double new_rel_tuples;
200  BlockNumber new_rel_allvisible;
201  double new_live_tuples;
202  TransactionId new_frozen_xid;
203  MultiXactId new_min_multi;
204 
205  Assert(params != NULL);
206 
207  /* measure elapsed time iff autovacuum logging requires it */
208  if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
209  {
210  pg_rusage_init(&ru0);
211  starttime = GetCurrentTimestamp();
212  }
213 
214  if (options & VACOPT_VERBOSE)
215  elevel = INFO;
216  else
217  elevel = DEBUG2;
218 
219  pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
220  RelationGetRelid(onerel));
221 
222  vac_strategy = bstrategy;
223 
224  vacuum_set_xid_limits(onerel,
225  params->freeze_min_age,
226  params->freeze_table_age,
227  params->multixact_freeze_min_age,
228  params->multixact_freeze_table_age,
229  &OldestXmin, &FreezeLimit, &xidFullScanLimit,
230  &MultiXactCutoff, &mxactFullScanLimit);
231 
232  /*
233  * We request an aggressive scan if the table's frozen Xid is now older
234  * than or equal to the requested Xid full-table scan limit; or if the
235  * table's minimum MultiXactId is older than or equal to the requested
236  * mxid full-table scan limit; or if DISABLE_PAGE_SKIPPING was specified.
237  */
238  aggressive = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
239  xidFullScanLimit);
240  aggressive |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
241  mxactFullScanLimit);
242  if (options & VACOPT_DISABLE_PAGE_SKIPPING)
243  aggressive = true;
244 
245  vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
246 
247  vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
248  vacrelstats->old_rel_tuples = onerel->rd_rel->reltuples;
249  vacrelstats->num_index_scans = 0;
250  vacrelstats->pages_removed = 0;
251  vacrelstats->lock_waiter_detected = false;
252 
253  /* Open all indexes of the relation */
254  vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
255  vacrelstats->hasindex = (nindexes > 0);
256 
257  /* Do the vacuuming */
258  lazy_scan_heap(onerel, options, vacrelstats, Irel, nindexes, aggressive);
259 
260  /* Done with indexes */
261  vac_close_indexes(nindexes, Irel, NoLock);
262 
263  /*
264  * Compute whether we actually scanned all the unfrozen pages. If we did,
265  * we can adjust relfrozenxid and relminmxid.
266  *
267  * NB: We need to check this before truncating the relation, because that
268  * will change ->rel_pages.
269  */
270  if ((vacrelstats->scanned_pages + vacrelstats->frozenskipped_pages)
271  < vacrelstats->rel_pages)
272  {
273  Assert(!aggressive);
274  scanned_all_unfrozen = false;
275  }
276  else
277  scanned_all_unfrozen = true;
278 
279  /*
280  * Optionally truncate the relation.
281  */
282  if (should_attempt_truncation(vacrelstats))
283  lazy_truncate_heap(onerel, vacrelstats);
284 
285  /* Report that we are now doing final cleanup */
286  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
287  PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
288 
289  /* Vacuum the Free Space Map */
290  FreeSpaceMapVacuum(onerel);
291 
292  /*
293  * Update statistics in pg_class.
294  *
295  * A corner case here is that if we scanned no pages at all because every
296  * page is all-visible, we should not update relpages/reltuples, because
297  * we have no new information to contribute. In particular this keeps us
298  * from replacing relpages=reltuples=0 (which means "unknown tuple
299  * density") with nonzero relpages and reltuples=0 (which means "zero
300  * tuple density") unless there's some actual evidence for the latter.
301  *
302  * It's important that we use tupcount_pages and not scanned_pages for the
303  * check described above; scanned_pages counts pages where we could not
304  * get cleanup lock, and which were processed only for frozenxid purposes.
305  *
306  * We do update relallvisible even in the corner case, since if the table
307  * is all-visible we'd definitely like to know that. But clamp the value
308  * to be not more than what we're setting relpages to.
309  *
310  * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
311  * since then we don't know for certain that all tuples have a newer xmin.
312  */
313  new_rel_pages = vacrelstats->rel_pages;
314  new_rel_tuples = vacrelstats->new_rel_tuples;
315  if (vacrelstats->tupcount_pages == 0 && new_rel_pages > 0)
316  {
317  new_rel_pages = vacrelstats->old_rel_pages;
318  new_rel_tuples = vacrelstats->old_rel_tuples;
319  }
320 
321  visibilitymap_count(onerel, &new_rel_allvisible, NULL);
322  if (new_rel_allvisible > new_rel_pages)
323  new_rel_allvisible = new_rel_pages;
324 
325  new_frozen_xid = scanned_all_unfrozen ? FreezeLimit : InvalidTransactionId;
326  new_min_multi = scanned_all_unfrozen ? MultiXactCutoff : InvalidMultiXactId;
327 
328  vac_update_relstats(onerel,
329  new_rel_pages,
330  new_rel_tuples,
331  new_rel_allvisible,
332  vacrelstats->hasindex,
333  new_frozen_xid,
334  new_min_multi,
335  false);
336 
337  /* report results to the stats collector, too */
338  new_live_tuples = new_rel_tuples - vacrelstats->new_dead_tuples;
339  if (new_live_tuples < 0)
340  new_live_tuples = 0; /* just in case */
341 
342  pgstat_report_vacuum(RelationGetRelid(onerel),
343  onerel->rd_rel->relisshared,
344  new_live_tuples,
345  vacrelstats->new_dead_tuples);
346  pgstat_progress_end_command();
347 
348  /* and log the action if appropriate */
349  if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
350  {
351  TimestampTz endtime = GetCurrentTimestamp();
352 
353  if (params->log_min_duration == 0 ||
354  TimestampDifferenceExceeds(starttime, endtime,
355  params->log_min_duration))
356  {
357  StringInfoData buf;
358  char *msgfmt;
359 
360  TimestampDifference(starttime, endtime, &secs, &usecs);
361 
362  read_rate = 0;
363  write_rate = 0;
364  if ((secs > 0) || (usecs > 0))
365  {
366  read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
367  (secs + usecs / 1000000.0);
368  write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
369  (secs + usecs / 1000000.0);
370  }
371 
372  /*
373  * This is pretty messy, but we split it up so that we can skip
374  * emitting individual parts of the message when not applicable.
375  */
376  initStringInfo(&buf);
377  if (aggressive)
378  msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
379  else
380  msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
381  appendStringInfo(&buf, msgfmt,
382  get_database_name(MyDatabaseId),
383  get_namespace_name(RelationGetNamespace(onerel)),
384  RelationGetRelationName(onerel),
385  vacrelstats->num_index_scans);
386  appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
387  vacrelstats->pages_removed,
388  vacrelstats->rel_pages,
389  vacrelstats->pinskipped_pages,
390  vacrelstats->frozenskipped_pages);
391  appendStringInfo(&buf,
392  _("tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, oldest xmin: %u\n"),
393  vacrelstats->tuples_deleted,
394  vacrelstats->new_rel_tuples,
395  vacrelstats->new_dead_tuples,
396  OldestXmin);
397  appendStringInfo(&buf,
398  _("buffer usage: %d hits, %d misses, %d dirtied\n"),
399  VacuumPageHit,
400  VacuumPageMiss,
401  VacuumPageDirty);
402  appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
403  read_rate, write_rate);
404  appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
405 
406  ereport(LOG,
407  (errmsg_internal("%s", buf.data)));
408  pfree(buf.data);
409  }
410  }
411 }
412 
413 /*
414  * For Hot Standby we need to know the highest transaction id that will
415  * be removed by any change. VACUUM proceeds in a number of passes so
416  * we need to consider how each pass operates. The first phase runs
417  * heap_page_prune(), which can issue XLOG_HEAP2_CLEAN records as it
418  * progresses - these will have a latestRemovedXid on each record.
419  * In some cases this removes all of the tuples to be removed, though
420  * often we have dead tuples with index pointers so we must remember them
421  * for removal in phase 3. Index records for those rows are removed
422  * in phase 2 and index blocks do not have MVCC information attached.
423  * So before we can allow removal of any index tuples we need to issue
424  * a WAL record containing the latestRemovedXid of rows that will be
425  * removed in phase three. This allows recovery queries to block at the
426  * correct place, i.e. before phase two, rather than during phase three
427  * which would be after the rows have become inaccessible.
428  */
429 static void
430 vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
431 {
432  /*
433  * Skip this for relations for which no WAL is to be written, or if we're
434  * not trying to support archive recovery.
435  */
436  if (!RelationNeedsWAL(rel) || !XLogIsNeeded())
437  return;
438 
439  /*
440  * No need to write the record at all unless it contains a valid value
441  */
442  if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
443  (void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
444 }
445 
446 /*
447  * lazy_scan_heap() -- scan an open heap relation
448  *
449  * This routine prunes each page in the heap, which will among other
450  * things truncate dead tuples to dead line pointers, defragment the
451  * page, and set commit status bits (see heap_page_prune). It also builds
452  * lists of dead tuples and pages with free space, calculates statistics
453  * on the number of live tuples in the heap, and marks pages as
454  * all-visible if appropriate. When done, or when we run low on space for
455  * dead-tuple TIDs, invoke vacuuming of indexes and call lazy_vacuum_heap
456  * to reclaim dead line pointers.
457  *
458  * If there are no indexes then we can reclaim line pointers on the fly;
459  * dead line pointers need only be retained until all index pointers that
460  * reference them have been killed.
461  */
462 static void
463 lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
464  Relation *Irel, int nindexes, bool aggressive)
465 {
466  BlockNumber nblocks,
467  blkno;
468  HeapTupleData tuple;
469  char *relname;
470  BlockNumber empty_pages,
471  vacuumed_pages;
472  double num_tuples,
473  tups_vacuumed,
474  nkeep,
475  nunused;
476  IndexBulkDeleteResult **indstats;
477  int i;
478  PGRUsage ru0;
479  Buffer vmbuffer = InvalidBuffer;
480  BlockNumber next_unskippable_block;
481  bool skipping_blocks;
482  xl_heap_freeze_tuple *frozen;
483  StringInfoData buf;
484  const int initprog_index[] = {
485  PROGRESS_VACUUM_PHASE,
486  PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
487  PROGRESS_VACUUM_MAX_DEAD_TUPLES
488  };
489  int64 initprog_val[3];
490 
491  pg_rusage_init(&ru0);
492 
493  relname = RelationGetRelationName(onerel);
494  if (aggressive)
495  ereport(elevel,
496  (errmsg("aggressively vacuuming \"%s.%s\"",
497  get_namespace_name(RelationGetNamespace(onerel)),
498  relname)));
499  else
500  ereport(elevel,
501  (errmsg("vacuuming \"%s.%s\"",
502  get_namespace_name(RelationGetNamespace(onerel)),
503  relname)));
504 
505  empty_pages = vacuumed_pages = 0;
506  num_tuples = tups_vacuumed = nkeep = nunused = 0;
507 
508  indstats = (IndexBulkDeleteResult **)
509  palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
510 
511  nblocks = RelationGetNumberOfBlocks(onerel);
512  vacrelstats->rel_pages = nblocks;
513  vacrelstats->scanned_pages = 0;
514  vacrelstats->tupcount_pages = 0;
515  vacrelstats->nonempty_pages = 0;
516  vacrelstats->latestRemovedXid = InvalidTransactionId;
517 
518  lazy_space_alloc(vacrelstats, nblocks);
519  frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
520 
521  /* Report that we're scanning the heap, advertising total # of blocks */
522  initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
523  initprog_val[1] = nblocks;
524  initprog_val[2] = vacrelstats->max_dead_tuples;
525  pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
526 
527  /*
528  * Except when aggressive is set, we want to skip pages that are
529  * all-visible according to the visibility map, but only when we can skip
530  * at least SKIP_PAGES_THRESHOLD consecutive pages. Since we're reading
531  * sequentially, the OS should be doing readahead for us, so there's no
532  * gain in skipping a page now and then; that's likely to disable
533  * readahead and so be counterproductive. Also, skipping even a single
534  * page means that we can't update relfrozenxid, so we only want to do it
535  * if we can skip a goodly number of pages.
536  *
537  * When aggressive is set, we can't skip pages just because they are
538  * all-visible, but we can still skip pages that are all-frozen, since
539  * such pages do not need freezing and do not affect the value that we can
540  * safely set for relfrozenxid or relminmxid.
541  *
542  * Before entering the main loop, establish the invariant that
543  * next_unskippable_block is the next block number >= blkno that we can't
544  * skip based on the visibility map, either all-visible for a regular scan
545  * or all-frozen for an aggressive scan. We set it to nblocks if there's
546  * no such block. We also set up the skipping_blocks flag correctly at
547  * this stage.
548  *
549  * Note: The value returned by visibilitymap_get_status could be slightly
550  * out-of-date, since we make this test before reading the corresponding
551  * heap page or locking the buffer. This is OK. If we mistakenly think
552  * that the page is all-visible or all-frozen when in fact the flag's just
553  * been cleared, we might fail to vacuum the page. It's easy to see that
554  * skipping a page when aggressive is not set is not a very big deal; we
555  * might leave some dead tuples lying around, but the next vacuum will
556  * find them. But even when aggressive *is* set, it's still OK if we miss
557  * a page whose all-frozen marking has just been cleared. Any new XIDs
558  * just added to that page are necessarily newer than the GlobalXmin we
559  * computed, so they'll have no effect on the value to which we can safely
560  * set relfrozenxid. A similar argument applies for MXIDs and relminmxid.
561  *
562  * We will scan the table's last page, at least to the extent of
563  * determining whether it has tuples or not, even if it should be skipped
564  * according to the above rules; except when we've already determined that
565  * it's not worth trying to truncate the table. This avoids having
566  * lazy_truncate_heap() take access-exclusive lock on the table to attempt
567  * a truncation that just fails immediately because there are tuples in
568  * the last page. This is worth avoiding mainly because such a lock must
569  * be replayed on any hot standby, where it can be disruptive.
570  */
571  next_unskippable_block = 0;
572  if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
573  {
574  while (next_unskippable_block < nblocks)
575  {
576  uint8 vmstatus;
577 
578  vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
579  &vmbuffer);
580  if (aggressive)
581  {
582  if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
583  break;
584  }
585  else
586  {
587  if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
588  break;
589  }
590  vacuum_delay_point();
591  next_unskippable_block++;
592  }
593  }
594 
595  if (next_unskippable_block >= SKIP_PAGES_THRESHOLD)
596  skipping_blocks = true;
597  else
598  skipping_blocks = false;
599 
600  for (blkno = 0; blkno < nblocks; blkno++)
601  {
602  Buffer buf;
603  Page page;
604  OffsetNumber offnum,
605  maxoff;
606  bool tupgone,
607  hastup;
608  int prev_dead_count;
609  int nfrozen;
610  Size freespace;
611  bool all_visible_according_to_vm = false;
612  bool all_visible;
613  bool all_frozen = true; /* provided all_visible is also true */
614  bool has_dead_tuples;
615  TransactionId visibility_cutoff_xid = InvalidTransactionId;
616 
617  /* see note above about forcing scanning of last page */
618 #define FORCE_CHECK_PAGE() \
619  (blkno == nblocks - 1 && should_attempt_truncation(vacrelstats))
620 
621  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
622 
623  if (blkno == next_unskippable_block)
624  {
625  /* Time to advance next_unskippable_block */
626  next_unskippable_block++;
627  if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
628  {
629  while (next_unskippable_block < nblocks)
630  {
631  uint8 vmskipflags;
632 
633  vmskipflags = visibilitymap_get_status(onerel,
634  next_unskippable_block,
635  &vmbuffer);
636  if (aggressive)
637  {
638  if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
639  break;
640  }
641  else
642  {
643  if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
644  break;
645  }
646  vacuum_delay_point();
647  next_unskippable_block++;
648  }
649  }
650 
651  /*
652  * We know we can't skip the current block. But set up
653  * skipping_blocks to do the right thing at the following blocks.
654  */
655  if (next_unskippable_block - blkno > SKIP_PAGES_THRESHOLD)
656  skipping_blocks = true;
657  else
658  skipping_blocks = false;
659 
660  /*
661  * Normally, the fact that we can't skip this block must mean that
662  * it's not all-visible. But in an aggressive vacuum we know only
663  * that it's not all-frozen, so it might still be all-visible.
664  */
665  if (aggressive && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
666  all_visible_according_to_vm = true;
667  }
668  else
669  {
670  /*
671  * The current block is potentially skippable; if we've seen a
672  * long enough run of skippable blocks to justify skipping it, and
673  * we're not forced to check it, then go ahead and skip.
674  * Otherwise, the page must be at least all-visible if not
675  * all-frozen, so we can set all_visible_according_to_vm = true.
676  */
677  if (skipping_blocks && !FORCE_CHECK_PAGE())
678  {
679  /*
680  * Tricky, tricky. If this is in aggressive vacuum, the page
681  * must have been all-frozen at the time we checked whether it
682  * was skippable, but it might not be any more. We must be
683  * careful to count it as a skipped all-frozen page in that
684  * case, or else we'll think we can't update relfrozenxid and
685  * relminmxid. If it's not an aggressive vacuum, we don't
686  * know whether it was all-frozen, so we have to recheck; but
687  * in this case an approximate answer is OK.
688  */
689  if (aggressive || VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
690  vacrelstats->frozenskipped_pages++;
691  continue;
692  }
693  all_visible_according_to_vm = true;
694  }
695 
696  vacuum_delay_point();
697 
698  /*
699  * If we are close to overrunning the available space for dead-tuple
700  * TIDs, pause and do a cycle of vacuuming before we tackle this page.
701  */
702  if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
703  vacrelstats->num_dead_tuples > 0)
704  {
705  const int hvp_index[] = {
706  PROGRESS_VACUUM_PHASE,
707  PROGRESS_VACUUM_NUM_INDEX_VACUUMS
708  };
709  int64 hvp_val[2];
710 
711  /*
712  * Before beginning index vacuuming, we release any pin we may
713  * hold on the visibility map page. This isn't necessary for
714  * correctness, but we do it anyway to avoid holding the pin
715  * across a lengthy, unrelated operation.
716  */
717  if (BufferIsValid(vmbuffer))
718  {
719  ReleaseBuffer(vmbuffer);
720  vmbuffer = InvalidBuffer;
721  }
722 
723  /* Log cleanup info before we touch indexes */
724  vacuum_log_cleanup_info(onerel, vacrelstats);
725 
726  /* Report that we are now vacuuming indexes */
727  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
728  PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
729 
730  /* Remove index entries */
731  for (i = 0; i < nindexes; i++)
732  lazy_vacuum_index(Irel[i],
733  &indstats[i],
734  vacrelstats);
735 
736  /*
737  * Report that we are now vacuuming the heap. We also increase
738  * the number of index scans here; note that by using
739  * pgstat_progress_update_multi_param we can update both
740  * parameters atomically.
741  */
742  hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
743  hvp_val[1] = vacrelstats->num_index_scans + 1;
744  pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
745 
746  /* Remove tuples from heap */
747  lazy_vacuum_heap(onerel, vacrelstats);
748 
749  /*
750  * Forget the now-vacuumed tuples, and press on, but be careful
751  * not to reset latestRemovedXid since we want that value to be
752  * valid.
753  */
754  vacrelstats->num_dead_tuples = 0;
755  vacrelstats->num_index_scans++;
756 
757  /* Report that we are once again scanning the heap */
758  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
759  PROGRESS_VACUUM_PHASE_SCAN_HEAP);
760  }
761 
762  /*
763  * Pin the visibility map page in case we need to mark the page
764  * all-visible. In most cases this will be very cheap, because we'll
765  * already have the correct page pinned anyway. However, it's
766  * possible that (a) next_unskippable_block is covered by a different
767  * VM page than the current block or (b) we released our pin and did a
768  * cycle of index vacuuming.
769  *
770  */
771  visibilitymap_pin(onerel, blkno, &vmbuffer);
772 
773  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
774  RBM_NORMAL, vac_strategy);
775 
776  /* We need buffer cleanup lock so that we can prune HOT chains. */
777  if (!ConditionalLockBufferForCleanup(buf))
778  {
779  /*
780  * If we're not performing an aggressive scan to guard against XID
781  * wraparound, and we don't want to forcibly check the page, then
782  * it's OK to skip vacuuming pages we get a lock conflict on. They
783  * will be dealt with in some future vacuum.
784  */
785  if (!aggressive && !FORCE_CHECK_PAGE())
786  {
787  ReleaseBuffer(buf);
788  vacrelstats->pinskipped_pages++;
789  continue;
790  }
791 
792  /*
793  * Read the page with share lock to see if any xids on it need to
794  * be frozen. If not we just skip the page, after updating our
795  * scan statistics. If there are some, we wait for cleanup lock.
796  *
797  * We could defer the lock request further by remembering the page
798  * and coming back to it later, or we could even register
799  * ourselves for multiple buffers and then service whichever one
800  * is received first. For now, this seems good enough.
801  *
802  * If we get here with aggressive false, then we're just forcibly
803  * checking the page, and so we don't want to insist on getting
804  * the lock; we only need to know if the page contains tuples, so
805  * that we can update nonempty_pages correctly. It's convenient
806  * to use lazy_check_needs_freeze() for both situations, though.
807  */
808  LockBuffer(buf, BUFFER_LOCK_SHARE);
809  if (!lazy_check_needs_freeze(buf, &hastup))
810  {
811  UnlockReleaseBuffer(buf);
812  vacrelstats->scanned_pages++;
813  vacrelstats->pinskipped_pages++;
814  if (hastup)
815  vacrelstats->nonempty_pages = blkno + 1;
816  continue;
817  }
818  if (!aggressive)
819  {
820  /*
821  * Here, we must not advance scanned_pages; that would amount
822  * to claiming that the page contains no freezable tuples.
823  */
824  UnlockReleaseBuffer(buf);
825  vacrelstats->pinskipped_pages++;
826  if (hastup)
827  vacrelstats->nonempty_pages = blkno + 1;
828  continue;
829  }
830  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
831  LockBufferForCleanup(buf);
832  /* drop through to normal processing */
833  }
834 
835  vacrelstats->scanned_pages++;
836  vacrelstats->tupcount_pages++;
837 
838  page = BufferGetPage(buf);
839 
840  if (PageIsNew(page))
841  {
842  /*
843  * An all-zeroes page could be left over if a backend extends the
844  * relation but crashes before initializing the page. Reclaim such
845  * pages for use.
846  *
847  * We have to be careful here because we could be looking at a
848  * page that someone has just added to the relation and not yet
849  * been able to initialize (see RelationGetBufferForTuple). To
850  * protect against that, release the buffer lock, grab the
851  * relation extension lock momentarily, and re-lock the buffer. If
852  * the page is still uninitialized by then, it must be left over
853  * from a crashed backend, and we can initialize it.
854  *
855  * We don't really need the relation lock when this is a new or
856  * temp relation, but it's probably not worth the code space to
857  * check that, since this surely isn't a critical path.
858  *
859  * Note: the comparable code in vacuum.c need not worry because
860  * it's got exclusive lock on the whole relation.
861  */
862  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
863  LockRelationForExtension(onerel, ExclusiveLock);
864  UnlockRelationForExtension(onerel, ExclusiveLock);
865  LockBufferForCleanup(buf);
866  if (PageIsNew(page))
867  {
868  ereport(WARNING,
869  (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
870  relname, blkno)));
871  PageInit(page, BufferGetPageSize(buf), 0);
872  empty_pages++;
873  }
874  freespace = PageGetHeapFreeSpace(page);
875  MarkBufferDirty(buf);
876  UnlockReleaseBuffer(buf);
877 
878  RecordPageWithFreeSpace(onerel, blkno, freespace);
879  continue;
880  }
881 
882  if (PageIsEmpty(page))
883  {
884  empty_pages++;
885  freespace = PageGetHeapFreeSpace(page);
886 
887  /* empty pages are always all-visible and all-frozen */
888  if (!PageIsAllVisible(page))
889  {
890  START_CRIT_SECTION();
891 
892  /* mark buffer dirty before writing a WAL record */
893  MarkBufferDirty(buf);
894 
895  /*
896  * It's possible that another backend has extended the heap,
897  * initialized the page, and then failed to WAL-log the page
898  * due to an ERROR. Since heap extension is not WAL-logged,
899  * recovery might try to replay our record setting the page
900  * all-visible and find that the page isn't initialized, which
901  * will cause a PANIC. To prevent that, check whether the
902  * page has been previously WAL-logged, and if not, do that
903  * now.
904  */
905  if (RelationNeedsWAL(onerel) &&
906  PageGetLSN(page) == InvalidXLogRecPtr)
907  log_newpage_buffer(buf, true);
908 
909  PageSetAllVisible(page);
910  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
911  vmbuffer, InvalidTransactionId,
912  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
913  END_CRIT_SECTION();
914  }
915 
916  UnlockReleaseBuffer(buf);
917  RecordPageWithFreeSpace(onerel, blkno, freespace);
918  continue;
919  }
920 
921  /*
922  * Prune all HOT-update chains in this page.
923  *
924  * We count tuples removed by the pruning step as removed by VACUUM.
925  */
926  tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
927  &vacrelstats->latestRemovedXid);
928 
929  /*
930  * Now scan the page to collect vacuumable items and check for tuples
931  * requiring freezing.
932  */
933  all_visible = true;
934  has_dead_tuples = false;
935  nfrozen = 0;
936  hastup = false;
937  prev_dead_count = vacrelstats->num_dead_tuples;
938  maxoff = PageGetMaxOffsetNumber(page);
939 
940  /*
941  * Note: If you change anything in the loop below, also look at
942  * heap_page_is_all_visible to see if that needs to be changed.
943  */
944  for (offnum = FirstOffsetNumber;
945  offnum <= maxoff;
946  offnum = OffsetNumberNext(offnum))
947  {
948  ItemId itemid;
949 
950  itemid = PageGetItemId(page, offnum);
951 
952  /* Unused items require no processing, but we count 'em */
953  if (!ItemIdIsUsed(itemid))
954  {
955  nunused += 1;
956  continue;
957  }
958 
959  /* Redirect items mustn't be touched */
960  if (ItemIdIsRedirected(itemid))
961  {
962  hastup = true; /* this page won't be truncatable */
963  continue;
964  }
965 
966  ItemPointerSet(&(tuple.t_self), blkno, offnum);
967 
968  /*
969  * DEAD item pointers are to be vacuumed normally; but we don't
970  * count them in tups_vacuumed, else we'd be double-counting (at
971  * least in the common case where heap_page_prune() just freed up
972  * a non-HOT tuple).
973  */
974  if (ItemIdIsDead(itemid))
975  {
976  lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
977  all_visible = false;
978  continue;
979  }
980 
981  Assert(ItemIdIsNormal(itemid));
982 
983  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
984  tuple.t_len = ItemIdGetLength(itemid);
985  tuple.t_tableOid = RelationGetRelid(onerel);
986 
987  tupgone = false;
988 
989  switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
990  {
991  case HEAPTUPLE_DEAD:
992 
993  /*
994  * Ordinarily, DEAD tuples would have been removed by
995  * heap_page_prune(), but it's possible that the tuple
996  * state changed since heap_page_prune() looked. In
997  * particular an INSERT_IN_PROGRESS tuple could have
998  * changed to DEAD if the inserter aborted. So this
999  * cannot be considered an error condition.
1000  *
1001  * If the tuple is HOT-updated then it must only be
1002  * removed by a prune operation; so we keep it just as if
1003  * it were RECENTLY_DEAD. Also, if it's a heap-only
1004  * tuple, we choose to keep it, because it'll be a lot
1005  * cheaper to get rid of it in the next pruning pass than
1006  * to treat it like an indexed tuple.
1007  */
1008  if (HeapTupleIsHotUpdated(&tuple) ||
1009  HeapTupleIsHeapOnly(&tuple))
1010  nkeep += 1;
1011  else
1012  tupgone = true; /* we can delete the tuple */
1013  all_visible = false;
1014  break;
1015  case HEAPTUPLE_LIVE:
1016  /* Tuple is good --- but let's do some validity checks */
1017  if (onerel->rd_rel->relhasoids &&
1018  !OidIsValid(HeapTupleGetOid(&tuple)))
1019  elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
1020  relname, blkno, offnum);
1021 
1022  /*
1023  * Is the tuple definitely visible to all transactions?
1024  *
1025  * NB: Like with per-tuple hint bits, we can't set the
1026  * PD_ALL_VISIBLE flag if the inserter committed
1027  * asynchronously. See SetHintBits for more info. Check
1028  * that the tuple is hinted xmin-committed because of
1029  * that.
1030  */
1031  if (all_visible)
1032  {
1033  TransactionId xmin;
1034 
1035  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
1036  {
1037  all_visible = false;
1038  break;
1039  }
1040 
1041  /*
1042  * The inserter definitely committed. But is it old
1043  * enough that everyone sees it as committed?
1044  */
1045  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
1046  if (!TransactionIdPrecedes(xmin, OldestXmin))
1047  {
1048  all_visible = false;
1049  break;
1050  }
1051 
1052  /* Track newest xmin on page. */
1053  if (TransactionIdFollows(xmin, visibility_cutoff_xid))
1054  visibility_cutoff_xid = xmin;
1055  }
1056  break;
1057  case HEAPTUPLE_RECENTLY_DEAD:
1058 
1059  /*
1060  * If tuple is recently deleted then we must not remove it
1061  * from relation.
1062  */
1063  nkeep += 1;
1064  all_visible = false;
1065  break;
1066  case HEAPTUPLE_INSERT_IN_PROGRESS:
1067  /* This is an expected case during concurrent vacuum */
1068  all_visible = false;
1069  break;
1070  case HEAPTUPLE_DELETE_IN_PROGRESS:
1071  /* This is an expected case during concurrent vacuum */
1072  all_visible = false;
1073  break;
1074  default:
1075  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1076  break;
1077  }
1078 
1079  if (tupgone)
1080  {
1081  lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
1082  HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
1083  &vacrelstats->latestRemovedXid);
1084  tups_vacuumed += 1;
1085  has_dead_tuples = true;
1086  }
1087  else
1088  {
1089  bool tuple_totally_frozen;
1090 
1091  num_tuples += 1;
1092  hastup = true;
1093 
1094  /*
1095  * Each non-removable tuple must be checked to see if it needs
1096  * freezing. Note we already have exclusive buffer lock.
1097  */
1098  if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
1099  MultiXactCutoff, &frozen[nfrozen],
1100  &tuple_totally_frozen))
1101  frozen[nfrozen++].offset = offnum;
1102 
1103  if (!tuple_totally_frozen)
1104  all_frozen = false;
1105  }
1106  } /* scan along page */
1107 
1108  /*
1109  * If we froze any tuples, mark the buffer dirty, and write a WAL
1110  * record recording the changes. We must log the changes to be
1111  * crash-safe against future truncation of CLOG.
1112  */
1113  if (nfrozen > 0)
1114  {
1115  START_CRIT_SECTION();
1116 
1117  MarkBufferDirty(buf);
1118 
1119  /* execute collected freezes */
1120  for (i = 0; i < nfrozen; i++)
1121  {
1122  ItemId itemid;
1123  HeapTupleHeader htup;
1124 
1125  itemid = PageGetItemId(page, frozen[i].offset);
1126  htup = (HeapTupleHeader) PageGetItem(page, itemid);
1127 
1128  heap_execute_freeze_tuple(htup, &frozen[i]);
1129  }
1130 
1131  /* Now WAL-log freezing if necessary */
1132  if (RelationNeedsWAL(onerel))
1133  {
1134  XLogRecPtr recptr;
1135 
1136  recptr = log_heap_freeze(onerel, buf, FreezeLimit,
1137  frozen, nfrozen);
1138  PageSetLSN(page, recptr);
1139  }
1140 
1141  END_CRIT_SECTION();
1142  }
1143 
1144  /*
1145  * If there are no indexes then we can vacuum the page right now
1146  * instead of doing a second scan.
1147  */
1148  if (nindexes == 0 &&
1149  vacrelstats->num_dead_tuples > 0)
1150  {
1151  /* Remove tuples from heap */
1152  lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
1153  has_dead_tuples = false;
1154 
1155  /*
1156  * Forget the now-vacuumed tuples, and press on, but be careful
1157  * not to reset latestRemovedXid since we want that value to be
1158  * valid.
1159  */
1160  vacrelstats->num_dead_tuples = 0;
1161  vacuumed_pages++;
1162  }
1163 
1164  freespace = PageGetHeapFreeSpace(page);
1165 
1166  /* mark page all-visible, if appropriate */
1167  if (all_visible && !all_visible_according_to_vm)
1168  {
1169  uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
1170 
1171  if (all_frozen)
1172  flags |= VISIBILITYMAP_ALL_FROZEN;
1173 
1174  /*
1175  * It should never be the case that the visibility map page is set
1176  * while the page-level bit is clear, but the reverse is allowed
1177  * (if checksums are not enabled). Regardless, set both bits
1178  * so that we get back in sync.
1179  *
1180  * NB: If the heap page is all-visible but the VM bit is not set,
1181  * we don't need to dirty the heap page. However, if checksums
1182  * are enabled, we do need to make sure that the heap page is
1183  * dirtied before passing it to visibilitymap_set(), because it
1184  * may be logged. Given that this situation should only happen in
1185  * rare cases after a crash, it is not worth optimizing.
1186  */
1187  PageSetAllVisible(page);
1188  MarkBufferDirty(buf);
1189  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1190  vmbuffer, visibility_cutoff_xid, flags);
1191  }
1192 
1193  /*
1194  * As of PostgreSQL 9.2, the visibility map bit should never be set if
1195  * the page-level bit is clear. However, it's possible that the bit
1196  * got cleared after we checked it and before we took the buffer
1197  * content lock, so we must recheck before jumping to the conclusion
1198  * that something bad has happened.
1199  */
1200  else if (all_visible_according_to_vm && !PageIsAllVisible(page)
1201  && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
1202  {
1203  elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
1204  relname, blkno);
1205  visibilitymap_clear(onerel, blkno, vmbuffer,
1206  VISIBILITYMAP_VALID_BITS);
1207  }
1208 
1209  /*
1210  * It's possible for the value returned by GetOldestXmin() to move
1211  * backwards, so it's not wrong for us to see tuples that appear to
1212  * not be visible to everyone yet, while PD_ALL_VISIBLE is already
1213  * set. The real safe xmin value never moves backwards, but
1214  * GetOldestXmin() is conservative and sometimes returns a value
1215  * that's unnecessarily small, so if we see that contradiction it just
1216  * means that the tuples that we think are not visible to everyone yet
1217  * actually are, and the PD_ALL_VISIBLE flag is correct.
1218  *
1219  * There should never be dead tuples on a page with PD_ALL_VISIBLE
1220  * set, however.
1221  */
1222  else if (PageIsAllVisible(page) && has_dead_tuples)
1223  {
1224  elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
1225  relname, blkno);
1226  PageClearAllVisible(page);
1227  MarkBufferDirty(buf);
1228  visibilitymap_clear(onerel, blkno, vmbuffer,
1229  VISIBILITYMAP_VALID_BITS);
1230  }
1231 
1232  /*
1233  * If the all-visible page turns out to be all-frozen but not
1234  * marked, we should so mark it. Note that all_frozen is only valid
1235  * if all_visible is true, so we must check both.
1236  */
1237  else if (all_visible_according_to_vm && all_visible && all_frozen &&
1238  !VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
1239  {
1240  /*
1241  * We can pass InvalidTransactionId as the cutoff XID here,
1242  * because setting the all-frozen bit doesn't cause recovery
1243  * conflicts.
1244  */
1245  visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1246  vmbuffer, InvalidTransactionId,
1247  VISIBILITYMAP_ALL_FROZEN);
1248  }
1249 
1250  UnlockReleaseBuffer(buf);
1251 
1252  /* Remember the location of the last page with nonremovable tuples */
1253  if (hastup)
1254  vacrelstats->nonempty_pages = blkno + 1;
1255 
1256  /*
1257  * If we remembered any tuples for deletion, then the page will be
1258  * visited again by lazy_vacuum_heap, which will compute and record
1259  * its post-compaction free space. If not, then we're done with this
1260  * page, so remember its free space as-is. (This path will always be
1261  * taken if there are no indexes.)
1262  */
1263  if (vacrelstats->num_dead_tuples == prev_dead_count)
1264  RecordPageWithFreeSpace(onerel, blkno, freespace);
1265  }
1266 
1267  /* report that everything is scanned and vacuumed */
1268  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1269 
1270  pfree(frozen);
1271 
1272  /* save stats for use later */
1273  vacrelstats->scanned_tuples = num_tuples;
1274  vacrelstats->tuples_deleted = tups_vacuumed;
1275  vacrelstats->new_dead_tuples = nkeep;
1276 
1277  /* now we can compute the new value for pg_class.reltuples */
1278  vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
1279  nblocks,
1280  vacrelstats->tupcount_pages,
1281  num_tuples);
1282 
1283  /*
1284  * Release any remaining pin on visibility map page.
1285  */
1286  if (BufferIsValid(vmbuffer))
1287  {
1288  ReleaseBuffer(vmbuffer);
1289  vmbuffer = InvalidBuffer;
1290  }
1291 
1292  /* If any tuples need to be deleted, perform final vacuum cycle */
1293  /* XXX put a threshold on min number of tuples here? */
1294  if (vacrelstats->num_dead_tuples > 0)
1295  {
1296  const int hvp_index[] = {
1297  PROGRESS_VACUUM_PHASE,
1298  PROGRESS_VACUUM_NUM_INDEX_VACUUMS
1299  };
1300  int64 hvp_val[2];
1301 
1302  /* Log cleanup info before we touch indexes */
1303  vacuum_log_cleanup_info(onerel, vacrelstats);
1304 
1305  /* Report that we are now vacuuming indexes */
1306  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1307  PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
1308 
1309  /* Remove index entries */
1310  for (i = 0; i < nindexes; i++)
1311  lazy_vacuum_index(Irel[i],
1312  &indstats[i],
1313  vacrelstats);
1314 
1315  /* Report that we are now vacuuming the heap */
1316  hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
1317  hvp_val[1] = vacrelstats->num_index_scans + 1;
1318  pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
1319 
1320  /* Remove tuples from heap */
1321  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1322  PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
1323  lazy_vacuum_heap(onerel, vacrelstats);
1324  vacrelstats->num_index_scans++;
1325  }
1326 
1327  /* report all blocks vacuumed; and that we're cleaning up */
1328  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1329  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1330  PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);
1331 
1332  /* Do post-vacuum cleanup and statistics update for each index */
1333  for (i = 0; i < nindexes; i++)
1334  lazy_cleanup_index(Irel[i], indstats[i], vacrelstats);
1335 
1336  /* If no indexes, make log report that lazy_vacuum_heap would've made */
1337  if (vacuumed_pages)
1338  ereport(elevel,
1339  (errmsg("\"%s\": removed %.0f row versions in %u pages",
1340  RelationGetRelationName(onerel),
1341  tups_vacuumed, vacuumed_pages)));
1342 
1343  /*
1344  * This is pretty messy, but we split it up so that we can skip emitting
1345  * individual parts of the message when not applicable.
1346  */
1347  initStringInfo(&buf);
1348  appendStringInfo(&buf,
1349  _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
1350  nkeep, OldestXmin);
1351  appendStringInfo(&buf, _("There were %.0f unused item pointers.\n"),
1352  nunused);
1353  appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
1354  "Skipped %u pages due to buffer pins, ",
1355  vacrelstats->pinskipped_pages),
1356  vacrelstats->pinskipped_pages);
1357  appendStringInfo(&buf, ngettext("%u frozen page.\n",
1358  "%u frozen pages.\n",
1359  vacrelstats->frozenskipped_pages),
1360  vacrelstats->frozenskipped_pages);
1361  appendStringInfo(&buf, ngettext("%u page is entirely empty.\n",
1362  "%u pages are entirely empty.\n",
1363  empty_pages),
1364  empty_pages);
1365  appendStringInfo(&buf, _("%s."), pg_rusage_show(&ru0));
1366 
1367  ereport(elevel,
1368  (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
1369  RelationGetRelationName(onerel),
1370  tups_vacuumed, num_tuples,
1371  vacrelstats->scanned_pages, nblocks),
1372  errdetail_internal("%s", buf.data)));
1373  pfree(buf.data);
1374 }
1375 
1376 
1377 /*
1378  * lazy_vacuum_heap() -- second pass over the heap
1379  *
1380  * This routine marks dead tuples as unused and compacts out free
1381  * space on their pages. Pages not having dead tuples recorded from
1382  * lazy_scan_heap are not visited at all.
1383  *
1384  * Note: the reason for doing this as a second pass is we cannot remove
1385  * the tuples until we've removed their index entries, and we want to
1386  * process index entry removal in batches as large as possible.
1387  */
1388 static void
1389 lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
1390 {
1391  int tupindex;
1392  int npages;
1393  PGRUsage ru0;
1394  Buffer vmbuffer = InvalidBuffer;
1395 
1396  pg_rusage_init(&ru0);
1397  npages = 0;
1398 
1399  tupindex = 0;
1400  while (tupindex < vacrelstats->num_dead_tuples)
1401  {
1402  BlockNumber tblk;
1403  Buffer buf;
1404  Page page;
1405  Size freespace;
1406 
1407  vacuum_delay_point();
1408 
1409  tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1410  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
1411  vac_strategy);
1412  if (!ConditionalLockBufferForCleanup(buf))
1413  {
1414  ReleaseBuffer(buf);
1415  ++tupindex;
1416  continue;
1417  }
1418  tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
1419  &vmbuffer);
1420 
1421  /* Now that we've compacted the page, record its available space */
1422  page = BufferGetPage(buf);
1423  freespace = PageGetHeapFreeSpace(page);
1424 
1425  UnlockReleaseBuffer(buf);
1426  RecordPageWithFreeSpace(onerel, tblk, freespace);
1427  npages++;
1428  }
1429 
1430  if (BufferIsValid(vmbuffer))
1431  {
1432  ReleaseBuffer(vmbuffer);
1433  vmbuffer = InvalidBuffer;
1434  }
1435 
1436  ereport(elevel,
1437  (errmsg("\"%s\": removed %d row versions in %d pages",
1438  RelationGetRelationName(onerel),
1439  tupindex, npages),
1440  errdetail_internal("%s", pg_rusage_show(&ru0))));
1441 }
1442 
1443 /*
1444  * lazy_vacuum_page() -- free dead tuples on a page
1445  * and repair its fragmentation.
1446  *
1447  * Caller must hold pin and buffer cleanup lock on the buffer.
1448  *
1449  * tupindex is the index in vacrelstats->dead_tuples of the first dead
1450  * tuple for this page. We assume the rest follow sequentially.
1451  * The return value is the first tupindex after the tuples of this page.
1452  */
1453 static int
1454 lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
1455  int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
1456 {
1457  Page page = BufferGetPage(buffer);
1458  OffsetNumber unused[MaxOffsetNumber];
1459  int uncnt = 0;
1460  TransactionId visibility_cutoff_xid;
1461  bool all_frozen;
1462 
1463  pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1464 
1465  START_CRIT_SECTION();
1466 
1467  for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
1468  {
1469  BlockNumber tblk;
1470  OffsetNumber toff;
1471  ItemId itemid;
1472 
1473  tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1474  if (tblk != blkno)
1475  break; /* past end of tuples for this block */
1476  toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
1477  itemid = PageGetItemId(page, toff);
1478  ItemIdSetUnused(itemid);
1479  unused[uncnt++] = toff;
1480  }
1481 
1481 
1482  PageRepairFragmentation(page);
1483 
1484  /*
1485  * Mark buffer dirty before we write WAL.
1486  */
1487  MarkBufferDirty(buffer);
1488 
1489  /* XLOG stuff */
1490  if (RelationNeedsWAL(onerel))
1491  {
1492  XLogRecPtr recptr;
1493 
1494  recptr = log_heap_clean(onerel, buffer,
1495  NULL, 0, NULL, 0,
1496  unused, uncnt,
1497  vacrelstats->latestRemovedXid);
1498  PageSetLSN(page, recptr);
1499  }
1500 
1501  /*
1502  * End critical section, so we safely can do visibility tests (which
1503  * possibly need to perform IO and allocate memory!). If we crash now the
1504  * page (including the corresponding vm bit) might not be marked all
1505  * visible, but that's fine. A later vacuum will fix that.
1506  */
1507  END_CRIT_SECTION();
1508 
1509  /*
1510  * Now that we have removed the dead tuples from the page, once again
1511  * check if the page has become all-visible. The page is already marked
1512  * dirty, exclusively locked, and, if needed, a full page image has been
1513  * emitted in the log_heap_clean() above.
1514  */
1515  if (heap_page_is_all_visible(onerel, buffer, &visibility_cutoff_xid,
1516  &all_frozen))
1517  PageSetAllVisible(page);
1518 
1519  /*
1520  * All the changes to the heap page have been done. If the all-visible
1521  * flag is now set, also set the VM all-visible bit (and, if possible, the
1522  * all-frozen bit) unless this has already been done previously.
1523  */
1524  if (PageIsAllVisible(page))
1525  {
1526  uint8 vm_status = visibilitymap_get_status(onerel, blkno, vmbuffer);
1527  uint8 flags = 0;
1528 
1529  /* Set the VM all-frozen bit to flag, if needed */
1530  if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
1531  flags |= VISIBILITYMAP_ALL_VISIBLE;
1532  if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0 && all_frozen)
1533  flags |= VISIBILITYMAP_ALL_FROZEN;
1534 
1535  Assert(BufferIsValid(*vmbuffer));
1536  if (flags != 0)
1537  visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr,
1538  *vmbuffer, visibility_cutoff_xid, flags);
1539  }
1540 
1541  return tupindex;
1542 }
1543 
1544 /*
1545  * lazy_check_needs_freeze() -- scan page to see if any tuples
1546  * need to be cleaned to avoid wraparound
1547  *
1548  * Returns true if the page needs to be vacuumed using cleanup lock.
1549  * Also returns a flag indicating whether page contains any tuples at all.
1550  */
1551 static bool
1552 lazy_check_needs_freeze(Buffer buf, bool *hastup)
1553 {
1554  Page page = BufferGetPage(buf);
1555  OffsetNumber offnum,
1556  maxoff;
1557  HeapTupleHeader tupleheader;
1558 
1559  *hastup = false;
1560 
1561  /* If we hit an uninitialized page, we want to force vacuuming it. */
1562  if (PageIsNew(page))
1563  return true;
1564 
1565  /* Quick out for ordinary empty page. */
1566  if (PageIsEmpty(page))
1567  return false;
1568 
1569  maxoff = PageGetMaxOffsetNumber(page);
1570  for (offnum = FirstOffsetNumber;
1571  offnum <= maxoff;
1572  offnum = OffsetNumberNext(offnum))
1573  {
1574  ItemId itemid;
1575 
1576  itemid = PageGetItemId(page, offnum);
1577 
1578  /* this should match hastup test in count_nondeletable_pages() */
1579  if (ItemIdIsUsed(itemid))
1580  *hastup = true;
1581 
1582  /* dead and redirect items never need freezing */
1583  if (!ItemIdIsNormal(itemid))
1584  continue;
1585 
1586  tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
1587 
1588  if (heap_tuple_needs_freeze(tupleheader, FreezeLimit,
1589  MultiXactCutoff, buf))
1590  return true;
1591  } /* scan along page */
1592 
1593  return false;
1594 }
1595 
1596 
1597 /*
1598  * lazy_vacuum_index() -- vacuum one index relation.
1599  *
1600  * Delete all the index entries pointing to tuples listed in
1601  * vacrelstats->dead_tuples, and update running statistics.
1602  */
1603 static void
1604 lazy_vacuum_index(Relation indrel,
1605  IndexBulkDeleteResult **stats,
1606  LVRelStats *vacrelstats)
1607 {
1608  IndexVacuumInfo ivinfo;
1609  PGRUsage ru0;
1610 
1611  pg_rusage_init(&ru0);
1612 
1613  ivinfo.index = indrel;
1614  ivinfo.analyze_only = false;
1615  ivinfo.estimated_count = true;
1616  ivinfo.message_level = elevel;
1617  ivinfo.num_heap_tuples = vacrelstats->old_rel_tuples;
1618  ivinfo.strategy = vac_strategy;
1619 
1620  /* Do bulk deletion */
1621  *stats = index_bulk_delete(&ivinfo, *stats,
1622  lazy_tid_reaped, (void *) vacrelstats);
1623 
1624  ereport(elevel,
1625  (errmsg("scanned index \"%s\" to remove %d row versions",
1626  RelationGetRelationName(indrel),
1627  vacrelstats->num_dead_tuples),
1628  errdetail_internal("%s", pg_rusage_show(&ru0))));
1629 }
1630 
1631 /*
1632  * lazy_cleanup_index() -- do post-vacuum cleanup for one index relation.
1633  */
1634 static void
1635 lazy_cleanup_index(Relation indrel,
1636  IndexBulkDeleteResult *stats,
1637  LVRelStats *vacrelstats)
1638 {
1639  IndexVacuumInfo ivinfo;
1640  PGRUsage ru0;
1641 
1642  pg_rusage_init(&ru0);
1643 
1644  ivinfo.index = indrel;
1645  ivinfo.analyze_only = false;
1646  ivinfo.estimated_count = (vacrelstats->tupcount_pages < vacrelstats->rel_pages);
1647  ivinfo.message_level = elevel;
1648  ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples;
1649  ivinfo.strategy = vac_strategy;
1650 
1651  stats = index_vacuum_cleanup(&ivinfo, stats);
1652 
1653  if (!stats)
1654  return;
1655 
1656  /*
1657  * Now update statistics in pg_class, but only if the index says the count
1658  * is accurate.
1659  */
1660  if (!stats->estimated_count)
1661  vac_update_relstats(indrel,
1662  stats->num_pages,
1663  stats->num_index_tuples,
1664  0,
1665  false,
1666  InvalidTransactionId,
1667  InvalidMultiXactId,
1668  false);
1669 
1670  ereport(elevel,
1671  (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
1672  RelationGetRelationName(indrel),
1673  stats->num_index_tuples,
1674  stats->num_pages),
1675  errdetail("%.0f index row versions were removed.\n"
1676  "%u index pages have been deleted, %u are currently reusable.\n"
1677  "%s.",
1678  stats->tuples_removed,
1679  stats->pages_deleted, stats->pages_free,
1680  pg_rusage_show(&ru0))));
1681 
1682  pfree(stats);
1683 }
1684 
1685 /*
1686  * should_attempt_truncation - should we attempt to truncate the heap?
1687  *
1688  * Don't even think about it unless we have a shot at releasing a goodly
1689  * number of pages. Otherwise, the time taken isn't worth it.
1690  *
1691  * Also don't attempt it if we are doing early pruning/vacuuming, because a
1692  * scan which cannot find a truncated heap page cannot determine that the
1693  * snapshot is too old to read that page. We might be able to get away with
1694  * truncating all except one of the pages, setting its LSN to (at least) the
1695  * maximum of the truncated range if we also treated an index leaf tuple
1696  * pointing to a missing heap page as something to trigger the "snapshot too
1697  * old" error, but that seems fragile and seems like it deserves its own patch
1698  * if we consider it.
1699  *
1700  * This is split out so that we can test whether truncation is going to be
1701  * called for before we actually do it. If you change the logic here, be
1702  * careful to depend only on fields that lazy_scan_heap updates on-the-fly.
1703  */
1704 static bool
1705 should_attempt_truncation(LVRelStats *vacrelstats)
1706 {
1707  BlockNumber possibly_freeable;
1708 
1709  possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
1710  if (possibly_freeable > 0 &&
1711  (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
1712  possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
1713  old_snapshot_threshold < 0)
1714  return true;
1715  else
1716  return false;
1717 }
1718 
1719 /*
1720  * lazy_truncate_heap - try to truncate off any empty pages at the end
1721  */
1722 static void
1723 lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
1724 {
1725  BlockNumber old_rel_pages = vacrelstats->rel_pages;
1726  BlockNumber new_rel_pages;
1727  PGRUsage ru0;
1728  int lock_retry;
1729 
1730  pg_rusage_init(&ru0);
1731 
1732  /* Report that we are now truncating */
1733  pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1734  PROGRESS_VACUUM_PHASE_TRUNCATE);
1735 
1736  /*
1737  * Loop until no more truncating can be done.
1738  */
1739  do
1740  {
1741  /*
1742  * We need full exclusive lock on the relation in order to do
1743  * truncation. If we can't get it, give up rather than waiting --- we
1744  * don't want to block other backends, and we don't want to deadlock
1745  * (which is quite possible considering we already hold a lower-grade
1746  * lock).
1747  */
1748  vacrelstats->lock_waiter_detected = false;
1749  lock_retry = 0;
1750  while (true)
1751  {
1752  if (ConditionalLockRelation(onerel, AccessExclusiveLock))
1753  break;
1754 
1755  /*
1756  * Check for interrupts while trying to (re-)acquire the exclusive
1757  * lock.
1758  */
1759  CHECK_FOR_INTERRUPTS();
1760 
1761  if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
1762  VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
1763  {
1764  /*
1765  * We failed to establish the lock in the specified number of
1766  * retries. This means we give up truncating.
1767  */
1768  vacrelstats->lock_waiter_detected = true;
1769  ereport(elevel,
1770  (errmsg("\"%s\": stopping truncate due to conflicting lock request",
1771  RelationGetRelationName(onerel))));
1772  return;
1773  }
1774 
1775  pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL * 1000L);
1776  }
1777 
1778  /*
1779  * Now that we have exclusive lock, look to see if the rel has grown
1780  * whilst we were vacuuming with non-exclusive lock. If so, give up;
1781  * the newly added pages presumably contain non-deletable tuples.
1782  */
1783  new_rel_pages = RelationGetNumberOfBlocks(onerel);
1784  if (new_rel_pages != old_rel_pages)
1785  {
1786  /*
1787  * Note: we intentionally don't update vacrelstats->rel_pages with
1788  * the new rel size here. If we did, it would amount to assuming
1789  * that the new pages are empty, which is unlikely. Leaving the
1790  * numbers alone amounts to assuming that the new pages have the
1791  * same tuple density as existing ones, which is less unlikely.
1792  */
1793  UnlockRelation(onerel, AccessExclusiveLock);
1794  return;
1795  }
1796 
1797  /*
1798  * Scan backwards from the end to verify that the end pages actually
1799  * contain no tuples. This is *necessary*, not optional, because
1800  * other backends could have added tuples to these pages whilst we
1801  * were vacuuming.
1802  */
1803  new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
1804 
1805  if (new_rel_pages >= old_rel_pages)
1806  {
1807  /* can't do anything after all */
1808  UnlockRelation(onerel, AccessExclusiveLock);
1809  return;
1810  }
1811 
1812  /*
1813  * Okay to truncate.
1814  */
1815  RelationTruncate(onerel, new_rel_pages);
1816 
1817  /*
1818  * We can release the exclusive lock as soon as we have truncated.
1819  * Other backends can't safely access the relation until they have
1820  * processed the smgr invalidation that smgrtruncate sent out ... but
1821  * that should happen as part of standard invalidation processing once
1822  * they acquire lock on the relation.
1823  */
1824  UnlockRelation(onerel, AccessExclusiveLock);
1825 
1826  /*
1827  * Update statistics. Here, it *is* correct to adjust rel_pages
1828  * without also touching reltuples, since the tuple count wasn't
1829  * changed by the truncation.
1830  */
1831  vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
1832  vacrelstats->rel_pages = new_rel_pages;
1833 
1834  ereport(elevel,
1835  (errmsg("\"%s\": truncated %u to %u pages",
1836  RelationGetRelationName(onerel),
1837  old_rel_pages, new_rel_pages),
1838  errdetail_internal("%s",
1839  pg_rusage_show(&ru0))));
1840  old_rel_pages = new_rel_pages;
1841  } while (new_rel_pages > vacrelstats->nonempty_pages &&
1842  vacrelstats->lock_waiter_detected);
1843 }
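With the timing constants defined near the top of the file, the retry loop above attempts the exclusive lock at most VACUUM_TRUNCATE_LOCK_TIMEOUT / VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL = 5000 / 50 = 100 times, sleeping 50 ms between attempts, so truncation is abandoned after roughly five seconds of contention. A small sketch of that budget:

#include <stdio.h>

/* Same millisecond values as the VACUUM_TRUNCATE_LOCK_* constants in this file. */
#define LOCK_WAIT_INTERVAL	50
#define LOCK_TIMEOUT		5000

int
main(void)
{
	int			max_retries = LOCK_TIMEOUT / LOCK_WAIT_INTERVAL;

	/* prints: up to 100 attempts, ~5000 ms of waiting */
	printf("up to %d attempts, ~%d ms of waiting\n",
		   max_retries, max_retries * LOCK_WAIT_INTERVAL);
	return 0;
}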
1844 
1845 /*
1846  * Rescan end pages to verify that they are (still) empty of tuples.
1847  *
1848  * Returns number of nondeletable pages (last nonempty page + 1).
1849  */
1850 static BlockNumber
1851 count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
1852 {
1853  BlockNumber blkno;
1854  BlockNumber prefetchedUntil;
1855  instr_time starttime;
1856 
1857  /* Initialize the starttime if we check for conflicting lock requests */
1858  INSTR_TIME_SET_CURRENT(starttime);
1859 
1860  /*
1861  * Start checking blocks at what we believe relation end to be and move
1862  * backwards. (Strange coding of loop control is needed because blkno is
1863  * unsigned.) To make the scan faster, we prefetch a few blocks at a time
1864  * in forward direction, so that OS-level readahead can kick in.
1865  */
1866  blkno = vacrelstats->rel_pages;
1867  StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
1868  "prefetch size must be power of 2");
1869  prefetchedUntil = InvalidBlockNumber;
1870  while (blkno > vacrelstats->nonempty_pages)
1871  {
1872  Buffer buf;
1873  Page page;
1874  OffsetNumber offnum,
1875  maxoff;
1876  bool hastup;
1877 
1878  /*
1879  * Check if another process requests a lock on our relation. We are
1880  * holding an AccessExclusiveLock here, so they will be waiting. We
1881  * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
1882  * only check if that interval has elapsed once every 32 blocks to
1883  * keep the number of system calls and actual shared lock table
1884  * lookups to a minimum.
1885  */
1886  if ((blkno % 32) == 0)
1887  {
1888  instr_time currenttime;
1889  instr_time elapsed;
1890 
1891  INSTR_TIME_SET_CURRENT(currenttime);
1892  elapsed = currenttime;
1893  INSTR_TIME_SUBTRACT(elapsed, starttime);
1894  if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
1895  >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
1896  {
1897  if (LockHasWaitersRelation(onerel, AccessExclusiveLock))
1898  {
1899  ereport(elevel,
1900  (errmsg("\"%s\": suspending truncate due to conflicting lock request",
1901  RelationGetRelationName(onerel))));
1902 
1903  vacrelstats->lock_waiter_detected = true;
1904  return blkno;
1905  }
1906  starttime = currenttime;
1907  }
1908  }
1909 
1910  /*
1911  * We don't insert a vacuum delay point here, because we have an
1912  * exclusive lock on the table which we want to hold for as short a
1913  * time as possible. We still need to check for interrupts however.
1914  */
1915  CHECK_FOR_INTERRUPTS();
1916 
1917  blkno--;
1918 
1919  /* If we haven't prefetched this lot yet, do so now. */
1920  if (prefetchedUntil > blkno)
1921  {
1922  BlockNumber prefetchStart;
1923  BlockNumber pblkno;
1924 
1925  prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
1926  for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
1927  {
1928  PrefetchBuffer(onerel, MAIN_FORKNUM, pblkno);
1929  CHECK_FOR_INTERRUPTS();
1930  }
1931  prefetchedUntil = prefetchStart;
1932  }
1933 
1934  buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
1935  RBM_NORMAL, vac_strategy);
1936 
1937  /* In this phase we only need shared access to the buffer */
1938  LockBuffer(buf, BUFFER_LOCK_SHARE);
1939 
1940  page = BufferGetPage(buf);
1941 
1942  if (PageIsNew(page) || PageIsEmpty(page))
1943  {
1944  /* PageIsNew probably shouldn't happen... */
1945  UnlockReleaseBuffer(buf);
1946  continue;
1947  }
1948 
1949  hastup = false;
1950  maxoff = PageGetMaxOffsetNumber(page);
1951  for (offnum = FirstOffsetNumber;
1952  offnum <= maxoff;
1953  offnum = OffsetNumberNext(offnum))
1954  {
1955  ItemId itemid;
1956 
1957  itemid = PageGetItemId(page, offnum);
1958 
1959  /*
1960  * Note: any non-unused item should be taken as a reason to keep
1961  * this page. We formerly thought that DEAD tuples could be
1962  * thrown away, but that's not so, because we'd not have cleaned
1963  * out their index entries.
1964  */
1965  if (ItemIdIsUsed(itemid))
1966  {
1967  hastup = true;
1968  break; /* can stop scanning */
1969  }
1970  } /* scan along page */
1971 
1972  UnlockReleaseBuffer(buf);
1973 
1974  /* Done scanning if we found a tuple here */
1975  if (hastup)
1976  return blkno + 1;
1977  }
1978 
1979  /*
1980  * If we fall out of the loop, all the previously-thought-to-be-empty
1981  * pages still are; we need not bother to look at the last known-nonempty
1982  * page.
1983  */
1984  return vacrelstats->nonempty_pages;
1985 }
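The prefetch logic above rounds blkno down to a PREFETCH_SIZE boundary, so even though the scan itself moves backwards, each batch of reads is issued in forward order and OS-level readahead can help. A standalone sketch of the masking, assuming a power-of-two prefetch size of 32 blocks:

#include <stdio.h>

typedef unsigned int BlockNumber;

#define PREFETCH_SIZE ((BlockNumber) 32)	/* assumed value; must be a power of 2 */

int
main(void)
{
	BlockNumber blkno = 1000;
	BlockNumber prefetchStart = blkno & ~(PREFETCH_SIZE - 1);

	/* prints: prefetch blocks 992..1000 in forward order */
	printf("prefetch blocks %u..%u in forward order\n", prefetchStart, blkno);
	return 0;
}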
1986 
1987 /*
1988  * lazy_space_alloc - space allocation decisions for lazy vacuum
1989  *
1990  * See the comments at the head of this file for rationale.
1991  */
1992 static void
1993 lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
1994 {
1995  long maxtuples;
1996  int vac_work_mem = IsAutoVacuumWorkerProcess() &&
1997  autovacuum_work_mem != -1 ?
1998  autovacuum_work_mem : maintenance_work_mem;
1999 
2000  if (vacrelstats->hasindex)
2001  {
2002  maxtuples = (vac_work_mem * 1024L) / sizeof(ItemPointerData);
2003  maxtuples = Min(maxtuples, INT_MAX);
2004  maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
2005 
2006  /* curious coding here to ensure the multiplication can't overflow */
2007  if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
2008  maxtuples = relblocks * LAZY_ALLOC_TUPLES;
2009 
2010  /* stay sane if small maintenance_work_mem */
2011  maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
2012  }
2013  else
2014  {
2015  maxtuples = MaxHeapTuplesPerPage;
2016  }
2017 
2018  vacrelstats->num_dead_tuples = 0;
2019  vacrelstats->max_dead_tuples = (int) maxtuples;
2020  vacrelstats->dead_tuples = (ItemPointer)
2021  palloc(maxtuples * sizeof(ItemPointerData));
2022 }
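A worked example of the sizing above, with hypothetical numbers: 64 MB of maintenance_work_mem divided by the 6-byte ItemPointerData yields roughly 11.2 million TIDs, but a 100-block table is further capped at 100 * LAZY_ALLOC_TUPLES (about 29100 entries with the usual 8 kB pages). A self-contained sketch:

#include <stdio.h>

int
main(void)
{
	long		vac_work_mem = 65536;	/* hypothetical: 64 MB expressed in kB */
	long		tid_size = 6;			/* sizeof(ItemPointerData) */
	long		tuples_per_page = 291;	/* MaxHeapTuplesPerPage with 8 kB pages */
	long		relblocks = 100;		/* hypothetical small table */
	long		maxtuples = (vac_work_mem * 1024L) / tid_size;

	/* The per-table cap keeps small tables from allocating the full budget. */
	if (maxtuples / tuples_per_page > relblocks)
		maxtuples = relblocks * tuples_per_page;

	printf("maxtuples = %ld\n", maxtuples);		/* prints: maxtuples = 29100 */
	return 0;
}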
2023 
2024 /*
2025  * lazy_record_dead_tuple - remember one deletable tuple
2026  */
2027 static void
2028 lazy_record_dead_tuple(LVRelStats *vacrelstats,
2029  ItemPointer itemptr)
2030 {
2031  /*
2032  * The array shouldn't overflow under normal behavior, but perhaps it
2033  * could if we are given a really small maintenance_work_mem. In that
2034  * case, just forget the last few tuples (we'll get 'em next time).
2035  */
2036  if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples)
2037  {
2038  vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
2039  vacrelstats->num_dead_tuples++;
2040  pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
2041  vacrelstats->num_dead_tuples);
2042  }
2043 }
2044 
2045 /*
2046  * lazy_tid_reaped() -- is a particular tid deletable?
2047  *
2048  * This has the right signature to be an IndexBulkDeleteCallback.
2049  *
2050  * Assumes dead_tuples array is in sorted order.
2051  */
2052 static bool
2053 lazy_tid_reaped(ItemPointer itemptr, void *state)
2054 {
2055  LVRelStats *vacrelstats = (LVRelStats *) state;
2056  ItemPointer res;
2057 
2058  res = (ItemPointer) bsearch((void *) itemptr,
2059  (void *) vacrelstats->dead_tuples,
2060  vacrelstats->num_dead_tuples,
2061  sizeof(ItemPointerData),
2062  vac_cmp_itemptr);
2063 
2064  return (res != NULL);
2065 }
2066 
2067 /*
2068  * Comparator routines for use with qsort() and bsearch().
2069  */
2070 static int
2071 vac_cmp_itemptr(const void *left, const void *right)
2072 {
2073  BlockNumber lblk,
2074  rblk;
2075  OffsetNumber loff,
2076  roff;
2077 
2078  lblk = ItemPointerGetBlockNumber((ItemPointer) left);
2079  rblk = ItemPointerGetBlockNumber((ItemPointer) right);
2080 
2081  if (lblk < rblk)
2082  return -1;
2083  if (lblk > rblk)
2084  return 1;
2085 
2086  loff = ItemPointerGetOffsetNumber((ItemPointer) left);
2087  roff = ItemPointerGetOffsetNumber((ItemPointer) right);
2088 
2089  if (loff < roff)
2090  return -1;
2091  if (loff > roff)
2092  return 1;
2093 
2094  return 0;
2095 }
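Because lazy_scan_heap records dead TIDs in ascending heap order, lazy_tid_reaped can probe the array with bsearch using this block-then-offset ordering. A self-contained sketch of the same idea on simplified (block, offset) pairs:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for ItemPointerData: a (block, offset) pair. */
typedef struct
{
	unsigned int block;
	unsigned short offset;
} DeadTid;

/* Same ordering rule as vac_cmp_itemptr: compare block first, then offset. */
static int
tid_cmp(const void *left, const void *right)
{
	const DeadTid *l = left;
	const DeadTid *r = right;

	if (l->block != r->block)
		return (l->block < r->block) ? -1 : 1;
	if (l->offset != r->offset)
		return (l->offset < r->offset) ? -1 : 1;
	return 0;
}

int
main(void)
{
	DeadTid		dead[] = {{1, 3}, {1, 7}, {4, 2}, {9, 1}};	/* ascending order */
	DeadTid		probe = {4, 2};
	DeadTid	   *hit = bsearch(&probe, dead, 4, sizeof(DeadTid), tid_cmp);

	printf("reaped: %s\n", hit ? "yes" : "no");		/* prints: reaped: yes */
	return 0;
}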
2096 
2097 /*
2098  * Check if every tuple in the given page is visible to all current and future
2099  * transactions. Also return the visibility_cutoff_xid which is the highest
2100  * xmin amongst the visible tuples. Set *all_frozen to true if every tuple
2101  * on this page is frozen.
2102  */
2103 static bool
2104 heap_page_is_all_visible(Relation rel, Buffer buf,
2105  TransactionId *visibility_cutoff_xid,
2106  bool *all_frozen)
2107 {
2108  Page page = BufferGetPage(buf);
2109  BlockNumber blockno = BufferGetBlockNumber(buf);
2110  OffsetNumber offnum,
2111  maxoff;
2112  bool all_visible = true;
2113 
2114  *visibility_cutoff_xid = InvalidTransactionId;
2115  *all_frozen = true;
2116 
2117  /*
2118  * This is a stripped down version of the line pointer scan in
2119  * lazy_scan_heap(). So if you change anything here, also check that code.
2120  */
2121  maxoff = PageGetMaxOffsetNumber(page);
2122  for (offnum = FirstOffsetNumber;
2123  offnum <= maxoff && all_visible;
2124  offnum = OffsetNumberNext(offnum))
2125  {
2126  ItemId itemid;
2127  HeapTupleData tuple;
2128 
2129  itemid = PageGetItemId(page, offnum);
2130 
2131  /* Unused or redirect line pointers are of no interest */
2132  if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
2133  continue;
2134 
2135  ItemPointerSet(&(tuple.t_self), blockno, offnum);
2136 
2137  /*
2138  * Dead line pointers can have index pointers pointing to them. So
2139  * they can't be treated as visible
2140  */
2141  if (ItemIdIsDead(itemid))
2142  {
2143  all_visible = false;
2144  *all_frozen = false;
2145  break;
2146  }
2147 
2148  Assert(ItemIdIsNormal(itemid));
2149 
2150  tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2151  tuple.t_len = ItemIdGetLength(itemid);
2152  tuple.t_tableOid = RelationGetRelid(rel);
2153 
2154  switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
2155  {
2156  case HEAPTUPLE_LIVE:
2157  {
2158  TransactionId xmin;
2159 
2160  /* Check comments in lazy_scan_heap. */
2161  if (!HeapTupleHeaderXminCommitted(tuple.t_data))
2162  {
2163  all_visible = false;
2164  *all_frozen = false;
2165  break;
2166  }
2167 
2168  /*
2169  * The inserter definitely committed. But is it old enough
2170  * that everyone sees it as committed?
2171  */
2172  xmin = HeapTupleHeaderGetXmin(tuple.t_data);
2173  if (!TransactionIdPrecedes(xmin, OldestXmin))
2174  {
2175  all_visible = false;
2176  *all_frozen = false;
2177  break;
2178  }
2179 
2180  /* Track newest xmin on page. */
2181  if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
2182  *visibility_cutoff_xid = xmin;
2183 
2184  /* Check whether this tuple is already frozen or not */
2185  if (all_visible && *all_frozen &&
2186  heap_tuple_needs_eventual_freeze(tuple.t_data))
2187  *all_frozen = false;
2188  }
2189  break;
2190 
2191  case HEAPTUPLE_DEAD:
2192  case HEAPTUPLE_RECENTLY_DEAD:
2193  case HEAPTUPLE_INSERT_IN_PROGRESS:
2194  case HEAPTUPLE_DELETE_IN_PROGRESS:
2195  {
2196  all_visible = false;
2197  *all_frozen = false;
2198  break;
2199  }
2200  default:
2201  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2202  break;
2203  }
2204  } /* scan along page */
2205 
2206  return all_visible;
2207 }
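Callers in this file combine the two results to choose visibility-map bits: VISIBILITYMAP_ALL_VISIBLE for an all-visible page, plus VISIBILITYMAP_ALL_FROZEN when every tuple is frozen as well; the frozen bit is never set on its own. A small sketch of that flag selection (the bit values are assumed, mirroring visibilitymap.h):

#include <stdbool.h>
#include <stdio.h>

/* Assumed bit values mirroring VISIBILITYMAP_ALL_VISIBLE / _ALL_FROZEN. */
#define ALL_VISIBLE 0x01
#define ALL_FROZEN	0x02

static unsigned
vm_flags(bool all_visible, bool all_frozen)
{
	unsigned	flags = 0;

	if (all_visible)
	{
		flags |= ALL_VISIBLE;
		if (all_frozen)
			flags |= ALL_FROZEN;	/* frozen is only set together with visible */
	}
	return flags;
}

int
main(void)
{
	printf("0x%02x\n", vm_flags(true, true));	/* 0x03 */
	printf("0x%02x\n", vm_flags(true, false));	/* 0x01 */
	printf("0x%02x\n", vm_flags(false, true));	/* 0x00 */
	return 0;
}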