PostgreSQL Source Code  git master
gistvacuum.c File Reference
#include "postgres.h"
#include "access/genam.h"
#include "access/gist_private.h"
#include "access/transam.h"
#include "commands/vacuum.h"
#include "lib/integerset.h"
#include "miscadmin.h"
#include "storage/indexfsm.h"
#include "storage/lmgr.h"
#include "utils/memutils.h"


Data Structures

struct  GistVacState
 

Functions

static void gistvacuumscan (IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state)
 
static void gistvacuumpage (GistVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno)
 
static void gistvacuum_delete_empty_pages (IndexVacuumInfo *info, GistVacState *vstate)
 
static bool gistdeletepage (IndexVacuumInfo *info, IndexBulkDeleteResult *stats, Buffer parentBuffer, OffsetNumber downlink, Buffer leafBuffer)
 
IndexBulkDeleteResult * gistbulkdelete (IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state)
 
IndexBulkDeleteResult * gistvacuumcleanup (IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 

Function Documentation

◆ gistbulkdelete()

IndexBulkDeleteResult* gistbulkdelete ( IndexVacuumInfo *  info,
IndexBulkDeleteResult *  stats,
IndexBulkDeleteCallback  callback,
void *  callback_state 
)

Definition at line 59 of file gistvacuum.c.

61 {
62  /* allocate stats if first time through, else re-use existing struct */
63  if (stats == NULL)
64  stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
65 
66  gistvacuumscan(info, stats, callback, callback_state);
67 
68  return stats;
69 }
static void gistvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state)
Definition: gistvacuum.c:125
void * palloc0(Size size)
Definition: mcxt.c:1347

References callback(), gistvacuumscan(), and palloc0().

Referenced by gisthandler().
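
The callback argument must match the IndexBulkDeleteCallback signature (see genam.h): it is invoked once per leaf index tuple and returns true if that tuple's heap TID is dead. A minimal, hypothetical callback is sketched below; ExampleDeadTids, example_tid_reaped and the linear search are illustrative only and not part of PostgreSQL.

#include "postgres.h"
#include "storage/itemptr.h"

/* Illustrative state passed through callback_state (not a PostgreSQL type). */
typedef struct ExampleDeadTids
{
    int             ntids;
    ItemPointerData tids[FLEXIBLE_ARRAY_MEMBER];
} ExampleDeadTids;

/* Hypothetical IndexBulkDeleteCallback: report a TID as dead if it appears
 * in the caller's dead-TID list; gistvacuumpage() then deletes that tuple. */
static bool
example_tid_reaped(ItemPointer itemptr, void *state)
{
    ExampleDeadTids *dead = (ExampleDeadTids *) state;

    for (int i = 0; i < dead->ntids; i++)
    {
        if (ItemPointerEquals(itemptr, &dead->tids[i]))
            return true;    /* index tuple may be removed */
    }
    return false;           /* keep the index tuple */
}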

◆ gistdeletepage()

static bool gistdeletepage ( IndexVacuumInfo *  info,
IndexBulkDeleteResult *  stats,
Buffer  parentBuffer,
OffsetNumber  downlink,
Buffer  leafBuffer 
)
static

Definition at line 588 of file gistvacuum.c.

591 {
592  Page parentPage = BufferGetPage(parentBuffer);
593  Page leafPage = BufferGetPage(leafBuffer);
594  ItemId iid;
595  IndexTuple idxtuple;
596  XLogRecPtr recptr;
597  FullTransactionId txid;
598 
599  /*
600  * Check that the leaf is still empty and deletable.
601  */
602  if (!GistPageIsLeaf(leafPage))
603  {
604  /* a leaf page should never become a non-leaf page */
605  Assert(false);
606  return false;
607  }
608 
609  if (GistFollowRight(leafPage))
610  return false; /* don't mess with a concurrent page split */
611 
612  if (PageGetMaxOffsetNumber(leafPage) != InvalidOffsetNumber)
613  return false; /* not empty anymore */
614 
615  /*
616  * Ok, the leaf is deletable. Is the downlink in the parent page still
617  * valid? It might have been moved by a concurrent insert. We could try
618  * to re-find it by scanning the page again, possibly moving right if the
619  * was split. But for now, let's keep it simple and just give up. The
620  * next VACUUM will pick it up.
621  */
622  if (PageIsNew(parentPage) || GistPageIsDeleted(parentPage) ||
623  GistPageIsLeaf(parentPage))
624  {
625  /* shouldn't happen, internal pages are never deleted */
626  Assert(false);
627  return false;
628  }
629 
630  if (PageGetMaxOffsetNumber(parentPage) < downlink
631  || PageGetMaxOffsetNumber(parentPage) <= FirstOffsetNumber)
632  return false;
633 
634  iid = PageGetItemId(parentPage, downlink);
635  idxtuple = (IndexTuple) PageGetItem(parentPage, iid);
636  if (BufferGetBlockNumber(leafBuffer) !=
637  ItemPointerGetBlockNumber(&(idxtuple->t_tid)))
638  return false;
639 
640  /*
641  * All good, proceed with the deletion.
642  *
643  * The page cannot be immediately recycled, because in-progress scans that
644  * saw the downlink might still visit it. Mark the page with the current
645  * next-XID counter, so that we know when it can be recycled. Once that
646  * XID becomes older than GlobalXmin, we know that all scans that are
647  * currently in progress must have ended. (That's much more conservative
648  * than needed, but let's keep it safe and simple.)
649  */
650  txid = ReadNextFullTransactionId();
651 
652  START_CRIT_SECTION();
653 
654  /* mark the page as deleted */
655  MarkBufferDirty(leafBuffer);
656  GistPageSetDeleted(leafPage, txid);
657  stats->pages_newly_deleted++;
658  stats->pages_deleted++;
659 
660  /* remove the downlink from the parent */
661  MarkBufferDirty(parentBuffer);
662  PageIndexTupleDelete(parentPage, downlink);
663 
664  if (RelationNeedsWAL(info->index))
665  recptr = gistXLogPageDelete(leafBuffer, txid, parentBuffer, downlink);
666  else
667  recptr = gistGetFakeLSN(info->index);
668  PageSetLSN(parentPage, recptr);
669  PageSetLSN(leafPage, recptr);
670 
671  END_CRIT_SECTION();
672 
673  return true;
674 }
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:3724
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2532
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:400
void PageIndexTupleDelete(Page page, OffsetNumber offnum)
Definition: bufpage.c:1041
Pointer Page
Definition: bufpage.h:81
static Item PageGetItem(Page page, ItemId itemId)
Definition: bufpage.h:354
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:243
static bool PageIsNew(Page page)
Definition: bufpage.h:233
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:391
static OffsetNumber PageGetMaxOffsetNumber(Page page)
Definition: bufpage.h:372
#define Assert(condition)
Definition: c.h:837
#define GistPageIsLeaf(page)
Definition: gist.h:169
static void GistPageSetDeleted(Page page, FullTransactionId deletexid)
Definition: gist.h:204
#define GistFollowRight(page)
Definition: gist.h:182
#define GistPageIsDeleted(page)
Definition: gist.h:172
XLogRecPtr gistGetFakeLSN(Relation rel)
Definition: gistutil.c:1016
XLogRecPtr gistXLogPageDelete(Buffer buffer, FullTransactionId xid, Buffer parentBuffer, OffsetNumber downlinkOffset)
Definition: gistxlog.c:552
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
IndexTupleData * IndexTuple
Definition: itup.h:53
#define START_CRIT_SECTION()
Definition: miscadmin.h:149
#define END_CRIT_SECTION()
Definition: miscadmin.h:151
#define InvalidOffsetNumber
Definition: off.h:26
#define FirstOffsetNumber
Definition: off.h:27
#define RelationNeedsWAL(relation)
Definition: rel.h:628
BlockNumber pages_deleted
Definition: genam.h:82
BlockNumber pages_newly_deleted
Definition: genam.h:81
ItemPointerData t_tid
Definition: itup.h:37
Relation index
Definition: genam.h:46
FullTransactionId ReadNextFullTransactionId(void)
Definition: varsup.c:288
uint64 XLogRecPtr
Definition: xlogdefs.h:21

References Assert, BufferGetBlockNumber(), BufferGetPage(), END_CRIT_SECTION, FirstOffsetNumber, GistFollowRight, gistGetFakeLSN(), GistPageIsDeleted, GistPageIsLeaf, GistPageSetDeleted(), gistXLogPageDelete(), IndexVacuumInfo::index, InvalidOffsetNumber, ItemPointerGetBlockNumber(), MarkBufferDirty(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageIndexTupleDelete(), PageIsNew(), IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_newly_deleted, PageSetLSN(), ReadNextFullTransactionId(), RelationNeedsWAL, START_CRIT_SECTION, and IndexTupleData::t_tid.

Referenced by gistvacuum_delete_empty_pages().
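
The recycling rule described in the comment above (stamp the page with the next full XID, recycle only once that XID is older than every transaction that could still follow the old downlink) can be restated as the small check below. This is a simplified sketch, not the actual implementation; the real test is gistPageRecyclable() in gistutil.c, and example_page_recyclable / oldest_running are illustrative names.

#include "postgres.h"
#include "access/gist.h"
#include "access/transam.h"
#include "storage/bufpage.h"

/* Simplified sketch of the "safe to recycle?" test for a deleted GiST page. */
static bool
example_page_recyclable(Page page, FullTransactionId oldest_running)
{
    if (PageIsNew(page))
        return true;            /* never-initialized page: always recyclable */

    if (GistPageIsDeleted(page))
    {
        /* XID stamped by gistdeletepage() via GistPageSetDeleted() */
        FullTransactionId deletexid = GistPageGetDeleteXid(page);

        /* recyclable once every scan that could have seen the downlink ended */
        return FullTransactionIdPrecedes(deletexid, oldest_running);
    }

    return false;               /* live page, not recyclable */
}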

◆ gistvacuum_delete_empty_pages()

static void gistvacuum_delete_empty_pages ( IndexVacuumInfo *  info,
GistVacState *  vstate 
)
static

Definition at line 461 of file gistvacuum.c.

462 {
463  Relation rel = info->index;
464  BlockNumber empty_pages_remaining;
465  uint64 blkno;
466 
467  /*
468  * Rescan all inner pages to find those that have empty child pages.
469  */
470  empty_pages_remaining = intset_num_entries(vstate->empty_leaf_set);
471  intset_begin_iterate(vstate->internal_page_set);
472  while (empty_pages_remaining > 0 &&
473  intset_iterate_next(vstate->internal_page_set, &blkno))
474  {
475  Buffer buffer;
476  Page page;
477  OffsetNumber off,
478  maxoff;
479  OffsetNumber todelete[MaxOffsetNumber];
480  BlockNumber leafs_to_delete[MaxOffsetNumber];
481  int ntodelete;
482  int deleted;
483 
484  buffer = ReadBufferExtended(rel, MAIN_FORKNUM, (BlockNumber) blkno,
485  RBM_NORMAL, info->strategy);
486 
487  LockBuffer(buffer, GIST_SHARE);
488  page = (Page) BufferGetPage(buffer);
489 
490  if (PageIsNew(page) || GistPageIsDeleted(page) || GistPageIsLeaf(page))
491  {
492  /*
493  * This page was an internal page earlier, but now it's something
494  * else. Shouldn't happen...
495  */
496  Assert(false);
497  UnlockReleaseBuffer(buffer);
498  continue;
499  }
500 
501  /*
502  * Scan all the downlinks, and see if any of them point to empty leaf
503  * pages.
504  */
505  maxoff = PageGetMaxOffsetNumber(page);
506  ntodelete = 0;
507  for (off = FirstOffsetNumber;
508  off <= maxoff && ntodelete < maxoff - 1;
509  off = OffsetNumberNext(off))
510  {
511  ItemId iid = PageGetItemId(page, off);
512  IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
513  BlockNumber leafblk;
514 
515  leafblk = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
516  if (intset_is_member(vstate->empty_leaf_set, leafblk))
517  {
518  leafs_to_delete[ntodelete] = leafblk;
519  todelete[ntodelete++] = off;
520  }
521  }
522 
523  /*
524  * In order to avoid deadlock, child page must be locked before
525  * parent, so we must release the lock on the parent, lock the child,
526  and then re-acquire the lock on the parent. (And we wouldn't want to
527  * do I/O, while holding a lock, anyway.)
528  *
529  * At the instant that we're not holding a lock on the parent, the
530  * downlink might get moved by a concurrent insert, so we must
531  * re-check that it still points to the same child page after we have
532  * acquired both locks. Also, another backend might have inserted a
533  * tuple to the page, so that it is no longer empty. gistdeletepage()
534  * re-checks all these conditions.
535  */
536  LockBuffer(buffer, GIST_UNLOCK);
537 
538  deleted = 0;
539  for (int i = 0; i < ntodelete; i++)
540  {
541  Buffer leafbuf;
542 
543  /*
544  * Don't remove the last downlink from the parent. That would
545  * confuse the insertion code.
546  */
547  if (PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
548  break;
549 
550  leafbuf = ReadBufferExtended(rel, MAIN_FORKNUM, leafs_to_delete[i],
551  RBM_NORMAL, info->strategy);
552  LockBuffer(leafbuf, GIST_EXCLUSIVE);
553  gistcheckpage(rel, leafbuf);
554 
555  LockBuffer(buffer, GIST_EXCLUSIVE);
556  if (gistdeletepage(info, vstate->stats,
557  buffer, todelete[i] - deleted,
558  leafbuf))
559  deleted++;
560  LockBuffer(buffer, GIST_UNLOCK);
561 
562  UnlockReleaseBuffer(leafbuf);
563  }
564 
565  ReleaseBuffer(buffer);
566 
567  /*
568  * We can stop the scan as soon as we have seen the downlinks, even if
569  * we were not able to remove them all.
570  */
571  empty_pages_remaining -= ntodelete;
572  }
573 }
uint32 BlockNumber
Definition: block.h:31
int Buffer
Definition: buf.h:23
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4924
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4941
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:5158
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:793
@ RBM_NORMAL
Definition: bufmgr.h:45
#define GIST_UNLOCK
Definition: gist_private.h:44
#define GIST_EXCLUSIVE
Definition: gist_private.h:43
#define GIST_SHARE
Definition: gist_private.h:42
void gistcheckpage(Relation rel, Buffer buf)
Definition: gistutil.c:785
static bool gistdeletepage(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, Buffer parentBuffer, OffsetNumber downlink, Buffer leafBuffer)
Definition: gistvacuum.c:588
uint64 intset_num_entries(IntegerSet *intset)
Definition: integerset.c:349
bool intset_is_member(IntegerSet *intset, uint64 x)
Definition: integerset.c:553
void intset_begin_iterate(IntegerSet *intset)
Definition: integerset.c:623
bool intset_iterate_next(IntegerSet *intset, uint64 *next)
Definition: integerset.c:642
int i
Definition: isn.c:72
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define MaxOffsetNumber
Definition: off.h:28
@ MAIN_FORKNUM
Definition: relpath.h:58
IndexBulkDeleteResult * stats
Definition: gistvacuum.c:31
IntegerSet * empty_leaf_set
Definition: gistvacuum.c:41
IntegerSet * internal_page_set
Definition: gistvacuum.c:40
BufferAccessStrategy strategy
Definition: genam.h:53

References Assert, BufferGetPage(), GistVacState::empty_leaf_set, FirstOffsetNumber, GIST_EXCLUSIVE, GIST_SHARE, GIST_UNLOCK, gistcheckpage(), gistdeletepage(), GistPageIsDeleted, GistPageIsLeaf, i, IndexVacuumInfo::index, GistVacState::internal_page_set, intset_begin_iterate(), intset_is_member(), intset_iterate_next(), intset_num_entries(), ItemPointerGetBlockNumber(), LockBuffer(), MAIN_FORKNUM, MaxOffsetNumber, OffsetNumberNext, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageIsNew(), RBM_NORMAL, ReadBufferExtended(), ReleaseBuffer(), GistVacState::stats, IndexVacuumInfo::strategy, IndexTupleData::t_tid, and UnlockReleaseBuffer().

Referenced by gistvacuumscan().
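
The lock-ordering comment above (lock the child before the parent, then recheck everything after re-locking) corresponds to the pattern sketched below. This is an illustrative restatement of the inner loop, assuming it lives alongside the static gistdeletepage() in gistvacuum.c; example_delete_one_leaf and its arguments are hypothetical names.

/* Sketch of the per-leaf locking dance used above (illustrative only). */
static bool
example_delete_one_leaf(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
                        Buffer parentbuf, OffsetNumber downlinkoff,
                        BlockNumber leafblk)
{
    Buffer      leafbuf;
    bool        deleted;

    /* Caller holds only a pin on parentbuf: never lock a child while the
     * parent is locked, and never do I/O while holding a buffer lock. */
    leafbuf = ReadBufferExtended(info->index, MAIN_FORKNUM, leafblk,
                                 RBM_NORMAL, info->strategy);
    LockBuffer(leafbuf, GIST_EXCLUSIVE);    /* child first ... */
    LockBuffer(parentbuf, GIST_EXCLUSIVE);  /* ... then parent */

    /* gistdeletepage() re-verifies that the leaf is still empty and the
     * downlink still points at it; concurrent inserts make it return false. */
    deleted = gistdeletepage(info, stats, parentbuf, downlinkoff, leafbuf);

    LockBuffer(parentbuf, GIST_UNLOCK);
    UnlockReleaseBuffer(leafbuf);
    return deleted;
}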

◆ gistvacuumcleanup()

IndexBulkDeleteResult* gistvacuumcleanup ( IndexVacuumInfo *  info,
IndexBulkDeleteResult *  stats 
)

Definition at line 75 of file gistvacuum.c.

76 {
77  /* No-op in ANALYZE ONLY mode */
78  if (info->analyze_only)
79  return stats;
80 
81  /*
82  * If gistbulkdelete was called, we need not do anything, just return the
83  * stats from the latest gistbulkdelete call. If it wasn't called, we
84  * still need to do a pass over the index, to obtain index statistics.
85  */
86  if (stats == NULL)
87  {
88  stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
89  gistvacuumscan(info, stats, NULL, NULL);
90  }
91 
92  /*
93  * It's quite possible for us to be fooled by concurrent page splits into
94  * double-counting some index tuples, so disbelieve any total that exceeds
95  * the underlying heap's count ... if we know that accurately. Otherwise
96  * this might just make matters worse.
97  */
98  if (!info->estimated_count)
99  {
100  if (stats->num_index_tuples > info->num_heap_tuples)
101  stats->num_index_tuples = info->num_heap_tuples;
102  }
103 
104  return stats;
105 }
double num_index_tuples
Definition: genam.h:79
double num_heap_tuples
Definition: genam.h:52
bool analyze_only
Definition: genam.h:48
bool estimated_count
Definition: genam.h:50

References IndexVacuumInfo::analyze_only, IndexVacuumInfo::estimated_count, gistvacuumscan(), IndexVacuumInfo::num_heap_tuples, IndexBulkDeleteResult::num_index_tuples, and palloc0().

Referenced by gisthandler().
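
Both gistbulkdelete() and gistvacuumcleanup() are installed as index-AM entry points by gisthandler() in gist.c. A sketch of just the vacuum-related wiring (the function name example_gisthandler and the omitted fields are illustrative):

#include "postgres.h"
#include "fmgr.h"
#include "nodes/nodes.h"
#include "access/amapi.h"
#include "access/gist_private.h"

/* Sketch of the vacuum-related assignments made by gisthandler(). */
Datum
example_gisthandler(PG_FUNCTION_ARGS)
{
    IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);

    /* ... the real gisthandler() fills in many more fields ... */
    amroutine->ambulkdelete = gistbulkdelete;       /* per-heap-pass tuple removal */
    amroutine->amvacuumcleanup = gistvacuumcleanup; /* end-of-VACUUM statistics/cleanup */

    PG_RETURN_POINTER(amroutine);
}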

◆ gistvacuumpage()

static void gistvacuumpage ( GistVacState *  vstate,
BlockNumber  blkno,
BlockNumber  orig_blkno 
)
static

Definition at line 272 of file gistvacuum.c.

273 {
274  IndexVacuumInfo *info = vstate->info;
275  IndexBulkDeleteCallback callback = vstate->callback;
276  void *callback_state = vstate->callback_state;
277  Relation rel = info->index;
278  Buffer buffer;
279  Page page;
280  BlockNumber recurse_to;
281 
282 restart:
283  recurse_to = InvalidBlockNumber;
284 
285  /* call vacuum_delay_point while not holding any buffer lock */
286  vacuum_delay_point();
287 
288  buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
289  info->strategy);
290 
291  /*
292  * We are not going to stay here for a long time, aggressively grab an
293  * exclusive lock.
294  */
295  LockBuffer(buffer, GIST_EXCLUSIVE);
296  page = (Page) BufferGetPage(buffer);
297 
298  if (gistPageRecyclable(page))
299  {
300  /* Okay to recycle this page */
301  RecordFreeIndexPage(rel, blkno);
302  vstate->stats->pages_deleted++;
303  vstate->stats->pages_free++;
304  }
305  else if (GistPageIsDeleted(page))
306  {
307  /* Already deleted, but can't recycle yet */
308  vstate->stats->pages_deleted++;
309  }
310  else if (GistPageIsLeaf(page))
311  {
312  OffsetNumber todelete[MaxOffsetNumber];
313  int ntodelete = 0;
314  int nremain;
315  GISTPageOpaque opaque = GistPageGetOpaque(page);
316  OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
317 
318  /*
319  * Check whether we need to recurse back to earlier pages. What we
320  * are concerned about is a page split that happened since we started
321  * the vacuum scan. If the split moved some tuples to a lower page
322  * then we might have missed 'em. If so, set up for tail recursion.
323  *
324  * This is similar to the checks we do during searches, when following
325  * a downlink, but we don't need to jump to higher-numbered pages,
326  * because we will process them later, anyway.
327  */
328  if ((GistFollowRight(page) ||
329  vstate->startNSN < GistPageGetNSN(page)) &&
330  (opaque->rightlink != InvalidBlockNumber) &&
331  (opaque->rightlink < orig_blkno))
332  {
333  recurse_to = opaque->rightlink;
334  }
335 
336  /*
337  * Scan over all items to see which ones need to be deleted according
338  * to the callback function.
339  */
340  if (callback)
341  {
342  OffsetNumber off;
343 
344  for (off = FirstOffsetNumber;
345  off <= maxoff;
346  off = OffsetNumberNext(off))
347  {
348  ItemId iid = PageGetItemId(page, off);
349  IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
350 
351  if (callback(&(idxtuple->t_tid), callback_state))
352  todelete[ntodelete++] = off;
353  }
354  }
355 
356  /*
357  * Apply any needed deletes. We issue just one WAL record per page,
358  * so as to minimize WAL traffic.
359  */
360  if (ntodelete > 0)
361  {
362  START_CRIT_SECTION();
363 
364  MarkBufferDirty(buffer);
365 
366  PageIndexMultiDelete(page, todelete, ntodelete);
367  GistMarkTuplesDeleted(page);
368 
369  if (RelationNeedsWAL(rel))
370  {
371  XLogRecPtr recptr;
372 
373  recptr = gistXLogUpdate(buffer,
374  todelete, ntodelete,
375  NULL, 0, InvalidBuffer);
376  PageSetLSN(page, recptr);
377  }
378  else
379  PageSetLSN(page, gistGetFakeLSN(rel));
380 
381  END_CRIT_SECTION();
382 
383  vstate->stats->tuples_removed += ntodelete;
384  /* must recompute maxoff */
385  maxoff = PageGetMaxOffsetNumber(page);
386  }
387 
388  nremain = maxoff - FirstOffsetNumber + 1;
389  if (nremain == 0)
390  {
391  /*
392  * The page is now completely empty. Remember its block number,
393  * so that we will try to delete the page in the second stage.
394  *
395  * Skip this when recursing, because IntegerSet requires that the
396  * values are added in ascending order. The next VACUUM will pick
397  * it up.
398  */
399  if (blkno == orig_blkno)
400  intset_add_member(vstate->empty_leaf_set, blkno);
401  }
402  else
403  vstate->stats->num_index_tuples += nremain;
404  }
405  else
406  {
407  /*
408  * On an internal page, check for "invalid tuples", left behind by an
409  * incomplete page split on PostgreSQL 9.0 or below. These are not
410  * created by newer PostgreSQL versions, but unfortunately, there is
411  * no version number anywhere in a GiST index, so we don't know
412  * whether this index might still contain invalid tuples or not.
413  */
414  OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
415  OffsetNumber off;
416 
417  for (off = FirstOffsetNumber;
418  off <= maxoff;
419  off = OffsetNumberNext(off))
420  {
421  ItemId iid = PageGetItemId(page, off);
422  IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
423 
424  if (GistTupleIsInvalid(idxtuple))
425  ereport(LOG,
426  (errmsg("index \"%s\" contains an inner tuple marked as invalid",
427  RelationGetRelationName(rel)),
428  errdetail("This is caused by an incomplete page split at crash recovery before upgrading to PostgreSQL 9.1."),
429  errhint("Please REINDEX it.")));
430  }
431 
432  /*
433  * Remember the block number of this page, so that we can revisit it
434  * later in gistvacuum_delete_empty_pages(), when we search for
435  * parents of empty leaf pages.
436  */
437  if (blkno == orig_blkno)
438  intset_add_member(vstate->internal_page_set, blkno);
439  }
440 
441  UnlockReleaseBuffer(buffer);
442 
443  /*
444  * This is really tail recursion, but if the compiler is too stupid to
445  * optimize it as such, we'd eat an uncomfortably large amount of stack
446  space per recursion level (due to the todelete[] array). A failure is
447  * improbable since the number of levels isn't likely to be large ... but
448  * just in case, let's hand-optimize into a loop.
449  */
450  if (recurse_to != InvalidBlockNumber)
451  {
452  blkno = recurse_to;
453  goto restart;
454  }
455 }
#define InvalidBlockNumber
Definition: block.h:33
#define InvalidBuffer
Definition: buf.h:25
void PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
Definition: bufpage.c:1150
int errdetail(const char *fmt,...)
Definition: elog.c:1203
int errhint(const char *fmt,...)
Definition: elog.c:1317
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define LOG
Definition: elog.h:31
#define ereport(elevel,...)
Definition: elog.h:149
bool(* IndexBulkDeleteCallback)(ItemPointer itemptr, void *state)
Definition: genam.h:87
#define GistMarkTuplesDeleted(page)
Definition: gist.h:175
#define GistPageGetOpaque(page)
Definition: gist.h:167
#define GistPageGetNSN(page)
Definition: gist.h:186
#define GistTupleIsInvalid(itup)
Definition: gist_private.h:288
bool gistPageRecyclable(Page page)
Definition: gistutil.c:888
XLogRecPtr gistXLogUpdate(Buffer buffer, OffsetNumber *todelete, int ntodelete, IndexTuple *itup, int ituplen, Buffer leftchildbuf)
Definition: gistxlog.c:629
void RecordFreeIndexPage(Relation rel, BlockNumber freeBlock)
Definition: indexfsm.c:52
void intset_add_member(IntegerSet *intset, uint64 x)
Definition: integerset.c:369
#define RelationGetRelationName(relation)
Definition: rel.h:539
BlockNumber rightlink
Definition: gist.h:80
void * callback_state
Definition: gistvacuum.c:33
IndexVacuumInfo * info
Definition: gistvacuum.c:30
IndexBulkDeleteCallback callback
Definition: gistvacuum.c:32
GistNSN startNSN
Definition: gistvacuum.c:34
BlockNumber pages_free
Definition: genam.h:83
double tuples_removed
Definition: genam.h:80
void vacuum_delay_point(void)
Definition: vacuum.c:2362

References BufferGetPage(), GistVacState::callback, callback(), GistVacState::callback_state, GistVacState::empty_leaf_set, END_CRIT_SECTION, ereport, errdetail(), errhint(), errmsg(), FirstOffsetNumber, GIST_EXCLUSIVE, GistFollowRight, gistGetFakeLSN(), GistMarkTuplesDeleted, GistPageGetNSN, GistPageGetOpaque, GistPageIsDeleted, GistPageIsLeaf, gistPageRecyclable(), GistTupleIsInvalid, gistXLogUpdate(), IndexVacuumInfo::index, GistVacState::info, GistVacState::internal_page_set, intset_add_member(), InvalidBlockNumber, InvalidBuffer, LockBuffer(), LOG, MAIN_FORKNUM, MarkBufferDirty(), MaxOffsetNumber, IndexBulkDeleteResult::num_index_tuples, OffsetNumberNext, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageIndexMultiDelete(), IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, PageSetLSN(), RBM_NORMAL, ReadBufferExtended(), RecordFreeIndexPage(), RelationGetRelationName, RelationNeedsWAL, GISTPageOpaqueData::rightlink, START_CRIT_SECTION, GistVacState::startNSN, GistVacState::stats, IndexVacuumInfo::strategy, IndexTupleData::t_tid, IndexBulkDeleteResult::tuples_removed, UnlockReleaseBuffer(), and vacuum_delay_point().

Referenced by gistvacuumscan().
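
The split-detection test in the leaf branch above can be read as a standalone predicate: recurse only when the page was split after the scan started (pending FollowRight flag, or page NSN newer than the scan's startNSN) and its right sibling lies before the block the outer scan is currently at, so the physical-order scan would otherwise miss it. A hypothetical restatement (example_must_recurse is not a real function):

#include "postgres.h"
#include "access/gist.h"
#include "storage/block.h"
#include "storage/bufpage.h"

/* Illustrative predicate mirroring the recursion test in gistvacuumpage(). */
static bool
example_must_recurse(Page page, GistNSN startNSN, BlockNumber orig_blkno)
{
    GISTPageOpaque opaque = GistPageGetOpaque(page);

    return (GistFollowRight(page) || startNSN < GistPageGetNSN(page)) &&
           opaque->rightlink != InvalidBlockNumber &&
           opaque->rightlink < orig_blkno;
}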

◆ gistvacuumscan()

static void gistvacuumscan ( IndexVacuumInfo *  info,
IndexBulkDeleteResult *  stats,
IndexBulkDeleteCallback  callback,
void *  callback_state 
)
static

Definition at line 125 of file gistvacuum.c.

127 {
128  Relation rel = info->index;
129  GistVacState vstate;
130  BlockNumber num_pages;
131  bool needLock;
132  BlockNumber blkno;
133  MemoryContext oldctx;
134 
135  /*
136  * Reset fields that track information about the entire index now. This
137  * avoids double-counting in the case where a single VACUUM command
138  * requires multiple scans of the index.
139  *
140  * Avoid resetting the tuples_removed and pages_newly_deleted fields here,
141  * since they track information about the VACUUM command, and so must last
142  * across each call to gistvacuumscan().
143  *
144  * (Note that pages_free is treated as state about the whole index, not
145  * the current VACUUM. This is appropriate because RecordFreeIndexPage()
146  * calls are idempotent, and get repeated for the same deleted pages in
147  * some scenarios. The point for us is to track the number of recyclable
148  * pages in the index at the end of the VACUUM command.)
149  */
150  stats->num_pages = 0;
151  stats->estimated_count = false;
152  stats->num_index_tuples = 0;
153  stats->pages_deleted = 0;
154  stats->pages_free = 0;
155 
156  /*
157  * Create the integer sets to remember all the internal and the empty leaf
158  * pages in page_set_context. Internally, the integer set will remember
159  * this context so that the subsequent allocations for these integer sets
160  * will be done from the same context.
161  *
162  * XXX the allocation sizes used below pre-date generation context's block
163  * growing code. These values should likely be benchmarked and set to
164  * more suitable values.
165  */
167  "GiST VACUUM page set context",
168  16 * 1024,
169  16 * 1024,
170  16 * 1024);
171  oldctx = MemoryContextSwitchTo(vstate.page_set_context);
172  vstate.internal_page_set = intset_create();
173  vstate.empty_leaf_set = intset_create();
174  MemoryContextSwitchTo(oldctx);
175 
176  /* Set up info to pass down to gistvacuumpage */
177  vstate.info = info;
178  vstate.stats = stats;
179  vstate.callback = callback;
180  vstate.callback_state = callback_state;
181  if (RelationNeedsWAL(rel))
182  vstate.startNSN = GetInsertRecPtr();
183  else
184  vstate.startNSN = gistGetFakeLSN(rel);
185 
186  /*
187  * The outer loop iterates over all index pages, in physical order (we
188  * hope the kernel will cooperate in providing read-ahead for speed). It
189  * is critical that we visit all leaf pages, including ones added after we
190  * start the scan, else we might fail to delete some deletable tuples.
191  * Hence, we must repeatedly check the relation length. We must acquire
192  * the relation-extension lock while doing so to avoid a race condition:
193  * if someone else is extending the relation, there is a window where
194  * bufmgr/smgr have created a new all-zero page but it hasn't yet been
195  * write-locked by gistNewBuffer(). If we manage to scan such a page
196  * here, we'll improperly assume it can be recycled. Taking the lock
197  * synchronizes things enough to prevent a problem: either num_pages won't
198  * include the new page, or gistNewBuffer already has write lock on the
199  * buffer and it will be fully initialized before we can examine it. (See
200  * also vacuumlazy.c, which has the same issue.) Also, we need not worry
201  * if a page is added immediately after we look; the page splitting code
202  * already has write-lock on the left page before it adds a right page, so
203  * we must already have processed any tuples due to be moved into such a
204  * page.
205  *
206  * We can skip locking for new or temp relations, however, since no one
207  * else could be accessing them.
208  */
209  needLock = !RELATION_IS_LOCAL(rel);
210 
211  blkno = GIST_ROOT_BLKNO;
212  for (;;)
213  {
214  /* Get the current relation length */
215  if (needLock)
216  LockRelationForExtension(rel, ExclusiveLock);
217  num_pages = RelationGetNumberOfBlocks(rel);
218  if (needLock)
219  UnlockRelationForExtension(rel, ExclusiveLock);
220 
221  /* Quit if we've scanned the whole relation */
222  if (blkno >= num_pages)
223  break;
224  /* Iterate over pages, then loop back to recheck length */
225  for (; blkno < num_pages; blkno++)
226  gistvacuumpage(&vstate, blkno, blkno);
227  }
228 
229  /*
230  * If we found any recyclable pages (and recorded them in the FSM), then
231  * forcibly update the upper-level FSM pages to ensure that searchers can
232  * find them. It's possible that the pages were also found during
233  * previous scans and so this is a waste of time, but it's cheap enough
234  * relative to scanning the index that it shouldn't matter much, and
235  * making sure that free pages are available sooner not later seems
236  * worthwhile.
237  *
238  * Note that if no recyclable pages exist, we don't bother vacuuming the
239  * FSM at all.
240  */
241  if (stats->pages_free > 0)
242  IndexFreeSpaceMapVacuum(rel);
243 
244  /* update statistics */
245  stats->num_pages = num_pages;
246 
247  /*
248  * If we saw any empty pages, try to unlink them from the tree so that
249  * they can be reused.
250  */
251  gistvacuum_delete_empty_pages(info, &vstate);
252 
253  /* we don't need the internal and empty page sets anymore */
254  MemoryContextDelete(vstate.page_set_context);
255  vstate.page_set_context = NULL;
256  vstate.internal_page_set = NULL;
257  vstate.empty_leaf_set = NULL;
258 }
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:273
MemoryContext GenerationContextCreate(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: generation.c:160
#define GIST_ROOT_BLKNO
Definition: gist_private.h:262
static void gistvacuumpage(GistVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno)
Definition: gistvacuum.c:272
static void gistvacuum_delete_empty_pages(IndexVacuumInfo *info, GistVacState *vstate)
Definition: gistvacuum.c:461
void IndexFreeSpaceMapVacuum(Relation rel)
Definition: indexfsm.c:71
IntegerSet * intset_create(void)
Definition: integerset.c:283
void LockRelationForExtension(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:419
void UnlockRelationForExtension(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:469
#define ExclusiveLock
Definition: lockdefs.h:42
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:454
MemoryContextSwitchTo(old_ctx)
#define RELATION_IS_LOCAL(relation)
Definition: rel.h:648
MemoryContext page_set_context
Definition: gistvacuum.c:42
bool estimated_count
Definition: genam.h:78
BlockNumber num_pages
Definition: genam.h:77
XLogRecPtr GetInsertRecPtr(void)
Definition: xlog.c:6482

References GistVacState::callback, callback(), GistVacState::callback_state, CurrentMemoryContext, GistVacState::empty_leaf_set, IndexBulkDeleteResult::estimated_count, ExclusiveLock, GenerationContextCreate(), GetInsertRecPtr(), GIST_ROOT_BLKNO, gistGetFakeLSN(), gistvacuum_delete_empty_pages(), gistvacuumpage(), IndexVacuumInfo::index, IndexFreeSpaceMapVacuum(), GistVacState::info, GistVacState::internal_page_set, intset_create(), LockRelationForExtension(), MemoryContextDelete(), MemoryContextSwitchTo(), IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, GistVacState::page_set_context, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, RELATION_IS_LOCAL, RelationGetNumberOfBlocks, RelationNeedsWAL, GistVacState::startNSN, GistVacState::stats, and UnlockRelationForExtension().

Referenced by gistbulkdelete(), and gistvacuumcleanup().
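
The two IntegerSets used by this scan (internal_page_set and empty_leaf_set) follow the lib/integerset.h pattern sketched below: members are added in ascending block-number order during the physical scan, probed with intset_is_member() in the second stage, and walked with the iterator. The wrapper function and the literal block numbers are illustrative only.

#include "postgres.h"
#include "lib/integerset.h"

/* Illustrative use of the IntegerSet API as employed by gistvacuumscan(). */
static void
example_intset_usage(void)
{
    IntegerSet *empty_leaves = intset_create();
    uint64      blkno;

    /* stage 1: remember empty leaf blocks, in ascending (physical) order */
    intset_add_member(empty_leaves, 4);
    intset_add_member(empty_leaves, 17);
    intset_add_member(empty_leaves, 42);

    /* stage 2: per-downlink membership probe, as in
     * gistvacuum_delete_empty_pages() */
    if (intset_is_member(empty_leaves, 17))
    {
        /* ... this leaf would be handed to gistdeletepage() ... */
    }

    /* walk all remembered blocks */
    intset_begin_iterate(empty_leaves);
    while (intset_iterate_next(empty_leaves, &blkno))
        elog(DEBUG1, "empty leaf block " UINT64_FORMAT, blkno);
}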