PostgreSQL Source Code  git master
verify_nbtree.c File Reference
#include "postgres.h"
#include "access/htup_details.h"
#include "access/nbtree.h"
#include "access/table.h"
#include "access/tableam.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/index.h"
#include "catalog/pg_am.h"
#include "commands/tablecmds.h"
#include "lib/bloomfilter.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
Include dependency graph for verify_nbtree.c:

Go to the source code of this file.

Data Structures

struct  BtreeCheckState
 
struct  BtreeLevel
 

Macros

#define InvalidBtreeLevel   ((uint32) InvalidBlockNumber)
 
#define BTreeTupleGetNKeyAtts(itup, rel)   Min(IndexRelationGetNumberOfKeyAttributes(rel), BTreeTupleGetNAtts(itup, rel))
 

Typedefs

typedef struct BtreeCheckState BtreeCheckState
 
typedef struct BtreeLevel BtreeLevel
 

Functions

 PG_FUNCTION_INFO_V1 (bt_index_check)
 
 PG_FUNCTION_INFO_V1 (bt_index_parent_check)
 
static void bt_index_check_internal (Oid indrelid, bool parentcheck, bool heapallindexed, bool rootdescend)
 
static void btree_index_checkable (Relation rel)
 
static bool btree_index_mainfork_expected (Relation rel)
 
static void bt_check_every_level (Relation rel, Relation heaprel, bool heapkeyspace, bool readonly, bool heapallindexed, bool rootdescend)
 
static BtreeLevel bt_check_level_from_leftmost (BtreeCheckState *state, BtreeLevel level)
 
static void bt_target_page_check (BtreeCheckState *state)
 
static BTScanInsert bt_right_page_check_scankey (BtreeCheckState *state)
 
static void bt_downlink_check (BtreeCheckState *state, BTScanInsert targetkey, BlockNumber childblock)
 
static void bt_downlink_missing_check (BtreeCheckState *state)
 
static void bt_tuple_present_callback (Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *checkstate)
 
static IndexTuple bt_normalize_tuple (BtreeCheckState *state, IndexTuple itup)
 
static IndexTuple bt_posting_plain_tuple (IndexTuple itup, int n)
 
static bool bt_rootdescend (BtreeCheckState *state, IndexTuple itup)
 
static bool offset_is_negative_infinity (BTPageOpaque opaque, OffsetNumber offset)
 
static bool invariant_l_offset (BtreeCheckState *state, BTScanInsert key, OffsetNumber upperbound)
 
static bool invariant_leq_offset (BtreeCheckState *state, BTScanInsert key, OffsetNumber upperbound)
 
static bool invariant_g_offset (BtreeCheckState *state, BTScanInsert key, OffsetNumber lowerbound)
 
static bool invariant_l_nontarget_offset (BtreeCheckState *state, BTScanInsert key, BlockNumber nontargetblock, Page nontarget, OffsetNumber upperbound)
 
static Page palloc_btree_page (BtreeCheckState *state, BlockNumber blocknum)
 
static BTScanInsert bt_mkscankey_pivotsearch (Relation rel, IndexTuple itup)
 
static ItemId PageGetItemIdCareful (BtreeCheckState *state, BlockNumber block, Page page, OffsetNumber offset)
 
static ItemPointer BTreeTupleGetHeapTIDCareful (BtreeCheckState *state, IndexTuple itup, bool nonpivot)
 
static ItemPointer BTreeTupleGetPointsToTID (IndexTuple itup)
 
Datum bt_index_check (PG_FUNCTION_ARGS)
 
Datum bt_index_parent_check (PG_FUNCTION_ARGS)
 

Variables

 PG_MODULE_MAGIC
 

Macro Definition Documentation

◆ BTreeTupleGetNKeyAtts

#define BTreeTupleGetNKeyAtts(itup, rel)   Min(IndexRelationGetNumberOfKeyAttributes(rel), BTreeTupleGetNAtts(itup, rel))

Definition at line 50 of file verify_nbtree.c.

Referenced by invariant_l_nontarget_offset(), and invariant_l_offset().
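
Because suffix truncation allows pivot tuples to store fewer attributes than the index has key columns, code that walks tuple attributes has to clamp to what the tuple actually carries, which is exactly what the Min() in this macro does. A minimal sketch of that pattern (the helper name and loop body are illustrative only, and the sketch assumes it sits in verify_nbtree.c so the macro is in scope):

#include "postgres.h"
#include "access/itup.h"
#include "access/nbtree.h"
#include "utils/rel.h"

/* Illustrative only: visit just the key attributes that a (possibly
 * suffix-truncated) tuple actually stores. */
static void
sketch_walk_key_atts(Relation rel, IndexTuple itup)
{
	TupleDesc	tupdesc = RelationGetDescr(rel);
	int			natts = BTreeTupleGetNKeyAtts(itup, rel);
	int			attnum;

	for (attnum = 1; attnum <= natts; attnum++)
	{
		bool		isnull;
		Datum		datum = index_getattr(itup, attnum, tupdesc, &isnull);

		/* ... compare datum/isnull against something here ... */
		(void) datum;
		(void) isnull;
	}
}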

◆ InvalidBtreeLevel

#define InvalidBtreeLevel   ((uint32) InvalidBlockNumber)

Definition at line 49 of file verify_nbtree.c.

Referenced by bt_check_every_level(), and bt_check_level_from_leftmost().

Typedef Documentation

◆ BtreeCheckState

typedef struct BtreeCheckState BtreeCheckState

◆ BtreeLevel

typedef struct BtreeLevel BtreeLevel

Function Documentation

◆ bt_check_every_level()

static void bt_check_every_level (Relation rel, Relation heaprel, bool heapkeyspace, bool readonly, bool heapallindexed, bool rootdescend)

Definition at line 389 of file verify_nbtree.c.

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert, BAS_BULKREAD, bloom_create(), bloom_free(), bloom_prop_bits_set(), bt_check_level_from_leftmost(), bt_tuple_present_callback(), BTMetaPageData::btm_fastlevel, BTMetaPageData::btm_fastroot, BTMetaPageData::btm_level, BTMetaPageData::btm_root, BTPageGetMeta, BTREE_METAPAGE, BuildIndexInfo(), BtreeCheckState::checkstrategy, CurrentMemoryContext, DEBUG1, BtreeCheckState::downlinkfilter, elog, ereport, errcode(), errdetail_internal(), errhint(), errmsg(), errmsg_internal(), ERROR, BtreeCheckState::filter, GetAccessStrategy(), GetTransactionSnapshot(), BtreeCheckState::heapallindexed, BtreeCheckState::heapkeyspace, BtreeCheckState::heaprel, HeapTupleHeaderGetXmin, BtreeCheckState::heaptuplespresent, IndexInfo::ii_Concurrent, IndexInfo::ii_ExclusionOps, IndexInfo::ii_ExclusionProcs, IndexInfo::ii_ExclusionStrats, IndexInfo::ii_Unique, INT64_FORMAT, InvalidBlockNumber, InvalidBtreeLevel, IsolationUsesXactSnapshot, BtreeLevel::istruerootlevel, BtreeLevel::leftmost, BtreeLevel::level, maintenance_work_mem, Max, MaxTIDsPerBTreePage, MemoryContextDelete(), P_NONE, palloc0(), palloc_btree_page(), random(), RelationData::rd_index, RelationData::rd_indextuple, RelationData::rd_rel, BtreeCheckState::readonly, RecentGlobalXmin, RegisterSnapshot(), BtreeCheckState::rel, RelationGetNumberOfBlocks, RelationGetRelationName, BtreeCheckState::rightsplit, BtreeCheckState::rootdescend, SnapshotAny, HeapTupleData::t_data, table_beginscan_strat(), table_index_build_scan(), BtreeCheckState::targetcontext, TransactionIdIsValid, TransactionIdPrecedes(), UnregisterSnapshot(), work_mem, and SnapshotData::xmin.

Referenced by bt_index_check_internal().

391 {
392  BtreeCheckState *state;
393  Page metapage;
394  BTMetaPageData *metad;
395  uint32 previouslevel;
396  BtreeLevel current;
397  Snapshot snapshot = SnapshotAny;
398 
399  /*
400  * RecentGlobalXmin assertion matches index_getnext_tid(). See note on
401  * RecentGlobalXmin/B-Tree page deletion.
402  */
403  Assert(TransactionIdIsValid(RecentGlobalXmin));
404 
405  /*
406  * Initialize state for entire verification operation
407  */
408  state = palloc0(sizeof(BtreeCheckState));
409  state->rel = rel;
410  state->heaprel = heaprel;
411  state->heapkeyspace = heapkeyspace;
412  state->readonly = readonly;
413  state->heapallindexed = heapallindexed;
414  state->rootdescend = rootdescend;
415 
416  if (state->heapallindexed)
417  {
418  int64 total_pages;
419  int64 total_elems;
420  uint64 seed;
421 
422  /*
423  * Size Bloom filter based on estimated number of tuples in index,
424  * while conservatively assuming that each block must contain at least
425  * MaxTIDsPerBTreePage / 3 "plain" tuples -- see
426  * bt_posting_plain_tuple() for definition, and details of how posting
427  * list tuples are handled.
428  */
429  total_pages = RelationGetNumberOfBlocks(rel);
430  total_elems = Max(total_pages * (MaxTIDsPerBTreePage / 3),
431  (int64) state->rel->rd_rel->reltuples);
432  /* Random seed relies on backend srandom() call to avoid repetition */
433  seed = random();
434  /* Create Bloom filter to fingerprint index */
435  state->filter = bloom_create(total_elems, maintenance_work_mem, seed);
436  state->heaptuplespresent = 0;
437 
438  /*
439  * Register our own snapshot in !readonly case, rather than asking
440  * table_index_build_scan() to do this for us later. This needs to
441  * happen before index fingerprinting begins, so we can later be
442  * certain that index fingerprinting should have reached all tuples
443  * returned by table_index_build_scan().
444  *
445  * In readonly case, we also check for problems with missing
446  * downlinks. A second Bloom filter is used for this.
447  */
448  if (!state->readonly)
449  {
450  snapshot = RegisterSnapshot(GetTransactionSnapshot());
451 
452  /*
453  * GetTransactionSnapshot() always acquires a new MVCC snapshot in
454  * READ COMMITTED mode. A new snapshot is guaranteed to have all
455  * the entries it requires in the index.
456  *
457  * We must defend against the possibility that an old xact
458  * snapshot was returned at higher isolation levels when that
459  * snapshot is not safe for index scans of the target index. This
460  * is possible when the snapshot sees tuples that are before the
461  * index's indcheckxmin horizon. Throwing an error here should be
462  * very rare. It doesn't seem worth using a secondary snapshot to
463  * avoid this.
464  */
465  if (IsolationUsesXactSnapshot() && rel->rd_index->indcheckxmin &&
466  !TransactionIdPrecedes(HeapTupleHeaderGetXmin(rel->rd_indextuple->t_data),
467  snapshot->xmin))
468  ereport(ERROR,
469  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
470  errmsg("index \"%s\" cannot be verified using transaction snapshot",
471  RelationGetRelationName(rel))));
472  }
473  else
474  {
475  /*
476  * Extra readonly downlink check.
477  *
478  * In readonly case, we know that there cannot be a concurrent
479  * page split or a concurrent page deletion, which gives us the
480  * opportunity to verify that every non-ignorable page had a
481  * downlink one level up. We must be tolerant of interrupted page
482  * splits and page deletions, though. This is taken care of in
483  * bt_downlink_missing_check().
484  */
485  state->downlinkfilter = bloom_create(total_pages, work_mem, seed);
486  }
487  }
488 
489  Assert(!state->rootdescend || state->readonly);
490  if (state->rootdescend && !state->heapkeyspace)
491  ereport(ERROR,
492  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
493  errmsg("cannot verify that tuples from index \"%s\" can each be found by an independent index search",
494  RelationGetRelationName(rel)),
495  errhint("Only B-Tree version 4 indexes support rootdescend verification.")));
496 
497  /* Create context for page */
498  state->targetcontext = AllocSetContextCreate(CurrentMemoryContext,
499  "amcheck context",
500  ALLOCSET_DEFAULT_SIZES);
501  state->checkstrategy = GetAccessStrategy(BAS_BULKREAD);
502 
503  /* Get true root block from meta-page */
504  metapage = palloc_btree_page(state, BTREE_METAPAGE);
505  metad = BTPageGetMeta(metapage);
506 
507  /*
508  * Certain deletion patterns can result in "skinny" B-Tree indexes, where
509  * the fast root and true root differ.
510  *
511  * Start from the true root, not the fast root, unlike conventional index
512  * scans. This approach is more thorough, and removes the risk of
513  * following a stale fast root from the meta page.
514  */
515  if (metad->btm_fastroot != metad->btm_root)
516  ereport(DEBUG1,
517  (errcode(ERRCODE_NO_DATA),
518  errmsg("harmless fast root mismatch in index %s",
519  RelationGetRelationName(rel)),
520  errdetail_internal("Fast root block %u (level %u) differs from true root block %u (level %u).",
521  metad->btm_fastroot, metad->btm_fastlevel,
522  metad->btm_root, metad->btm_level)));
523 
524  /*
525  * Starting at the root, verify every level. Move left to right, top to
526  * bottom. Note that there may be no pages other than the meta page (meta
527  * page can indicate that root is P_NONE when the index is totally empty).
528  */
529  previouslevel = InvalidBtreeLevel;
530  current.level = metad->btm_level;
531  current.leftmost = metad->btm_root;
532  current.istruerootlevel = true;
533  while (current.leftmost != P_NONE)
534  {
535  /*
536  * Leftmost page on level cannot be right half of incomplete split.
537  * This can go stale immediately in !readonly case.
538  */
539  state->rightsplit = false;
540 
541  /*
542  * Verify this level, and get left most page for next level down, if
543  * not at leaf level
544  */
545  current = bt_check_level_from_leftmost(state, current);
546 
547  if (current.leftmost == InvalidBlockNumber)
548  ereport(ERROR,
549  (errcode(ERRCODE_INDEX_CORRUPTED),
550  errmsg("index \"%s\" has no valid pages on level below %u or first level",
551  RelationGetRelationName(rel), previouslevel)));
552 
553  previouslevel = current.level;
554  }
555 
556  /*
557  * * Check whether heap contains unindexed/malformed tuples *
558  */
559  if (state->heapallindexed)
560  {
561  IndexInfo *indexinfo = BuildIndexInfo(state->rel);
562  TableScanDesc scan;
563 
564  /* Report on extra downlink checks performed in readonly case */
565  if (state->readonly)
566  {
567  ereport(DEBUG1,
568  (errmsg_internal("finished verifying presence of downlink blocks within index \"%s\" with bitset %.2f%% set",
569  RelationGetRelationName(rel),
570  100.0 * bloom_prop_bits_set(state->downlinkfilter))));
571  bloom_free(state->downlinkfilter);
572  }
573 
574  /*
575  * Create our own scan for table_index_build_scan(), rather than
576  * getting it to do so for us. This is required so that we can
577  * actually use the MVCC snapshot registered earlier in !readonly
578  * case.
579  *
580  * Note that table_index_build_scan() calls heap_endscan() for us.
581  */
582  scan = table_beginscan_strat(state->heaprel, /* relation */
583  snapshot, /* snapshot */
584  0, /* number of keys */
585  NULL, /* scan key */
586  true, /* buffer access strategy OK */
587  true); /* syncscan OK? */
588 
589  /*
590  * Scan will behave as the first scan of a CREATE INDEX CONCURRENTLY
591  * behaves in !readonly case.
592  *
593  * It's okay that we don't actually use the same lock strength for the
594  * heap relation as any other ii_Concurrent caller would in !readonly
595  * case. We have no reason to care about a concurrent VACUUM
596  * operation, since there isn't going to be a second scan of the heap
597  * that needs to be sure that there was no concurrent recycling of
598  * TIDs.
599  */
600  indexinfo->ii_Concurrent = !state->readonly;
601 
602  /*
603  * Don't wait for uncommitted tuple xact commit/abort when index is a
604  * unique index on a catalog (or an index used by an exclusion
605  * constraint). This could otherwise happen in the readonly case.
606  */
607  indexinfo->ii_Unique = false;
608  indexinfo->ii_ExclusionOps = NULL;
609  indexinfo->ii_ExclusionProcs = NULL;
610  indexinfo->ii_ExclusionStrats = NULL;
611 
612  elog(DEBUG1, "verifying that tuples from index \"%s\" are present in \"%s\"",
613  RelationGetRelationName(state->rel),
614  RelationGetRelationName(state->heaprel));
615 
616  table_index_build_scan(state->heaprel, state->rel, indexinfo, true, false,
617  bt_tuple_present_callback, (void *) state, scan);
618 
619  ereport(DEBUG1,
620  (errmsg_internal("finished verifying presence of " INT64_FORMAT " tuples from table \"%s\" with bitset %.2f%% set",
621  state->heaptuplespresent, RelationGetRelationName(heaprel),
622  100.0 * bloom_prop_bits_set(state->filter))));
623 
624  if (snapshot != SnapshotAny)
625  UnregisterSnapshot(snapshot);
626 
627  bloom_free(state->filter);
628  }
629 
630  /* Be tidy: */
631  MemoryContextDelete(state->targetcontext);
632 }
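
The heapallindexed machinery set up above boils down to a fingerprint-and-probe scheme: every normalized index tuple is added to the Bloom filter while the tree is walked, and every tuple produced by the later table_index_build_scan() pass is probed against it. A simplified, hedged sketch of that pairing using PostgreSQL's lib/bloomfilter.h API (the function names, the file-level filter variable, and the raw-bytes encoding are illustrative stand-ins for what bt_target_page_check() and bt_tuple_present_callback() actually do):

#include "postgres.h"
#include "access/itup.h"
#include "lib/bloomfilter.h"

static bloom_filter *sketch_filter;	/* stands in for state->filter */

/* Index-walk side: fingerprint each tuple image. */
static void
sketch_fingerprint_index_tuple(IndexTuple itup)
{
	bloom_add_element(sketch_filter, (unsigned char *) itup,
					  IndexTupleSize(itup));
}

/* Heap-scan side: a definite miss means the heap tuple is not indexed.
 * False positives are possible (it is a Bloom filter); false negatives
 * are not, so a miss is safe to report. */
static void
sketch_probe_heap_tuple(IndexTuple itup_built_from_heap)
{
	if (bloom_lacks_element(sketch_filter,
							(unsigned char *) itup_built_from_heap,
							IndexTupleSize(itup_built_from_heap)))
		elog(ERROR, "heap tuple has no matching index tuple");
}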

◆ bt_check_level_from_leftmost()

static BtreeLevel bt_check_level_from_leftmost (BtreeCheckState *state, BtreeLevel level)

Definition at line 653 of file verify_nbtree.c.

References bt_target_page_check(), BTPageOpaqueData::btpo, BTPageOpaqueData::btpo_next, BTPageOpaqueData::btpo_prev, BTreeTupleGetDownLink(), CHECK_FOR_INTERRUPTS, DEBUG1, DEBUG2, elog, ereport, errcode(), errdetail_internal(), errmsg(), ERROR, InvalidBlockNumber, InvalidBtreeLevel, BtreeLevel::istruerootlevel, BtreeLevel::leftmost, BTPageOpaqueData::level, BtreeLevel::level, MemoryContextReset(), MemoryContextSwitchTo(), P_FIRSTDATAKEY, P_IGNORE, P_INCOMPLETE_SPLIT, P_ISDELETED, P_ISLEAF, P_ISROOT, P_LEFTMOST, P_NONE, P_RIGHTMOST, PageGetItem, PageGetItemIdCareful(), PageGetLSN, PageGetSpecialPointer, palloc_btree_page(), BtreeCheckState::readonly, BtreeCheckState::rel, RelationGetRelationName, BtreeCheckState::rightsplit, BtreeCheckState::target, BtreeCheckState::targetblock, BtreeCheckState::targetcontext, and BtreeCheckState::targetlsn.

Referenced by bt_check_every_level().

654 {
655  /* State to establish early, concerning entire level */
656  BTPageOpaque opaque;
657  MemoryContext oldcontext;
658  BtreeLevel nextleveldown;
659 
660  /* Variables for iterating across level using right links */
661  BlockNumber leftcurrent = P_NONE;
662  BlockNumber current = level.leftmost;
663 
664  /* Initialize return state */
665  nextleveldown.leftmost = InvalidBlockNumber;
666  nextleveldown.level = InvalidBtreeLevel;
667  nextleveldown.istruerootlevel = false;
668 
669  /* Use page-level context for duration of this call */
670  oldcontext = MemoryContextSwitchTo(state->targetcontext);
671 
672  elog(DEBUG2, "verifying level %u%s", level.level,
673  level.istruerootlevel ?
674  " (true root level)" : level.level == 0 ? " (leaf level)" : "");
675 
676  do
677  {
678  /* Don't rely on CHECK_FOR_INTERRUPTS() calls at lower level */
679  CHECK_FOR_INTERRUPTS();
680 
681  /* Initialize state for this iteration */
682  state->targetblock = current;
683  state->target = palloc_btree_page(state, state->targetblock);
684  state->targetlsn = PageGetLSN(state->target);
685 
686  opaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
687 
688  if (P_IGNORE(opaque))
689  {
690  /*
691  * Since there cannot be a concurrent VACUUM operation in readonly
692  * mode, and since a page has no links within other pages
693  * (siblings and parent) once it is marked fully deleted, it
694  * should be impossible to land on a fully deleted page in
695  * readonly mode. See bt_downlink_check() for further details.
696  *
697  * The bt_downlink_check() P_ISDELETED() check is repeated here so
698  * that pages that are only reachable through sibling links get
699  * checked.
700  */
701  if (state->readonly && P_ISDELETED(opaque))
702  ereport(ERROR,
703  (errcode(ERRCODE_INDEX_CORRUPTED),
704  errmsg("downlink or sibling link points to deleted block in index \"%s\"",
705  RelationGetRelationName(state->rel)),
706  errdetail_internal("Block=%u left block=%u left link from block=%u.",
707  current, leftcurrent, opaque->btpo_prev)));
708 
709  if (P_RIGHTMOST(opaque))
710  ereport(ERROR,
711  (errcode(ERRCODE_INDEX_CORRUPTED),
712  errmsg("block %u fell off the end of index \"%s\"",
713  current, RelationGetRelationName(state->rel))));
714  else
715  ereport(DEBUG1,
716  (errcode(ERRCODE_NO_DATA),
717  errmsg("block %u of index \"%s\" ignored",
718  current, RelationGetRelationName(state->rel))));
719  goto nextpage;
720  }
721  else if (nextleveldown.leftmost == InvalidBlockNumber)
722  {
723  /*
724  * A concurrent page split could make the caller supplied leftmost
725  * block no longer contain the leftmost page, or no longer be the
726  * true root, but where that isn't possible due to heavyweight
727  * locking, check that the first valid page meets caller's
728  * expectations.
729  */
730  if (state->readonly)
731  {
732  if (!P_LEFTMOST(opaque))
733  ereport(ERROR,
734  (errcode(ERRCODE_INDEX_CORRUPTED),
735  errmsg("block %u is not leftmost in index \"%s\"",
736  current, RelationGetRelationName(state->rel))));
737 
738  if (level.istruerootlevel && !P_ISROOT(opaque))
739  ereport(ERROR,
740  (errcode(ERRCODE_INDEX_CORRUPTED),
741  errmsg("block %u is not true root in index \"%s\"",
742  current, RelationGetRelationName(state->rel))));
743  }
744 
745  /*
746  * Before beginning any non-trivial examination of level, prepare
747  * state for the next bt_check_level_from_leftmost() invocation,
748  * which will verify the next level down (if any).
749  *
750  * There should be at least one non-ignorable page per level,
751  * unless this is the leaf level, which is assumed by caller to be
752  * final level.
753  */
754  if (!P_ISLEAF(opaque))
755  {
756  IndexTuple itup;
757  ItemId itemid;
758 
759  /* Internal page -- downlink gets leftmost on next level */
760  itemid = PageGetItemIdCareful(state, state->targetblock,
761  state->target,
762  P_FIRSTDATAKEY(opaque));
763  itup = (IndexTuple) PageGetItem(state->target, itemid);
764  nextleveldown.leftmost = BTreeTupleGetDownLink(itup);
765  nextleveldown.level = opaque->btpo.level - 1;
766  }
767  else
768  {
769  /*
770  * Leaf page -- final level caller must process.
771  *
772  * Note that this could also be the root page, if there has
773  * been no root page split yet.
774  */
775  nextleveldown.leftmost = P_NONE;
776  nextleveldown.level = InvalidBtreeLevel;
777  }
778 
779  /*
780  * Finished setting up state for this call/level. Control will
781  * never end up back here in any future loop iteration for this
782  * level.
783  */
784  }
785 
786  /*
787  * readonly mode can only ever land on live pages and half-dead pages,
788  * so sibling pointers should always be in mutual agreement
789  */
790  if (state->readonly && opaque->btpo_prev != leftcurrent)
791  ereport(ERROR,
792  (errcode(ERRCODE_INDEX_CORRUPTED),
793  errmsg("left link/right link pair in index \"%s\" not in agreement",
794  RelationGetRelationName(state->rel)),
795  errdetail_internal("Block=%u left block=%u left link from block=%u.",
796  current, leftcurrent, opaque->btpo_prev)));
797 
798  /* Check level, which must be valid for non-ignorable page */
799  if (level.level != opaque->btpo.level)
800  ereport(ERROR,
801  (errcode(ERRCODE_INDEX_CORRUPTED),
802  errmsg("leftmost down link for level points to block in index \"%s\" whose level is not one level down",
803  RelationGetRelationName(state->rel)),
804  errdetail_internal("Block pointed to=%u expected level=%u level in pointed to block=%u.",
805  current, level.level, opaque->btpo.level)));
806 
807  /* Verify invariants for page */
808  bt_target_page_check(state);
809 
810 nextpage:
811 
812  /* Try to detect circular links */
813  if (current == leftcurrent || current == opaque->btpo_prev)
814  ereport(ERROR,
815  (errcode(ERRCODE_INDEX_CORRUPTED),
816  errmsg("circular link chain found in block %u of index \"%s\"",
817  current, RelationGetRelationName(state->rel))));
818 
819  /*
820  * Record if page that is about to become target is the right half of
821  * an incomplete page split. This can go stale immediately in
822  * !readonly case.
823  */
824  state->rightsplit = P_INCOMPLETE_SPLIT(opaque);
825 
826  leftcurrent = current;
827  current = opaque->btpo_next;
828 
829  /* Free page and associated memory for this iteration */
830  MemoryContextReset(state->targetcontext);
831  }
832  while (current != P_NONE);
833 
834  /* Don't change context for caller */
835  MemoryContextSwitchTo(oldcontext);
836 
837  return nextleveldown;
838 }
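
Stripped of the error reporting and the per-level bookkeeping, the loop above is a plain right-link walk. A condensed, hedged sketch (assumed to live in verify_nbtree.c so the static palloc_btree_page() and the BtreeCheckState struct are visible; the error messages are abbreviated stand-ins):

#include "postgres.h"
#include "access/nbtree.h"
#include "storage/bufpage.h"

static void
sketch_walk_level(BtreeCheckState *state, BlockNumber leftmost)
{
	BlockNumber current = leftmost;
	BlockNumber leftcurrent = P_NONE;

	while (current != P_NONE)
	{
		Page		page = palloc_btree_page(state, current);
		BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);

		/* readonly callers hold ShareLock, so sibling links must agree */
		if (state->readonly && opaque->btpo_prev != leftcurrent)
			elog(ERROR, "left link/right link pair not in agreement");

		/* detect trivially circular link chains */
		if (current == leftcurrent || current == opaque->btpo_prev)
			elog(ERROR, "circular link chain");

		leftcurrent = current;
		current = opaque->btpo_next;
		pfree(page);
	}
}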

◆ bt_downlink_check()

static void bt_downlink_check (BtreeCheckState *state, BTScanInsert targetkey, BlockNumber childblock)

Definition at line 1583 of file verify_nbtree.c.

References Assert, ereport, errcode(), errdetail_internal(), errmsg(), ERROR, invariant_l_nontarget_offset(), offset_is_negative_infinity(), OffsetNumberNext, P_FIRSTDATAKEY, P_ISDELETED, PageGetMaxOffsetNumber, PageGetSpecialPointer, palloc_btree_page(), pfree(), BtreeCheckState::readonly, BtreeCheckState::rel, RelationGetRelationName, BtreeCheckState::targetblock, and BtreeCheckState::targetlsn.

Referenced by bt_target_page_check().

1585 {
1586  OffsetNumber offset;
1587  OffsetNumber maxoffset;
1588  Page child;
1589  BTPageOpaque copaque;
1590 
1591  /*
1592  * Caller must have ShareLock on target relation, because of
1593  * considerations around page deletion by VACUUM.
1594  *
1595  * NB: In general, page deletion deletes the right sibling's downlink, not
1596  * the downlink of the page being deleted; the deleted page's downlink is
1597  * reused for its sibling. The key space is thereby consolidated between
1598  * the deleted page and its right sibling. (We cannot delete a parent
1599  * page's rightmost child unless it is the last child page, and we intend
1600  * to also delete the parent itself.)
1601  *
1602  * If this verification happened without a ShareLock, the following race
1603  * condition could cause false positives:
1604  *
1605  * In general, concurrent page deletion might occur, including deletion of
1606  * the left sibling of the child page that is examined here. If such a
1607  * page deletion were to occur, closely followed by an insertion into the
1608  * newly expanded key space of the child, a window for the false positive
1609  * opens up: the stale parent/target downlink originally followed to get
1610  * to the child legitimately ceases to be a lower bound on all items in
1611  * the page, since the key space was concurrently expanded "left".
1612  * (Insertion followed the "new" downlink for the child, not our now-stale
1613  * downlink, which was concurrently physically removed in target/parent as
1614  * part of deletion's first phase.)
1615  *
1616  * Note that while the cross-page-same-level last item check uses a trick
1617  * that allows it to perform verification for !readonly callers, a similar
1618  * trick seems difficult here. The trick that that other check uses is,
1619  * in essence, to lock down race conditions to those that occur due to
1620  * concurrent page deletion of the target; that's a race that can be
1621  * reliably detected before actually reporting corruption.
1622  *
1623  * On the other hand, we'd need to lock down race conditions involving
1624  * deletion of child's left page, for long enough to read the child page
1625  * into memory (in other words, a scheme with concurrently held buffer
1626  * locks on both child and left-of-child pages). That's unacceptable for
1627  * amcheck functions on general principle, though.
1628  */
1629  Assert(state->readonly);
1630 
1631  /*
1632  * Verify child page has the downlink key from target page (its parent) as
1633  * a lower bound; downlink must be strictly less than all keys on the
1634  * page.
1635  *
1636  * Check all items, rather than checking just the first and trusting that
1637  * the operator class obeys the transitive law.
1638  */
1639  child = palloc_btree_page(state, childblock);
1640  copaque = (BTPageOpaque) PageGetSpecialPointer(child);
1641  maxoffset = PageGetMaxOffsetNumber(child);
1642 
1643  /*
1644  * Since there cannot be a concurrent VACUUM operation in readonly mode,
1645  * and since a page has no links within other pages (siblings and parent)
1646  * once it is marked fully deleted, it should be impossible to land on a
1647  * fully deleted page.
1648  *
1649  * It does not quite make sense to enforce that the page cannot even be
1650  * half-dead, despite the fact the downlink is modified at the same stage
1651  * that the child leaf page is marked half-dead. That's incorrect because
1652  * there may occasionally be multiple downlinks from a chain of pages
1653  * undergoing deletion, where multiple successive calls are made to
1654  * _bt_unlink_halfdead_page() by VACUUM before it can finally safely mark
1655  * the leaf page as fully dead. While _bt_mark_page_halfdead() usually
1656  * removes the downlink to the leaf page that is marked half-dead, that's
1657  * not guaranteed, so it's possible we'll land on a half-dead page with a
1658  * downlink due to an interrupted multi-level page deletion.
1659  *
1660  * We go ahead with our checks if the child page is half-dead. It's safe
1661  * to do so because we do not test the child's high key, so it does not
1662  * matter that the original high key will have been replaced by a dummy
1663  * truncated high key within _bt_mark_page_halfdead(). All other page
1664  * items are left intact on a half-dead page, so there is still something
1665  * to test.
1666  */
1667  if (P_ISDELETED(copaque))
1668  ereport(ERROR,
1669  (errcode(ERRCODE_INDEX_CORRUPTED),
1670  errmsg("downlink to deleted page found in index \"%s\"",
1671  RelationGetRelationName(state->rel)),
1672  errdetail_internal("Parent block=%u child block=%u parent page lsn=%X/%X.",
1673  state->targetblock, childblock,
1674  (uint32) (state->targetlsn >> 32),
1675  (uint32) state->targetlsn)));
1676 
1677  for (offset = P_FIRSTDATAKEY(copaque);
1678  offset <= maxoffset;
1679  offset = OffsetNumberNext(offset))
1680  {
1681  /*
1682  * Skip comparison of target page key against "negative infinity"
1683  * item, if any. Checking it would indicate that it's not a strict
1684  * lower bound, but that's only because of the hard-coding for
1685  * negative infinity items within _bt_compare().
1686  *
1687  * If nbtree didn't truncate negative infinity tuples during internal
1688  * page splits then we'd expect child's negative infinity key to be
1689  * equal to the scankey/downlink from target/parent (it would be a
1690  * "low key" in this hypothetical scenario, and so it would still need
1691  * to be treated as a special case here).
1692  *
1693  * Negative infinity items can be thought of as a strict lower bound
1694  * that works transitively, with the last non-negative-infinity pivot
1695  * followed during a descent from the root as its "true" strict lower
1696  * bound. Only a small number of negative infinity items are truly
1697  * negative infinity; those that are the first items of leftmost
1698  * internal pages. In more general terms, a negative infinity item is
1699  * only negative infinity with respect to the subtree that the page is
1700  * at the root of.
1701  *
1702  * See also: bt_rootdescend(), which can even detect transitive
1703  * inconsistencies on cousin leaf pages.
1704  */
1705  if (offset_is_negative_infinity(copaque, offset))
1706  continue;
1707 
1708  if (!invariant_l_nontarget_offset(state, targetkey, childblock, child,
1709  offset))
1710  ereport(ERROR,
1711  (errcode(ERRCODE_INDEX_CORRUPTED),
1712  errmsg("down-link lower bound invariant violated for index \"%s\"",
1713  RelationGetRelationName(state->rel)),
1714  errdetail_internal("Parent block=%u child index tid=(%u,%u) parent page lsn=%X/%X.",
1715  state->targetblock, childblock, offset,
1716  (uint32) (state->targetlsn >> 32),
1717  (uint32) state->targetlsn)));
1718  }
1719 
1720  pfree(child);
1721 }
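
The loop above skips the "negative infinity" item before applying the lower-bound invariant. A hedged sketch of what that test amounts to (the real offset_is_negative_infinity() is defined elsewhere in verify_nbtree.c; this simply restates the documented rule that the first data item of an internal page is a minus-infinity sentinel with no key to compare):

#include "postgres.h"
#include "access/nbtree.h"
#include "storage/off.h"

static bool
sketch_offset_is_negative_infinity(BTPageOpaque opaque, OffsetNumber offset)
{
	/* Only internal pages have the sentinel, and it is always the first
	 * data item on the page. */
	return !P_ISLEAF(opaque) && offset == P_FIRSTDATAKEY(opaque);
}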

◆ bt_downlink_missing_check()

static void bt_downlink_missing_check (BtreeCheckState *state)

Definition at line 1737 of file verify_nbtree.c.

References Assert, bloom_lacks_element(), BTPageOpaqueData::btpo, BTPageOpaqueData::btpo_prev, BTreeTupleGetDownLink(), BTreeTupleGetTopParent(), CHECK_FOR_INTERRUPTS, DEBUG1, BtreeCheckState::downlinkfilter, elog, ereport, errcode(), errdetail_internal(), errmsg(), errmsg_internal(), ERROR, BtreeCheckState::heapallindexed, BTPageOpaqueData::level, P_FIRSTDATAKEY, P_HIKEY, P_IGNORE, P_ISDELETED, P_ISHALFDEAD, P_ISLEAF, P_ISROOT, P_RIGHTMOST, PageGetItem, PageGetItemIdCareful(), PageGetSpecialPointer, palloc_btree_page(), pfree(), BtreeCheckState::readonly, BtreeCheckState::rel, RelationGetRelationName, BtreeCheckState::rightsplit, BtreeCheckState::target, BtreeCheckState::targetblock, and BtreeCheckState::targetlsn.

Referenced by bt_target_page_check().

1738 {
1739  BTPageOpaque topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
1740  ItemId itemid;
1741  IndexTuple itup;
1742  Page child;
1743  BTPageOpaque copaque;
1744  uint32 level;
1745  BlockNumber childblk;
1746 
1747  Assert(state->heapallindexed && state->readonly);
1748  Assert(!P_IGNORE(topaque));
1749 
1750  /* No next level up with downlinks to fingerprint from the true root */
1751  if (P_ISROOT(topaque))
1752  return;
1753 
1754  /*
1755  * Incomplete (interrupted) page splits can account for the lack of a
1756  * downlink. Some inserting transaction should eventually complete the
1757  * page split in passing, when it notices that the left sibling page is
1758  * P_INCOMPLETE_SPLIT().
1759  *
1760  * In general, VACUUM is not prepared for there to be no downlink to a
1761  * page that it deletes. This is the main reason why the lack of a
1762  * downlink can be reported as corruption here. It's not obvious that an
1763  * invalid missing downlink can result in wrong answers to queries,
1764  * though, since index scans that land on the child may end up
1765  * consistently moving right. The handling of concurrent page splits (and
1766  * page deletions) within _bt_moveright() cannot distinguish
1767  * inconsistencies that last for a moment from inconsistencies that are
1768  * permanent and irrecoverable.
1769  *
1770  * VACUUM isn't even prepared to delete pages that have no downlink due to
1771  * an incomplete page split, but it can detect and reason about that case
1772  * by design, so it shouldn't be taken to indicate corruption. See
1773  * _bt_pagedel() for full details.
1774  */
1775  if (state->rightsplit)
1776  {
1777  ereport(DEBUG1,
1778  (errcode(ERRCODE_NO_DATA),
1779  errmsg("harmless interrupted page split detected in index %s",
1780  RelationGetRelationName(state->rel)),
1781  errdetail_internal("Block=%u level=%u left sibling=%u page lsn=%X/%X.",
1782  state->targetblock, topaque->btpo.level,
1783  topaque->btpo_prev,
1784  (uint32) (state->targetlsn >> 32),
1785  (uint32) state->targetlsn)));
1786  return;
1787  }
1788 
1789  /* Target's downlink is typically present in parent/fingerprinted */
1790  if (!bloom_lacks_element(state->downlinkfilter,
1791  (unsigned char *) &state->targetblock,
1792  sizeof(BlockNumber)))
1793  return;
1794 
1795  /*
1796  * Target is probably the "top parent" of a multi-level page deletion.
1797  * We'll need to descend the subtree to make sure that descendant pages
1798  * are consistent with that, though.
1799  *
1800  * If the target page (which must be non-ignorable) is a leaf page, then
1801  * clearly it can't be the top parent. The lack of a downlink is probably
1802  * a symptom of a broad problem that could just as easily cause
1803  * inconsistencies anywhere else.
1804  */
1805  if (P_ISLEAF(topaque))
1806  ereport(ERROR,
1807  (errcode(ERRCODE_INDEX_CORRUPTED),
1808  errmsg("leaf index block lacks downlink in index \"%s\"",
1809  RelationGetRelationName(state->rel)),
1810  errdetail_internal("Block=%u page lsn=%X/%X.",
1811  state->targetblock,
1812  (uint32) (state->targetlsn >> 32),
1813  (uint32) state->targetlsn)));
1814 
1815  /* Descend from the target page, which is an internal page */
1816  elog(DEBUG1, "checking for interrupted multi-level deletion due to missing downlink in index \"%s\"",
1817  RelationGetRelationName(state->rel));
1818 
1819  level = topaque->btpo.level;
1820  itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
1821  P_FIRSTDATAKEY(topaque));
1822  itup = (IndexTuple) PageGetItem(state->target, itemid);
1823  childblk = BTreeTupleGetDownLink(itup);
1824  for (;;)
1825  {
1826  CHECK_FOR_INTERRUPTS();
1827 
1828  child = palloc_btree_page(state, childblk);
1829  copaque = (BTPageOpaque) PageGetSpecialPointer(child);
1830 
1831  if (P_ISLEAF(copaque))
1832  break;
1833 
1834  /* Do an extra sanity check in passing on internal pages */
1835  if (copaque->btpo.level != level - 1)
1836  ereport(ERROR,
1837  (errcode(ERRCODE_INDEX_CORRUPTED),
1838  errmsg_internal("downlink points to block in index \"%s\" whose level is not one level down",
1839  RelationGetRelationName(state->rel)),
1840  errdetail_internal("Top parent/target block=%u block pointed to=%u expected level=%u level in pointed to block=%u.",
1841  state->targetblock, childblk,
1842  level - 1, copaque->btpo.level)));
1843 
1844  level = copaque->btpo.level;
1845  itemid = PageGetItemIdCareful(state, childblk, child,
1846  P_FIRSTDATAKEY(copaque));
1847  itup = (IndexTuple) PageGetItem(child, itemid);
1848  childblk = BTreeTupleGetDownLink(itup);
1849  /* Be slightly more pro-active in freeing this memory, just in case */
1850  pfree(child);
1851  }
1852 
1853  /*
1854  * Since there cannot be a concurrent VACUUM operation in readonly mode,
1855  * and since a page has no links within other pages (siblings and parent)
1856  * once it is marked fully deleted, it should be impossible to land on a
1857  * fully deleted page. See bt_downlink_check() for further details.
1858  *
1859  * The bt_downlink_check() P_ISDELETED() check is repeated here because
1860  * bt_downlink_check() does not visit pages reachable through negative
1861  * infinity items. Besides, bt_downlink_check() is unwilling to descend
1862  * multiple levels. (The similar bt_downlink_check() P_ISDELETED() check
1863  * within bt_check_level_from_leftmost() won't reach the page either,
1864  * since the leaf's live siblings should have their sibling links updated
1865  * to bypass the deletion target page when it is marked fully dead.)
1866  *
1867  * If this error is raised, it might be due to a previous multi-level page
1868  * deletion that failed to realize that it wasn't yet safe to mark the
1869  * leaf page as fully dead. A "dangling downlink" will still remain when
1870  * this happens. The fact that the dangling downlink's page (the leaf's
1871  * parent/ancestor page) lacked a downlink is incidental.
1872  */
1873  if (P_ISDELETED(copaque))
1874  ereport(ERROR,
1875  (errcode(ERRCODE_INDEX_CORRUPTED),
1876  errmsg_internal("downlink to deleted leaf page found in index \"%s\"",
1877  RelationGetRelationName(state->rel)),
1878  errdetail_internal("Top parent/target block=%u leaf block=%u top parent/target lsn=%X/%X.",
1879  state->targetblock, childblk,
1880  (uint32) (state->targetlsn >> 32),
1881  (uint32) state->targetlsn)));
1882 
1883  /*
1884  * Iff leaf page is half-dead, its high key top parent link should point
1885  * to what VACUUM considered to be the top parent page at the instant it
1886  * was interrupted. Provided the high key link actually points to the
1887  * target page, the missing downlink we detected is consistent with there
1888  * having been an interrupted multi-level page deletion. This means that
1889  * the subtree with the target page at its root (a page deletion chain) is
1890  * in a consistent state, enabling VACUUM to resume deleting the entire
1891  * chain the next time it encounters the half-dead leaf page.
1892  */
1893  if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque))
1894  {
1895  itemid = PageGetItemIdCareful(state, childblk, child, P_HIKEY);
1896  itup = (IndexTuple) PageGetItem(child, itemid);
1897  if (BTreeTupleGetTopParent(itup) == state->targetblock)
1898  return;
1899  }
1900 
1901  ereport(ERROR,
1902  (errcode(ERRCODE_INDEX_CORRUPTED),
1903  errmsg("internal index block lacks downlink in index \"%s\"",
1904  RelationGetRelationName(state->rel)),
1905  errdetail_internal("Block=%u level=%u page lsn=%X/%X.",
1906  state->targetblock, topaque->btpo.level,
1907  (uint32) (state->targetlsn >> 32),
1908  (uint32) state->targetlsn)));
1909 }
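
The bloom_lacks_element() probe above only makes sense because the "add" half happens one level up: bt_target_page_check() (not shown in this section) fingerprints every downlink block number it sees into state->downlinkfilter. A hedged sketch of the two halves of that pairing (the helper names are illustrative; the element encoding matches the sizeof(BlockNumber) probe used above):

#include "postgres.h"
#include "lib/bloomfilter.h"
#include "storage/block.h"

/* One level up: remember every downlink block number that has a parent. */
static void
sketch_remember_downlink(bloom_filter *downlinkfilter, BlockNumber childblock)
{
	bloom_add_element(downlinkfilter, (unsigned char *) &childblock,
					  sizeof(BlockNumber));
}

/* One level down: a definite miss means no parent fingerprinted this block,
 * so the target may genuinely lack a downlink and deserves a closer look. */
static bool
sketch_downlink_may_be_missing(bloom_filter *downlinkfilter,
							   BlockNumber targetblock)
{
	return bloom_lacks_element(downlinkfilter,
							   (unsigned char *) &targetblock,
							   sizeof(BlockNumber));
}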

◆ bt_index_check()

Datum bt_index_check ( PG_FUNCTION_ARGS  )

Definition at line 183 of file verify_nbtree.c.

References bt_index_check_internal(), BtreeCheckState::heapallindexed, PG_GETARG_BOOL, PG_GETARG_OID, PG_NARGS, and PG_RETURN_VOID.

184 {
185  Oid indrelid = PG_GETARG_OID(0);
186  bool heapallindexed = false;
187 
188  if (PG_NARGS() == 2)
189  heapallindexed = PG_GETARG_BOOL(1);
190 
191  bt_index_check_internal(indrelid, false, heapallindexed, false);
192 
193  PG_RETURN_VOID();
194 }
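
bt_index_check() is normally reached from SQL (for example, SELECT bt_index_check('some_index'::regclass, true) after CREATE EXTENSION amcheck). If backend C code compiled into the same module wanted to invoke it directly, a hedged sketch via the fmgr could look like this (indexoid is a hypothetical B-Tree index OID, and the call must run inside a transaction):

#include "postgres.h"
#include "fmgr.h"

extern Datum bt_index_check(PG_FUNCTION_ARGS);	/* from verify_nbtree.c */

/* Illustrative wrapper: run the basic check with heapallindexed = true. */
static void
sketch_check_index(Oid indexoid)
{
	(void) DirectFunctionCall2(bt_index_check,
							   ObjectIdGetDatum(indexoid),
							   BoolGetDatum(true));
}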

◆ bt_index_check_internal()

static void bt_index_check_internal (Oid indrelid, bool parentcheck, bool heapallindexed, bool rootdescend)

Definition at line 226 of file verify_nbtree.c.

References _bt_metaversion(), AccessShareLock, bt_check_every_level(), btree_index_checkable(), btree_index_mainfork_expected(), ereport, errcode(), ERRCODE_UNDEFINED_TABLE, errmsg(), ERROR, BtreeCheckState::heapkeyspace, BtreeCheckState::heaprel, index_close(), index_open(), IndexGetRelation(), MAIN_FORKNUM, OidIsValid, RelationData::rd_smgr, RelationGetRelationName, RelationOpenSmgr, ShareLock, smgrexists(), table_close(), and table_open().

Referenced by bt_index_check(), and bt_index_parent_check().

228 {
229  Oid heapid;
230  Relation indrel;
231  Relation heaprel;
232  LOCKMODE lockmode;
233 
234  if (parentcheck)
235  lockmode = ShareLock;
236  else
237  lockmode = AccessShareLock;
238 
239  /*
240  * We must lock table before index to avoid deadlocks. However, if the
241  * passed indrelid isn't an index then IndexGetRelation() will fail.
242  * Rather than emitting a not-very-helpful error message, postpone
243  * complaining, expecting that the is-it-an-index test below will fail.
244  *
245  * In hot standby mode this will raise an error when parentcheck is true.
246  */
247  heapid = IndexGetRelation(indrelid, true);
248  if (OidIsValid(heapid))
249  heaprel = table_open(heapid, lockmode);
250  else
251  heaprel = NULL;
252 
253  /*
254  * Open the target index relations separately (like relation_openrv(), but
255  * with heap relation locked first to prevent deadlocking). In hot
256  * standby mode this will raise an error when parentcheck is true.
257  *
258  * There is no need for the usual indcheckxmin usability horizon test
259  * here, even in the heapallindexed case, because index undergoing
260  * verification only needs to have entries for a new transaction snapshot.
261  * (If this is a parentcheck verification, there is no question about
262  * committed or recently dead heap tuples lacking index entries due to
263  * concurrent activity.)
264  */
265  indrel = index_open(indrelid, lockmode);
266 
267  /*
268  * Since we did the IndexGetRelation call above without any lock, it's
269  * barely possible that a race against an index drop/recreation could have
270  * netted us the wrong table.
271  */
272  if (heaprel == NULL || heapid != IndexGetRelation(indrelid, false))
273  ereport(ERROR,
274  (errcode(ERRCODE_UNDEFINED_TABLE),
275  errmsg("could not open parent table of index %s",
276  RelationGetRelationName(indrel))));
277 
278  /* Relation suitable for checking as B-Tree? */
279  btree_index_checkable(indrel);
280 
281  if (btree_index_mainfork_expected(indrel))
282  {
283  bool heapkeyspace,
284  allequalimage;
285 
286  RelationOpenSmgr(indrel);
287  if (!smgrexists(indrel->rd_smgr, MAIN_FORKNUM))
288  ereport(ERROR,
289  (errcode(ERRCODE_INDEX_CORRUPTED),
290  errmsg("index \"%s\" lacks a main relation fork",
291  RelationGetRelationName(indrel))));
292 
293  /* Check index, possibly against table it is an index on */
294  _bt_metaversion(indrel, &heapkeyspace, &allequalimage);
295  bt_check_every_level(indrel, heaprel, heapkeyspace, parentcheck,
296  heapallindexed, rootdescend);
297  }
298 
299  /*
300  * Release locks early. That's ok here because nothing in the called
301  * routines will trigger shared cache invalidations to be sent, so we can
302  * relax the usual pattern of only releasing locks after commit.
303  */
304  index_close(indrel, lockmode);
305  if (heaprel)
306  table_close(heaprel, lockmode);
307 }

◆ bt_index_parent_check()

Datum bt_index_parent_check ( PG_FUNCTION_ARGS  )

Definition at line 206 of file verify_nbtree.c.

References bt_index_check_internal(), BtreeCheckState::heapallindexed, PG_GETARG_BOOL, PG_GETARG_OID, PG_NARGS, PG_RETURN_VOID, and BtreeCheckState::rootdescend.

207 {
208  Oid indrelid = PG_GETARG_OID(0);
209  bool heapallindexed = false;
210  bool rootdescend = false;
211 
212  if (PG_NARGS() >= 2)
213  heapallindexed = PG_GETARG_BOOL(1);
214  if (PG_NARGS() == 3)
215  rootdescend = PG_GETARG_BOOL(2);
216 
217  bt_index_check_internal(indrelid, true, heapallindexed, rootdescend);
218 
219  PG_RETURN_VOID();
220 }

◆ bt_mkscankey_pivotsearch()

static inline BTScanInsert bt_mkscankey_pivotsearch (Relation rel, IndexTuple itup)

Definition at line 2593 of file verify_nbtree.c.

References _bt_mkscankey(), and BTScanInsertData::pivotsearch.

Referenced by bt_right_page_check_scankey(), and bt_target_page_check().

2594 {
2595  BTScanInsert skey;
2596 
2597  skey = _bt_mkscankey(rel, itup);
2598  skey->pivotsearch = true;
2599 
2600  return skey;
2601 }

◆ bt_normalize_tuple()

static IndexTuple bt_normalize_tuple (BtreeCheckState *state, IndexTuple itup)

Definition at line 2035 of file verify_nbtree.c.

References Assert, BTreeTupleIsPivot(), BTreeTupleIsPosting(), DatumGetPointer, ereport, errcode(), errmsg(), ERROR, i, index_form_tuple(), index_getattr, INDEX_MAX_KEYS, IndexTupleHasVarwidths, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, TupleDescData::natts, pfree(), PG_DETOAST_DATUM, PointerGetDatum, BtreeCheckState::rel, RelationGetDescr, RelationGetRelationName, IndexTupleData::t_tid, TupleDescAttr, VARATT_IS_COMPRESSED, and VARATT_IS_EXTERNAL.

Referenced by bt_target_page_check(), and bt_tuple_present_callback().

2036 {
2037  TupleDesc tupleDescriptor = RelationGetDescr(state->rel);
2038  Datum normalized[INDEX_MAX_KEYS];
2039  bool isnull[INDEX_MAX_KEYS];
2040  bool toast_free[INDEX_MAX_KEYS];
2041  bool formnewtup = false;
2042  IndexTuple reformed;
2043  int i;
2044 
2045  /* Caller should only pass "logical" non-pivot tuples here */
2046  Assert(!BTreeTupleIsPosting(itup) && !BTreeTupleIsPivot(itup));
2047 
2048  /* Easy case: It's immediately clear that tuple has no varlena datums */
2049  if (!IndexTupleHasVarwidths(itup))
2050  return itup;
2051 
2052  for (i = 0; i < tupleDescriptor->natts; i++)
2053  {
2054  Form_pg_attribute att;
2055 
2056  att = TupleDescAttr(tupleDescriptor, i);
2057 
2058  /* Assume untoasted/already normalized datum initially */
2059  toast_free[i] = false;
2060  normalized[i] = index_getattr(itup, att->attnum,
2061  tupleDescriptor,
2062  &isnull[i]);
2063  if (att->attbyval || att->attlen != -1 || isnull[i])
2064  continue;
2065 
2066  /*
2067  * Callers always pass a tuple that could safely be inserted into the
2068  * index without further processing, so an external varlena header
2069  * should never be encountered here
2070  */
2071  if (VARATT_IS_EXTERNAL(DatumGetPointer(normalized[i])))
2072  ereport(ERROR,
2073  (errcode(ERRCODE_INDEX_CORRUPTED),
2074  errmsg("external varlena datum in tuple that references heap row (%u,%u) in index \"%s\"",
2075  ItemPointerGetBlockNumber(&(itup->t_tid)),
2076  ItemPointerGetOffsetNumber(&(itup->t_tid)),
2077  RelationGetRelationName(state->rel))));
2078  else if (VARATT_IS_COMPRESSED(DatumGetPointer(normalized[i])))
2079  {
2080  formnewtup = true;
2081  normalized[i] = PointerGetDatum(PG_DETOAST_DATUM(normalized[i]));
2082  toast_free[i] = true;
2083  }
2084  }
2085 
2086  /* Easier case: Tuple has varlena datums, none of which are compressed */
2087  if (!formnewtup)
2088  return itup;
2089 
2090  /*
2091  * Hard case: Tuple had compressed varlena datums that necessitate
2092  * creating normalized version of the tuple from uncompressed input datums
2093  * (normalized input datums). This is rather naive, but shouldn't be
2094  * necessary too often.
2095  *
2096  * Note that we rely on deterministic index_form_tuple() TOAST compression
2097  * of normalized input.
2098  */
2099  reformed = index_form_tuple(tupleDescriptor, normalized, isnull);
2100  reformed->t_tid = itup->t_tid;
2101 
2102  /* Cannot leak memory here */
2103  for (i = 0; i < tupleDescriptor->natts; i++)
2104  if (toast_free[i])
2105  pfree(DatumGetPointer(normalized[i]));
2106 
2107  return reformed;
2108 }
#define VARATT_IS_COMPRESSED(PTR)
Definition: postgres.h:312
static bool BTreeTupleIsPivot(IndexTuple itup)
Definition: nbtree.h:357
#define RelationGetDescr(relation)
Definition: rel.h:454
#define PointerGetDatum(X)
Definition: postgres.h:556
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
ItemPointerData t_tid
Definition: itup.h:37
int errcode(int sqlerrcode)
Definition: elog.c:608
#define VARATT_IS_EXTERNAL(PTR)
Definition: postgres.h:313
IndexTuple index_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: indextuple.c:47
void pfree(void *pointer)
Definition: mcxt.c:1056
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:462
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:200
#define ereport(elevel, rest)
Definition: elog.h:141
uintptr_t Datum
Definition: postgres.h:367
static bool BTreeTupleIsPosting(IndexTuple itup)
Definition: nbtree.h:369
#define Assert(condition)
Definition: c.h:738
#define INDEX_MAX_KEYS
#define index_getattr(tup, attnum, tupleDesc, isnull)
Definition: itup.h:100
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define IndexTupleHasVarwidths(itup)
Definition: itup.h:73
#define DatumGetPointer(X)
Definition: postgres.h:549
int errmsg(const char *fmt,...)
Definition: elog.c:822
int i
#define PG_DETOAST_DATUM(datum)
Definition: fmgr.h:235
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
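
The heapallindexed check fingerprints every leaf tuple into a Bloom filter and later probes the filter with tuples rebuilt from the heap; normalization is what makes the two sides hash identically when a datum happens to be stored compressed in the index but uncompressed in the rebuilt tuple. A condensed sketch of that pairing, assuming itup, tid, values, and isnull are the inputs the two passes already have in hand (filter creation and sizing happen in bt_check_every_level()):

IndexTuple  norm;
IndexTuple  heaptup;
IndexTuple  heapnorm;

/* Index-side pass: fingerprint each normalized leaf tuple */
norm = bt_normalize_tuple(state, itup);
bloom_add_element(state->filter, (unsigned char *) norm, IndexTupleSize(norm));
if (norm != itup)
    pfree(norm);

/* Heap-side pass: rebuild the tuple from heap values and probe the filter */
heaptup = index_form_tuple(RelationGetDescr(state->rel), values, isnull);
heaptup->t_tid = *tid;          /* assumed: heap TID of the row being probed */
heapnorm = bt_normalize_tuple(state, heaptup);
if (bloom_lacks_element(state->filter, (unsigned char *) heapnorm,
                        IndexTupleSize(heapnorm)))
    elog(WARNING, "tuple was not fingerprinted earlier -- possible corruption");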

◆ bt_posting_plain_tuple()

static IndexTuple bt_posting_plain_tuple ( IndexTuple  itup,
int  n 
)
inlinestatic

Definition at line 2125 of file verify_nbtree.c.

References _bt_form_posting(), Assert, BTreeTupleGetPostingN(), and BTreeTupleIsPosting().

Referenced by bt_target_page_check().

2126 {
2127  Assert(BTreeTupleIsPosting(itup));
2128 
2129  /* Returns non-posting-list tuple */
2130  return _bt_form_posting(itup, BTreeTupleGetPostingN(itup, n), 1);
2131 }
IndexTuple _bt_form_posting(IndexTuple base, ItemPointer htids, int nhtids)
Definition: nbtdedup.c:601
static ItemPointer BTreeTupleGetPostingN(IndexTuple posting, int n)
Definition: nbtree.h:419
static bool BTreeTupleIsPosting(IndexTuple itup)
Definition: nbtree.h:369
#define Assert(condition)
Definition: c.h:738
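
Because the heap-side probe sees one rebuilt tuple per heap row, a posting list tuple on the leaf level has to be fingerprinted as if it were several separate tuples. A condensed sketch of that expansion (the same loop appears in bt_target_page_check()):

/* Expand a posting list tuple into one plain tuple per heap TID */
for (int i = 0; i < BTreeTupleGetNPosting(itup); i++)
{
    IndexTuple  plain = bt_posting_plain_tuple(itup, i);

    /* ... normalize and fingerprint 'plain' here, as bt_target_page_check() does ... */
    pfree(plain);
}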

◆ bt_right_page_check_scankey()

static BTScanInsert bt_right_page_check_scankey ( BtreeCheckState state)
static

Definition at line 1376 of file verify_nbtree.c.

References bt_mkscankey_pivotsearch(), BTPageOpaqueData::btpo, BTPageOpaqueData::btpo_next, CHECK_FOR_INTERRUPTS, DEBUG1, ereport, errcode(), errdetail_internal(), errmsg(), BTPageOpaqueData::level, OffsetNumberNext, P_FIRSTDATAKEY, P_IGNORE, P_ISLEAF, P_RIGHTMOST, PageGetItem, PageGetItemIdCareful(), PageGetMaxOffsetNumber, PageGetSpecialPointer, palloc_btree_page(), pfree(), BtreeCheckState::rel, RelationGetRelationName, and BtreeCheckState::target.

Referenced by bt_target_page_check().

1377 {
1378  BTPageOpaque opaque;
1379  ItemId rightitem;
1380  IndexTuple firstitup;
1381  BlockNumber targetnext;
1382  Page rightpage;
1383  OffsetNumber nline;
1384 
1385  /* Determine target's next block number */
1386  opaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
1387 
1388  /* If target is already rightmost, no right sibling; nothing to do here */
1389  if (P_RIGHTMOST(opaque))
1390  return NULL;
1391 
1392  /*
1393  * General notes on concurrent page splits and page deletion:
1394  *
1395  * Routines like _bt_search() don't require *any* page split interlock
1396  * when descending the tree, including something very light like a buffer
1397  * pin. That's why it's okay that we don't either. This avoidance of any
1398  * need to "couple" buffer locks is the raison d' etre of the Lehman & Yao
1399  * algorithm, in fact.
1400  *
1401  * That leaves deletion. A deleted page won't actually be recycled by
1402  * VACUUM early enough for us to fail to at least follow its right link
1403  * (or left link, or downlink) and find its sibling, because recycling
1404  * does not occur until no possible index scan could land on the page.
1405  * Index scans can follow links with nothing more than their snapshot as
1406  * an interlock and be sure of at least that much. (See page
1407  * recycling/RecentGlobalXmin notes in nbtree README.)
1408  *
1409  * Furthermore, it's okay if we follow a rightlink and find a half-dead or
1410  * dead (ignorable) page one or more times. There will either be a
1411  * further right link to follow that leads to a live page before too long
1412  * (before passing by parent's rightmost child), or we will find the end
1413  * of the entire level instead (possible when parent page is itself the
1414  * rightmost on its level).
1415  */
1416  targetnext = opaque->btpo_next;
1417  for (;;)
1418  {
1419  CHECK_FOR_INTERRUPTS();
1420 
1421  rightpage = palloc_btree_page(state, targetnext);
1422  opaque = (BTPageOpaque) PageGetSpecialPointer(rightpage);
1423 
1424  if (!P_IGNORE(opaque) || P_RIGHTMOST(opaque))
1425  break;
1426 
1427  /* We landed on a deleted page, so step right to find a live page */
1428  targetnext = opaque->btpo_next;
1429  ereport(DEBUG1,
1430  (errcode(ERRCODE_NO_DATA),
1431  errmsg("level %u leftmost page of index \"%s\" was found deleted or half dead",
1432  opaque->btpo.level, RelationGetRelationName(state->rel)),
1433  errdetail_internal("Deleted page found when building scankey from right sibling.")));
1434 
1435  /* Be slightly more pro-active in freeing this memory, just in case */
1436  pfree(rightpage);
1437  }
1438 
1439  /*
1440  * No ShareLock held case -- why it's safe to proceed.
1441  *
1442  * Problem:
1443  *
1444  * We must avoid false positive reports of corruption when caller treats
1445  * item returned here as an upper bound on target's last item. In
1446  * general, false positives are disallowed. Avoiding them here when
1447  * caller is !readonly is subtle.
1448  *
1449  * A concurrent page deletion by VACUUM of the target page can result in
1450  * the insertion of items on to this right sibling page that would
1451  * previously have been inserted on our target page. There might have
1452  * been insertions that followed the target's downlink after it was made
1453  * to point to right sibling instead of target by page deletion's first
1454  * phase. The inserters insert items that would belong on target page.
1455  * This race is very tight, but it's possible. This is our only problem.
1456  *
1457  * Non-problems:
1458  *
1459  * We are not hindered by a concurrent page split of the target; we'll
1460  * never land on the second half of the page anyway. A concurrent split
1461  * of the right page will also not matter, because the first data item
1462  * remains the same within the left half, which we'll reliably land on. If
1463  * we had to skip over ignorable/deleted pages, it cannot matter because
1464  * their key space has already been atomically merged with the first
1465  * non-ignorable page we eventually find (doesn't matter whether the page
1466  * we eventually find is a true sibling or a cousin of target, which we go
1467  * into below).
1468  *
1469  * Solution:
1470  *
1471  * Caller knows that it should reverify that target is not ignorable
1472  * (half-dead or deleted) when cross-page sibling item comparison appears
1473  * to indicate corruption (invariant fails). This detects the single race
1474  * condition that exists for caller. This is correct because the
1475  * continued existence of target block as non-ignorable (not half-dead or
1476  * deleted) implies that target page was not merged into from the right by
1477  * deletion; the key space at or after target never moved left. Target's
1478  * parent either has the same downlink to target as before, or a <
1479  * downlink due to deletion at the left of target. Target either has the
1480  * same highkey as before, or a highkey < before when there is a page
1481  * split. (The rightmost concurrently-split-from-target-page page will
1482  * still have the same highkey as target was originally found to have,
1483  * which for our purposes is equivalent to target's highkey itself never
1484  * changing, since we reliably skip over
1485  * concurrently-split-from-target-page pages.)
1486  *
1487  * In simpler terms, we allow that the key space of the target may expand
1488  * left (the key space can move left on the left side of target only), but
1489  * the target key space cannot expand right and get ahead of us without
1490  * our detecting it. The key space of the target cannot shrink, unless it
1491  * shrinks to zero due to the deletion of the original page, our canary
1492  * condition. (To be very precise, we're a bit stricter than that because
1493  * it might just have been that the target page split and only the
1494  * original target page was deleted. We can be more strict, just not more
1495  * lax.)
1496  *
1497  * Top level tree walk caller moves on to next page (makes it the new
1498  * target) following recovery from this race. (cf. The rationale for
1499  * child/downlink verification needing a ShareLock within
1500  * bt_downlink_check(), where page deletion is also the main source of
1501  * trouble.)
1502  *
1503  * Note that it doesn't matter if right sibling page here is actually a
1504  * cousin page, because in order for the key space to be readjusted in a
1505  * way that causes us issues in next level up (guiding problematic
1506  * concurrent insertions to the cousin from the grandparent rather than to
1507  * the sibling from the parent), there'd have to be page deletion of
1508  * target's parent page (affecting target's parent's downlink in target's
1509  * grandparent page). Internal page deletion only occurs when there are
1510  * no child pages (they were all fully deleted), and caller is checking
1511  * that the target's parent has at least one non-deleted (so
1512  * non-ignorable) child: the target page. (Note that the first phase of
1513  * deletion atomically marks the page to be deleted half-dead/ignorable at
1514  * the same time downlink in its parent is removed, so caller will
1515  * definitely not fail to detect that this happened.)
1516  *
1517  * This trick is inspired by the method backward scans use for dealing
1518  * with concurrent page splits; concurrent page deletion is a problem that
1519  * similarly receives special consideration sometimes (it's possible that
1520  * the backwards scan will re-read its "original" block after failing to
1521  * find a right-link to it, having already moved in the opposite direction
1522  * (right/"forwards") a few times to try to locate one). Just like us,
1523  * that happens only to determine if there was a concurrent page deletion
1524  * of a reference page, and just like us if there was a page deletion of
1525  * that reference page it means we can move on from caring about the
1526  * reference page. See the nbtree README for a full description of how
1527  * that works.
1528  */
1529  nline = PageGetMaxOffsetNumber(rightpage);
1530 
1531  /*
1532  * Get first data item, if any
1533  */
1534  if (P_ISLEAF(opaque) && nline >= P_FIRSTDATAKEY(opaque))
1535  {
1536  /* Return first data item (if any) */
1537  rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
1538  P_FIRSTDATAKEY(opaque));
1539  }
1540  else if (!P_ISLEAF(opaque) &&
1541  nline >= OffsetNumberNext(P_FIRSTDATAKEY(opaque)))
1542  {
1543  /*
1544  * Return first item after the internal page's "negative infinity"
1545  * item
1546  */
1547  rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
1548  OffsetNumberNext(P_FIRSTDATAKEY(opaque)));
1549  }
1550  else
1551  {
1552  /*
1553  * No first item. Page is probably empty leaf page, but it's also
1554  * possible that it's an internal page with only a negative infinity
1555  * item.
1556  */
1557  ereport(DEBUG1,
1558  (errcode(ERRCODE_NO_DATA),
1559  errmsg("%s block %u of index \"%s\" has no first data item",
1560  P_ISLEAF(opaque) ? "leaf" : "internal", targetnext,
1561  RelationGetRelationName(state->rel))));
1562  return NULL;
1563  }
1564 
1565  /*
1566  * Return first real item scankey. Note that this relies on right page
1567  * memory remaining allocated.
1568  */
1569  firstitup = (IndexTuple) PageGetItem(rightpage, rightitem);
1570  return bt_mkscankey_pivotsearch(state->rel, firstitup);
1571 }
BlockNumber btpo_next
Definition: nbtree.h:59
#define DEBUG1
Definition: elog.h:25
#define P_IGNORE(opaque)
Definition: nbtree.h:219
static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum)
#define P_FIRSTDATAKEY(opaque)
Definition: nbtree.h:244
union BTPageOpaqueData::@46 btpo
int errcode(int sqlerrcode)
Definition: elog.c:608
uint32 BlockNumber
Definition: block.h:31
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
BTPageOpaqueData * BTPageOpaque
Definition: nbtree.h:69
int errdetail_internal(const char *fmt,...)
Definition: elog.c:982
uint16 OffsetNumber
Definition: off.h:24
void pfree(void *pointer)
Definition: mcxt.c:1056
IndexTupleData * IndexTuple
Definition: itup.h:53
#define RelationGetRelationName(relation)
Definition: rel.h:462
#define ereport(elevel, rest)
Definition: elog.h:141
uint32 level
Definition: nbtree.h:62
static BTScanInsert bt_mkscankey_pivotsearch(Relation rel, IndexTuple itup)
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
#define PageGetSpecialPointer(page)
Definition: bufpage.h:326
int errmsg(const char *fmt,...)
Definition: elog.c:822
static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block, Page page, OffsetNumber offset)
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99
#define P_RIGHTMOST(opaque)
Definition: nbtree.h:213
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define P_ISLEAF(opaque)
Definition: nbtree.h:214

◆ bt_rootdescend()

static bool bt_rootdescend ( BtreeCheckState state,
IndexTuple  itup 
)
static

Definition at line 2158 of file verify_nbtree.c.

References _bt_binsrch_insert(), _bt_compare(), _bt_freestack(), _bt_mkscankey(), _bt_relbuf(), _bt_search(), Assert, BTInsertStateData::bounds_valid, BT_READ, BTInsertStateData::buf, BufferGetPage, BufferIsValid, BTScanInsertData::heapkeyspace, IndexTupleSize, BTInsertStateData::itemsz, BTInsertStateData::itup, BTInsertStateData::itup_key, sort-test::key, MAXALIGN, PageGetMaxOffsetNumber, pfree(), BTInsertStateData::postingoff, BtreeCheckState::readonly, BtreeCheckState::rel, BtreeCheckState::rootdescend, and BTScanInsertData::scantid.

Referenced by bt_target_page_check().

2159 {
2160  BTScanInsert key;
2161  BTStack stack;
2162  Buffer lbuf;
2163  bool exists;
2164 
2165  key = _bt_mkscankey(state->rel, itup);
2166  Assert(key->heapkeyspace && key->scantid != NULL);
2167 
2168  /*
2169  * Search from root.
2170  *
2171  * Ideally, we would arrange to only move right within _bt_search() when
2172  * an interrupted page split is detected (i.e. when the incomplete split
2173  * bit is found to be set), but for now we accept the possibility that
2174  * that could conceal an inconsistency.
2175  */
2176  Assert(state->readonly && state->rootdescend);
2177  exists = false;
2178  stack = _bt_search(state->rel, key, &lbuf, BT_READ, NULL);
2179 
2180  if (BufferIsValid(lbuf))
2181  {
2182  BTInsertStateData insertstate;
2183  OffsetNumber offnum;
2184  Page page;
2185 
2186  insertstate.itup = itup;
2187  insertstate.itemsz = MAXALIGN(IndexTupleSize(itup));
2188  insertstate.itup_key = key;
2189  insertstate.postingoff = 0;
2190  insertstate.bounds_valid = false;
2191  insertstate.buf = lbuf;
2192 
2193  /* Get matching tuple on leaf page */
2194  offnum = _bt_binsrch_insert(state->rel, &insertstate);
2195  /* Compare first >= matching item on leaf page, if any */
2196  page = BufferGetPage(lbuf);
2197  /* Should match on first heap TID when tuple has a posting list */
2198  if (offnum <= PageGetMaxOffsetNumber(page) &&
2199  insertstate.postingoff <= 0 &&
2200  _bt_compare(state->rel, key, page, offnum) == 0)
2201  exists = true;
2202  _bt_relbuf(state->rel, lbuf);
2203  }
2204 
2205  _bt_freestack(stack);
2206  pfree(key);
2207 
2208  return exists;
2209 }
void _bt_freestack(BTStack stack)
Definition: nbtutils.c:175
bool bounds_valid
Definition: nbtree.h:706
BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup)
Definition: nbtutils.c:90
ItemPointer scantid
Definition: nbtree.h:674
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
#define BT_READ
Definition: nbtree.h:597
void pfree(void *pointer)
Definition: mcxt.c:1056
OffsetNumber _bt_binsrch_insert(Relation rel, BTInsertState insertstate)
Definition: nbtsearch.c:452
IndexTuple itup
Definition: nbtree.h:694
int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum)
Definition: nbtsearch.c:649
#define BufferGetPage(buffer)
Definition: bufmgr.h:159
void _bt_relbuf(Relation rel, Buffer buf)
Definition: nbtpage.c:940
BTScanInsert itup_key
Definition: nbtree.h:696
#define Assert(condition)
Definition: c.h:738
bool heapkeyspace
Definition: nbtree.h:669
#define MAXALIGN(LEN)
Definition: c.h:691
#define BufferIsValid(bufnum)
Definition: bufmgr.h:113
int Buffer
Definition: buf.h:23
BTStack _bt_search(Relation rel, BTScanInsert key, Buffer *bufP, int access, Snapshot snapshot)
Definition: nbtsearch.c:100
Pointer Page
Definition: bufpage.h:78
#define IndexTupleSize(itup)
Definition: itup.h:71

◆ bt_target_page_check()

static void bt_target_page_check ( BtreeCheckState state)
static

Definition at line 875 of file verify_nbtree.c.

References _bt_check_natts(), bloom_add_element(), bt_downlink_check(), bt_downlink_missing_check(), bt_mkscankey_pivotsearch(), bt_normalize_tuple(), bt_posting_plain_tuple(), bt_right_page_check_scankey(), bt_rootdescend(), BTMaxItemSize, BTMaxItemSizeNoHeapTid, BTreeTupleGetDownLink(), BTreeTupleGetHeapTID(), BTreeTupleGetMaxHeapTID(), BTreeTupleGetNAtts, BTreeTupleGetNPosting(), BTreeTupleGetPointsToTID(), BTreeTupleGetPostingN(), BTreeTupleIsPosting(), CHECK_FOR_INTERRUPTS, DEBUG2, BtreeCheckState::downlinkfilter, elog, ereport, errcode(), errdetail_internal(), errhint(), errmsg(), errmsg_internal(), ERROR, BtreeCheckState::filter, BtreeCheckState::heapallindexed, BtreeCheckState::heapkeyspace, BTScanInsertData::heapkeyspace, i, IndexTupleSize, invariant_g_offset(), invariant_l_offset(), invariant_leq_offset(), ItemIdGetLength, ItemIdIsDead, ItemPointerCompare(), ItemPointerCopy, ItemPointerGetBlockNumber, ItemPointerGetBlockNumberNoCheck, ItemPointerGetOffsetNumber, ItemPointerGetOffsetNumberNoCheck, offset_is_negative_infinity(), OffsetNumberNext, P_FIRSTDATAKEY, P_HIKEY, P_IGNORE, P_ISLEAF, P_RIGHTMOST, PageGetItem, PageGetItemIdCareful(), PageGetMaxOffsetNumber, PageGetSpecialPointer, palloc_btree_page(), pfree(), psprintf(), BtreeCheckState::readonly, BtreeCheckState::rel, RelationGetRelationName, BtreeCheckState::rootdescend, BTScanInsertData::scantid, BtreeCheckState::target, BtreeCheckState::targetblock, and BtreeCheckState::targetlsn.

Referenced by bt_check_level_from_leftmost().

876 {
877  OffsetNumber offset;
878  OffsetNumber max;
879  BTPageOpaque topaque;
880 
881  topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
882  max = PageGetMaxOffsetNumber(state->target);
883 
884  elog(DEBUG2, "verifying %u items on %s block %u", max,
885  P_ISLEAF(topaque) ? "leaf" : "internal", state->targetblock);
886 
887  /*
888  * Check the number of attributes in high key. Note, rightmost page
889  * doesn't contain a high key, so nothing to check
890  */
891  if (!P_RIGHTMOST(topaque))
892  {
893  ItemId itemid;
894  IndexTuple itup;
895 
896  /* Verify line pointer before checking tuple */
897  itemid = PageGetItemIdCareful(state, state->targetblock,
898  state->target, P_HIKEY);
899  if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
900  P_HIKEY))
901  {
902  itup = (IndexTuple) PageGetItem(state->target, itemid);
903  ereport(ERROR,
904  (errcode(ERRCODE_INDEX_CORRUPTED),
905  errmsg("wrong number of high key index tuple attributes in index \"%s\"",
906  RelationGetRelationName(state->rel)),
907  errdetail_internal("Index block=%u natts=%u block type=%s page lsn=%X/%X.",
908  state->targetblock,
909  BTreeTupleGetNAtts(itup, state->rel),
910  P_ISLEAF(topaque) ? "heap" : "index",
911  (uint32) (state->targetlsn >> 32),
912  (uint32) state->targetlsn)));
913  }
914  }
915 
916  /*
917  * Loop over page items, starting from first non-highkey item, not high
918  * key (if any). Most tests are not performed for the "negative infinity"
919  * real item (if any).
920  */
921  for (offset = P_FIRSTDATAKEY(topaque);
922  offset <= max;
923  offset = OffsetNumberNext(offset))
924  {
925  ItemId itemid;
926  IndexTuple itup;
927  size_t tupsize;
928  BTScanInsert skey;
929  bool lowersizelimit;
930  ItemPointer scantid;
931 
932  CHECK_FOR_INTERRUPTS();
933 
934  itemid = PageGetItemIdCareful(state, state->targetblock,
935  state->target, offset);
936  itup = (IndexTuple) PageGetItem(state->target, itemid);
937  tupsize = IndexTupleSize(itup);
938 
939  /*
940  * lp_len should match the IndexTuple reported length exactly, since
941  * lp_len is completely redundant in indexes, and both sources of
942  * tuple length are MAXALIGN()'d. nbtree does not use lp_len all that
943  * frequently, and is surprisingly tolerant of corrupt lp_len fields.
944  */
945  if (tupsize != ItemIdGetLength(itemid))
946  ereport(ERROR,
947  (errcode(ERRCODE_INDEX_CORRUPTED),
948  errmsg("index tuple size does not equal lp_len in index \"%s\"",
949  RelationGetRelationName(state->rel)),
950  errdetail_internal("Index tid=(%u,%u) tuple size=%zu lp_len=%u page lsn=%X/%X.",
951  state->targetblock, offset,
952  tupsize, ItemIdGetLength(itemid),
953  (uint32) (state->targetlsn >> 32),
954  (uint32) state->targetlsn),
955  errhint("This could be a torn page problem.")));
956 
957  /* Check the number of index tuple attributes */
958  if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
959  offset))
960  {
961  ItemPointer tid;
962  char *itid,
963  *htid;
964 
965  itid = psprintf("(%u,%u)", state->targetblock, offset);
966  tid = BTreeTupleGetPointsToTID(itup);
967  htid = psprintf("(%u,%u)",
968  ItemPointerGetBlockNumberNoCheck(tid),
969  ItemPointerGetOffsetNumberNoCheck(tid));
970 
971  ereport(ERROR,
972  (errcode(ERRCODE_INDEX_CORRUPTED),
973  errmsg("wrong number of index tuple attributes in index \"%s\"",
974  RelationGetRelationName(state->rel)),
975  errdetail_internal("Index tid=%s natts=%u points to %s tid=%s page lsn=%X/%X.",
976  itid,
977  BTreeTupleGetNAtts(itup, state->rel),
978  P_ISLEAF(topaque) ? "heap" : "index",
979  htid,
980  (uint32) (state->targetlsn >> 32),
981  (uint32) state->targetlsn)));
982  }
983 
984  /* Fingerprint downlink blocks in heapallindexed + readonly case */
985  if (state->heapallindexed && state->readonly && !P_ISLEAF(topaque))
986  {
987  BlockNumber childblock = BTreeTupleGetDownLink(itup);
988 
989  bloom_add_element(state->downlinkfilter,
990  (unsigned char *) &childblock,
991  sizeof(BlockNumber));
992  }
993 
994  /*
995  * Don't try to generate scankey using "negative infinity" item on
996  * internal pages. They are always truncated to zero attributes.
997  */
998  if (offset_is_negative_infinity(topaque, offset))
999  continue;
1000 
1001  /*
1002  * Readonly callers may optionally verify that non-pivot tuples can
1003  * each be found by an independent search that starts from the root.
1004  * Note that we deliberately don't do individual searches for each
1005  * TID, since the posting list itself is validated by other checks.
1006  */
1007  if (state->rootdescend && P_ISLEAF(topaque) &&
1008  !bt_rootdescend(state, itup))
1009  {
1010  ItemPointer tid = BTreeTupleGetHeapTID(itup);
1011  char *itid,
1012  *htid;
1013 
1014  itid = psprintf("(%u,%u)", state->targetblock, offset);
1015  htid = psprintf("(%u,%u)", ItemPointerGetBlockNumber(tid),
1016  ItemPointerGetOffsetNumber(tid));
1017 
1018  ereport(ERROR,
1019  (errcode(ERRCODE_INDEX_CORRUPTED),
1020  errmsg("could not find tuple using search from root page in index \"%s\"",
1021  RelationGetRelationName(state->rel)),
1022  errdetail_internal("Index tid=%s points to heap tid=%s page lsn=%X/%X.",
1023  itid, htid,
1024  (uint32) (state->targetlsn >> 32),
1025  (uint32) state->targetlsn)));
1026  }
1027 
1028  /*
1029  * If tuple is a posting list tuple, make sure posting list TIDs are
1030  * in order
1031  */
1032  if (BTreeTupleIsPosting(itup))
1033  {
1034  ItemPointerData last;
1035  ItemPointer current;
1036 
1037  ItemPointerCopy(BTreeTupleGetHeapTID(itup), &last);
1038 
1039  for (int i = 1; i < BTreeTupleGetNPosting(itup); i++)
1040  {
1041 
1042  current = BTreeTupleGetPostingN(itup, i);
1043 
1044  if (ItemPointerCompare(current, &last) <= 0)
1045  {
1046  char *itid = psprintf("(%u,%u)", state->targetblock, offset);
1047 
1048  ereport(ERROR,
1049  (errcode(ERRCODE_INDEX_CORRUPTED),
1050  errmsg_internal("posting list contains misplaced TID in index \"%s\"",
1051  RelationGetRelationName(state->rel)),
1052  errdetail_internal("Index tid=%s posting list offset=%d page lsn=%X/%X.",
1053  itid, i,
1054  (uint32) (state->targetlsn >> 32),
1055  (uint32) state->targetlsn)));
1056  }
1057 
1058  ItemPointerCopy(current, &last);
1059  }
1060  }
1061 
1062  /* Build insertion scankey for current page offset */
1063  skey = bt_mkscankey_pivotsearch(state->rel, itup);
1064 
1065  /*
1066  * Make sure tuple size does not exceed the relevant BTREE_VERSION
1067  * specific limit.
1068  *
1069  * BTREE_VERSION 4 (which introduced heapkeyspace rules) requisitioned
1070  * a small amount of space from BTMaxItemSize() in order to ensure
1071  * that suffix truncation always has enough space to add an explicit
1072  * heap TID back to a tuple -- we pessimistically assume that every
1073  * newly inserted tuple will eventually need to have a heap TID
1074  * appended during a future leaf page split, when the tuple becomes
1075  * the basis of the new high key (pivot tuple) for the leaf page.
1076  *
1077  * Since the reclaimed space is reserved for that purpose, we must not
1078  * enforce the slightly lower limit when the extra space has been used
1079  * as intended. In other words, there is only a cross-version
1080  * difference in the limit on tuple size within leaf pages.
1081  *
1082  * Still, we're particular about the details within BTREE_VERSION 4
1083  * internal pages. Pivot tuples may only use the extra space for its
1084  * designated purpose. Enforce the lower limit for pivot tuples when
1085  * an explicit heap TID isn't actually present. (In all other cases
1086  * suffix truncation is guaranteed to generate a pivot tuple that's no
1087  * larger than the first right tuple provided to it by its caller.)
1088  */
1089  lowersizelimit = skey->heapkeyspace &&
1090  (P_ISLEAF(topaque) || BTreeTupleGetHeapTID(itup) == NULL);
1091  if (tupsize > (lowersizelimit ? BTMaxItemSize(state->target) :
1092  BTMaxItemSizeNoHeapTid(state->target)))
1093  {
1094  ItemPointer tid = BTreeTupleGetPointsToTID(itup);
1095  char *itid,
1096  *htid;
1097 
1098  itid = psprintf("(%u,%u)", state->targetblock, offset);
1099  htid = psprintf("(%u,%u)",
1100  ItemPointerGetBlockNumberNoCheck(tid),
1101  ItemPointerGetOffsetNumberNoCheck(tid));
1102 
1103  ereport(ERROR,
1104  (errcode(ERRCODE_INDEX_CORRUPTED),
1105  errmsg("index row size %zu exceeds maximum for index \"%s\"",
1106  tupsize, RelationGetRelationName(state->rel)),
1107  errdetail_internal("Index tid=%s points to %s tid=%s page lsn=%X/%X.",
1108  itid,
1109  P_ISLEAF(topaque) ? "heap" : "index",
1110  htid,
1111  (uint32) (state->targetlsn >> 32),
1112  (uint32) state->targetlsn)));
1113  }
1114 
1115  /* Fingerprint leaf page tuples (those that point to the heap) */
1116  if (state->heapallindexed && P_ISLEAF(topaque) && !ItemIdIsDead(itemid))
1117  {
1118  IndexTuple norm;
1119 
1120  if (BTreeTupleIsPosting(itup))
1121  {
1122  /* Fingerprint all elements as distinct "plain" tuples */
1123  for (int i = 0; i < BTreeTupleGetNPosting(itup); i++)
1124  {
1125  IndexTuple logtuple;
1126 
1127  logtuple = bt_posting_plain_tuple(itup, i);
1128  norm = bt_normalize_tuple(state, logtuple);
1129  bloom_add_element(state->filter, (unsigned char *) norm,
1130  IndexTupleSize(norm));
1131  /* Be tidy */
1132  if (norm != logtuple)
1133  pfree(norm);
1134  pfree(logtuple);
1135  }
1136  }
1137  else
1138  {
1139  norm = bt_normalize_tuple(state, itup);
1140  bloom_add_element(state->filter, (unsigned char *) norm,
1141  IndexTupleSize(norm));
1142  /* Be tidy */
1143  if (norm != itup)
1144  pfree(norm);
1145  }
1146  }
1147 
1148  /*
1149  * * High key check *
1150  *
1151  * If there is a high key (if this is not the rightmost page on its
1152  * entire level), check that high key actually is upper bound on all
1153  * page items. If this is a posting list tuple, we'll need to set
1154  * scantid to be highest TID in posting list.
1155  *
1156  * We prefer to check all items against high key rather than checking
1157  * just the last and trusting that the operator class obeys the
1158  * transitive law (which implies that all previous items also
1159  * respected the high key invariant if they pass the item order
1160  * check).
1161  *
1162  * Ideally, we'd compare every item in the index against every other
1163  * item in the index, and not trust opclass obedience of the
1164  * transitive law to bridge the gap between children and their
1165  * grandparents (as well as great-grandparents, and so on). We don't
1166  * go to those lengths because that would be prohibitively expensive,
1167  * and probably not markedly more effective in practice.
1168  *
1169  * On the leaf level, we check that the key is <= the highkey.
1170  * However, on non-leaf levels we check that the key is < the highkey,
1171  * because the high key is "just another separator" rather than a copy
1172  * of some existing key item; we expect it to be unique among all keys
1173  * on the same level. (Suffix truncation will sometimes produce a
1174  * leaf highkey that is an untruncated copy of the lastleft item, but
1175  * never any other item, which necessitates weakening the leaf level
1176  * check to <=.)
1177  *
1178  * Full explanation for why a highkey is never truly a copy of another
1179  * item from the same level on internal levels:
1180  *
1181  * While the new left page's high key is copied from the first offset
1182  * on the right page during an internal page split, that's not the
1183  * full story. In effect, internal pages are split in the middle of
1184  * the firstright tuple, not between the would-be lastleft and
1185  * firstright tuples: the firstright key ends up on the left side as
1186  * left's new highkey, and the firstright downlink ends up on the
1187  * right side as right's new "negative infinity" item. The negative
1188  * infinity tuple is truncated to zero attributes, so we're only left
1189  * with the downlink. In other words, the copying is just an
1190  * implementation detail of splitting in the middle of a (pivot)
1191  * tuple. (See also: "Notes About Data Representation" in the nbtree
1192  * README.)
1193  */
1194  scantid = skey->scantid;
1195  if (state->heapkeyspace && BTreeTupleIsPosting(itup))
1196  skey->scantid = BTreeTupleGetMaxHeapTID(itup);
1197 
1198  if (!P_RIGHTMOST(topaque) &&
1199  !(P_ISLEAF(topaque) ? invariant_leq_offset(state, skey, P_HIKEY) :
1200  invariant_l_offset(state, skey, P_HIKEY)))
1201  {
1202  ItemPointer tid = BTreeTupleGetPointsToTID(itup);
1203  char *itid,
1204  *htid;
1205 
1206  itid = psprintf("(%u,%u)", state->targetblock, offset);
1207  htid = psprintf("(%u,%u)",
1208  ItemPointerGetBlockNumberNoCheck(tid),
1209  ItemPointerGetOffsetNumberNoCheck(tid));
1210 
1211  ereport(ERROR,
1212  (errcode(ERRCODE_INDEX_CORRUPTED),
1213  errmsg("high key invariant violated for index \"%s\"",
1214  RelationGetRelationName(state->rel)),
1215  errdetail_internal("Index tid=%s points to %s tid=%s page lsn=%X/%X.",
1216  itid,
1217  P_ISLEAF(topaque) ? "heap" : "index",
1218  htid,
1219  (uint32) (state->targetlsn >> 32),
1220  (uint32) state->targetlsn)));
1221  }
1222  /* Reset, in case scantid was set to (itup) posting tuple's max TID */
1223  skey->scantid = scantid;
1224 
1225  /*
1226  * * Item order check *
1227  *
1228  * Check that items are stored on page in logical order, by checking
1229  * current item is strictly less than next item (if any).
1230  */
1231  if (OffsetNumberNext(offset) <= max &&
1232  !invariant_l_offset(state, skey, OffsetNumberNext(offset)))
1233  {
1234  ItemPointer tid;
1235  char *itid,
1236  *htid,
1237  *nitid,
1238  *nhtid;
1239 
1240  itid = psprintf("(%u,%u)", state->targetblock, offset);
1241  tid = BTreeTupleGetPointsToTID(itup);
1242  htid = psprintf("(%u,%u)",
1243  ItemPointerGetBlockNumberNoCheck(tid),
1244  ItemPointerGetOffsetNumberNoCheck(tid));
1245  nitid = psprintf("(%u,%u)", state->targetblock,
1246  OffsetNumberNext(offset));
1247 
1248  /* Reuse itup to get pointed-to heap location of second item */
1249  itemid = PageGetItemIdCareful(state, state->targetblock,
1250  state->target,
1251  OffsetNumberNext(offset));
1252  itup = (IndexTuple) PageGetItem(state->target, itemid);
1253  tid = BTreeTupleGetPointsToTID(itup);
1254  nhtid = psprintf("(%u,%u)",
1255  ItemPointerGetBlockNumberNoCheck(tid),
1256  ItemPointerGetOffsetNumberNoCheck(tid));
1257 
1258  ereport(ERROR,
1259  (errcode(ERRCODE_INDEX_CORRUPTED),
1260  errmsg("item order invariant violated for index \"%s\"",
1261  RelationGetRelationName(state->rel)),
1262  errdetail_internal("Lower index tid=%s (points to %s tid=%s) "
1263  "higher index tid=%s (points to %s tid=%s) "
1264  "page lsn=%X/%X.",
1265  itid,
1266  P_ISLEAF(topaque) ? "heap" : "index",
1267  htid,
1268  nitid,
1269  P_ISLEAF(topaque) ? "heap" : "index",
1270  nhtid,
1271  (uint32) (state->targetlsn >> 32),
1272  (uint32) state->targetlsn)));
1273  }
1274 
1275  /*
1276  * * Last item check *
1277  *
1278  * Check last item against next/right page's first data item's when
1279  * last item on page is reached. This additional check will detect
1280  * transposed pages iff the supposed right sibling page happens to
1281  * belong before target in the key space. (Otherwise, a subsequent
1282  * heap verification will probably detect the problem.)
1283  *
1284  * This check is similar to the item order check that will have
1285  * already been performed for every other "real" item on target page
1286  * when last item is checked. The difference is that the next item
1287  * (the item that is compared to target's last item) needs to come
1288  * from the next/sibling page. There may not be such an item
1289  * available from sibling for various reasons, though (e.g., target is
1290  * the rightmost page on level).
1291  */
1292  else if (offset == max)
1293  {
1294  BTScanInsert rightkey;
1295 
1296  /* Get item in next/right page */
1297  rightkey = bt_right_page_check_scankey(state);
1298 
1299  if (rightkey &&
1300  !invariant_g_offset(state, rightkey, max))
1301  {
1302  /*
1303  * As explained at length in bt_right_page_check_scankey(),
1304  * there is a known !readonly race that could account for
1305  * apparent violation of invariant, which we must check for
1306  * before actually proceeding with raising error. Our canary
1307  * condition is that target page was deleted.
1308  */
1309  if (!state->readonly)
1310  {
1311  /* Get fresh copy of target page */
1312  state->target = palloc_btree_page(state, state->targetblock);
1313  /* Note that we deliberately do not update target LSN */
1314  topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
1315 
1316  /*
1317  * All !readonly checks now performed; just return
1318  */
1319  if (P_IGNORE(topaque))
1320  return;
1321  }
1322 
1323  ereport(ERROR,
1324  (errcode(ERRCODE_INDEX_CORRUPTED),
1325  errmsg("cross page item order invariant violated for index \"%s\"",
1326  RelationGetRelationName(state->rel)),
1327  errdetail_internal("Last item on page tid=(%u,%u) page lsn=%X/%X.",
1328  state->targetblock, offset,
1329  (uint32) (state->targetlsn >> 32),
1330  (uint32) state->targetlsn)));
1331  }
1332  }
1333 
1334  /*
1335  * * Downlink check *
1336  *
1337  * Additional check of child items iff this is an internal page and
1338  * caller holds a ShareLock. This happens for every downlink (item)
1339  * in target excluding the negative-infinity downlink (again, this is
1340  * because it has no useful value to compare).
1341  */
1342  if (!P_ISLEAF(topaque) && state->readonly)
1343  {
1344  BlockNumber childblock = BTreeTupleGetDownLink(itup);
1345 
1346  bt_downlink_check(state, skey, childblock);
1347  }
1348  }
1349 
1350  /*
1351  * * Check if page has a downlink in parent *
1352  *
1353  * This can only be checked in heapallindexed + readonly case.
1354  */
1355  if (state->heapallindexed && state->readonly)
1356  bt_downlink_missing_check(state);
1357 }
int32 ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
Definition: itemptr.c:52
#define ItemPointerGetOffsetNumberNoCheck(pointer)
Definition: itemptr.h:108
static bool invariant_leq_offset(BtreeCheckState *state, BTScanInsert key, OffsetNumber upperbound)
static ItemPointer BTreeTupleGetPointsToTID(IndexTuple itup)
BlockNumber targetblock
Definition: verify_nbtree.c:93
int errhint(const char *fmt,...)
Definition: elog.c:1069
#define P_IGNORE(opaque)
Definition: nbtree.h:219
static ItemPointer BTreeTupleGetHeapTID(IndexTuple itup)
Definition: nbtree.h:517
ItemPointer scantid
Definition: nbtree.h:674
static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum)
bloom_filter * downlinkfilter
void bloom_add_element(bloom_filter *filter, unsigned char *elem, size_t len)
Definition: bloomfilter.c:136
char * psprintf(const char *fmt,...)
Definition: psprintf.c:46
#define P_FIRSTDATAKEY(opaque)
Definition: nbtree.h:244
static IndexTuple bt_normalize_tuple(BtreeCheckState *state, IndexTuple itup)
static BlockNumber BTreeTupleGetDownLink(IndexTuple pivot)
Definition: nbtree.h:431
#define BTMaxItemSizeNoHeapTid(page)
Definition: nbtree.h:163
int errcode(int sqlerrcode)
Definition: elog.c:608
static BTScanInsert bt_right_page_check_scankey(BtreeCheckState *state)
uint32 BlockNumber
Definition: block.h:31
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define BTreeTupleGetNAtts(itup, rel)
Definition: nbtree.h:452
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
BTPageOpaqueData * BTPageOpaque
Definition: nbtree.h:69
bloom_filter * filter
int errdetail_internal(const char *fmt,...)
Definition: elog.c:982
uint16 OffsetNumber
Definition: off.h:24
static bool invariant_l_offset(BtreeCheckState *state, BTScanInsert key, OffsetNumber upperbound)
static bool invariant_g_offset(BtreeCheckState *state, BTScanInsert key, OffsetNumber lowerbound)
XLogRecPtr targetlsn
Definition: verify_nbtree.c:95
void pfree(void *pointer)
Definition: mcxt.c:1056
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ERROR
Definition: elog.h:43
#define DEBUG2
Definition: elog.h:24
IndexTupleData * IndexTuple
Definition: itup.h:53
#define RelationGetRelationName(relation)
Definition: rel.h:462
unsigned int uint32
Definition: c.h:367
static void bt_downlink_missing_check(BtreeCheckState *state)
#define ereport(elevel, rest)
Definition: elog.h:141
static ItemPointer BTreeTupleGetMaxHeapTID(IndexTuple itup)
Definition: nbtree.h:543
static BTScanInsert bt_mkscankey_pivotsearch(Relation rel, IndexTuple itup)
static void bt_downlink_check(BtreeCheckState *state, BTScanInsert targetkey, BlockNumber childblock)
static ItemPointer BTreeTupleGetPostingN(IndexTuple posting, int n)
Definition: nbtree.h:419
static bool BTreeTupleIsPosting(IndexTuple itup)
Definition: nbtree.h:369
int errmsg_internal(const char *fmt,...)
Definition: elog.c:909
bool _bt_check_natts(Relation rel, bool heapkeyspace, Page page, OffsetNumber offnum)
Definition: nbtutils.c:2484
bool heapkeyspace
Definition: nbtree.h:669
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
#define PageGetSpecialPointer(page)
Definition: bufpage.h:326
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define BTMaxItemSize(page)
Definition: nbtree.h:157
#define P_HIKEY
Definition: nbtree.h:242
static bool bt_rootdescend(BtreeCheckState *state, IndexTuple itup)
int errmsg(const char *fmt,...)
Definition: elog.c:822
static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block, Page page, OffsetNumber offset)
#define ItemPointerGetBlockNumberNoCheck(pointer)
Definition: itemptr.h:89
#define elog(elevel,...)
Definition: elog.h:228
int i
static uint16 BTreeTupleGetNPosting(IndexTuple posting)
Definition: nbtree.h:393
static bool offset_is_negative_infinity(BTPageOpaque opaque, OffsetNumber offset)
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define P_RIGHTMOST(opaque)
Definition: nbtree.h:213
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
#define IndexTupleSize(itup)
Definition: itup.h:71
#define ItemPointerCopy(fromPointer, toPointer)
Definition: itemptr.h:161
static IndexTuple bt_posting_plain_tuple(IndexTuple itup, int n)
#define P_ISLEAF(opaque)
Definition: nbtree.h:214

◆ bt_tuple_present_callback()

static void bt_tuple_present_callback ( Relation  index,
ItemPointer  tid,
Datum values,
bool isnull,
bool  tupleIsAlive,
void *  checkstate 
)
static

Definition at line 1967 of file verify_nbtree.c.

References Assert, bloom_lacks_element(), bt_normalize_tuple(), ereport, errcode(), ERRCODE_DATA_CORRUPTED, errhint(), errmsg(), ERROR, BtreeCheckState::filter, BtreeCheckState::heapallindexed, BtreeCheckState::heaprel, BtreeCheckState::heaptuplespresent, index_form_tuple(), IndexTupleSize, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, pfree(), BtreeCheckState::readonly, BtreeCheckState::rel, RelationGetDescr, RelationGetRelationName, and IndexTupleData::t_tid.

Referenced by bt_check_every_level().

1969 {
1970  BtreeCheckState *state = (BtreeCheckState *) checkstate;
1971  IndexTuple itup,
1972  norm;
1973 
1974  Assert(state->heapallindexed);
1975 
1976  /* Generate a normalized index tuple for fingerprinting */
1977  itup = index_form_tuple(RelationGetDescr(index), values, isnull);
1978  itup->t_tid = *tid;
1979  norm = bt_normalize_tuple(state, itup);
1980 
1981  /* Probe Bloom filter -- tuple should be present */
1982  if (bloom_lacks_element(state->filter, (unsigned char *) norm,
1983  IndexTupleSize(norm)))
1984  ereport(ERROR,
1985  (errcode(ERRCODE_DATA_CORRUPTED),
1986  errmsg("heap tuple (%u,%u) from table \"%s\" lacks matching index tuple within index \"%s\"",
1987  ItemPointerGetBlockNumber(&(itup->t_tid)),
1988  ItemPointerGetOffsetNumber(&(itup->t_tid)),
1989  RelationGetRelationName(state->heaprel),
1990  RelationGetRelationName(state->rel)),
1991  !state->readonly
1992  ? errhint("Retrying verification using the function bt_index_parent_check() might provide a more specific error.")
1993  : 0));
1994 
1995  state->heaptuplespresent++;
1996  pfree(itup);
1997  /* Cannot leak memory here */
1998  if (norm != itup)
1999  pfree(norm);
2000 }
int errhint(const char *fmt,...)
Definition: elog.c:1069
#define RelationGetDescr(relation)
Definition: rel.h:454
static IndexTuple bt_normalize_tuple(BtreeCheckState *state, IndexTuple itup)
ItemPointerData t_tid
Definition: itup.h:37
int errcode(int sqlerrcode)
Definition: elog.c:608
bloom_filter * filter
IndexTuple index_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: indextuple.c:47
void pfree(void *pointer)
Definition: mcxt.c:1056
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:462
#define ereport(elevel, rest)
Definition: elog.h:141
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:45
Relation heaprel
Definition: verify_nbtree.c:72
#define Assert(condition)
Definition: c.h:738
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
static Datum values[MAXATTR]
Definition: bootstrap.c:167
int errmsg(const char *fmt,...)
Definition: elog.c:822
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
bool bloom_lacks_element(bloom_filter *filter, unsigned char *elem, size_t len)
Definition: bloomfilter.c:158
#define IndexTupleSize(itup)
Definition: itup.h:71
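
The callback itself never scans the heap; bt_check_every_level() (documented earlier on this page) hands it to the table AM, which then invokes it once per live heap tuple. A simplified sketch of that hookup, omitting the snapshot and IndexInfo adjustments the real caller performs:

/*
 * Sketch: have the table AM call bt_tuple_present_callback() for every
 * heap tuple that ought to have a matching index tuple.
 */
IndexInfo  *indexinfo = BuildIndexInfo(state->rel);

(void) table_index_build_scan(state->heaprel,       /* heap to scan */
                              state->rel,           /* index being verified */
                              indexinfo,
                              true,                 /* allow_sync */
                              false,                /* no progress reporting */
                              bt_tuple_present_callback,
                              (void *) state,
                              NULL);                /* let the table AM start the scan */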

◆ btree_index_checkable()

static void btree_index_checkable ( Relation  rel)
inlinestatic

Definition at line 318 of file verify_nbtree.c.

References ereport, errcode(), errdetail(), errmsg(), ERROR, RelationData::rd_index, RelationData::rd_rel, RELATION_IS_OTHER_TEMP, and RelationGetRelationName.

Referenced by bt_index_check_internal().

319 {
320  if (rel->rd_rel->relkind != RELKIND_INDEX ||
321  rel->rd_rel->relam != BTREE_AM_OID)
322  ereport(ERROR,
323  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
324  errmsg("only B-Tree indexes are supported as targets for verification"),
325  errdetail("Relation \"%s\" is not a B-Tree index.",
326  RelationGetRelationName(rel))));
327 
328  if (RELATION_IS_OTHER_TEMP(rel))
329  ereport(ERROR,
330  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
331  errmsg("cannot access temporary tables of other sessions"),
332  errdetail("Index \"%s\" is associated with temporary relation.",
333  RelationGetRelationName(rel))));
334 
335  if (!rel->rd_index->indisvalid)
336  ereport(ERROR,
337  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
338  errmsg("cannot check index \"%s\"",
339  RelationGetRelationName(rel)),
340  errdetail("Index is not valid.")));
341 }
int errcode(int sqlerrcode)
Definition: elog.c:608
Form_pg_class rd_rel
Definition: rel.h:84
Form_pg_index rd_index
Definition: rel.h:149
#define ERROR
Definition: elog.h:43
int errdetail(const char *fmt,...)
Definition: elog.c:955
#define RelationGetRelationName(relation)
Definition: rel.h:462
#define ereport(elevel, rest)
Definition: elog.h:141
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:558
int errmsg(const char *fmt,...)
Definition: elog.c:822
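
btree_index_checkable() expects the caller to have already opened and locked the index; bt_index_check_internal() (documented earlier on this page) does that, locking the table before its index to avoid deadlocks. A simplified sketch of the calling pattern, where parentcheck and heapid are placeholders and error cleanup is omitted:

Relation    heaprel;
Relation    indrel;
LOCKMODE    lockmode = parentcheck ? ShareLock : AccessShareLock;

/* Lock the table before its index to avoid deadlocking against DDL */
heaprel = table_open(heapid, lockmode);     /* heapid: OID looked up from indrelid */
indrel = index_open(indrelid, lockmode);

btree_index_checkable(indrel);
if (btree_index_mainfork_expected(indrel))
{
    /* ... bt_check_every_level() runs the actual verification here ... */
}

index_close(indrel, lockmode);
table_close(heaprel, lockmode);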

◆ btree_index_mainfork_expected()

static bool btree_index_mainfork_expected ( Relation  rel)
inlinestatic

Definition at line 351 of file verify_nbtree.c.

References ereport, errcode(), errmsg(), NOTICE, RelationData::rd_rel, RecoveryInProgress(), and RelationGetRelationName.

Referenced by bt_index_check_internal().

352 {
353  if (rel->rd_rel->relpersistence != RELPERSISTENCE_UNLOGGED ||
354  !RecoveryInProgress())
355  return true;
356 
357  ereport(NOTICE,
358  (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
359  errmsg("cannot verify unlogged index \"%s\" during recovery, skipping",
360  RelationGetRelationName(rel))));
361 
362  return false;
363 }
int errcode(int sqlerrcode)
Definition: elog.c:608
Form_pg_class rd_rel
Definition: rel.h:84
bool RecoveryInProgress(void)
Definition: xlog.c:7964
#define RelationGetRelationName(relation)
Definition: rel.h:462
#define ereport(elevel, rest)
Definition: elog.h:141
#define NOTICE
Definition: elog.h:37
int errmsg(const char *fmt,...)
Definition: elog.c:822

◆ BTreeTupleGetHeapTIDCareful()

static ItemPointer BTreeTupleGetHeapTIDCareful ( BtreeCheckState state,
IndexTuple  itup,
bool  nonpivot 
)
inlinestatic

Definition at line 2657 of file verify_nbtree.c.

References Assert, BTreeTupleGetHeapTID(), BTreeTupleIsPivot(), ereport, errcode(), errmsg(), errmsg_internal(), ERROR, BtreeCheckState::heapkeyspace, ItemPointerIsValid, BtreeCheckState::rel, RelationGetRelationName, and BtreeCheckState::targetblock.

Referenced by invariant_l_nontarget_offset(), and invariant_l_offset().

2659 {
2660  ItemPointer htid;
2661 
2662  /*
2663  * Caller determines whether this is supposed to be a pivot or non-pivot
2664  * tuple using page type and item offset number. Verify that tuple
2665  * metadata agrees with this.
2666  */
2667  Assert(state->heapkeyspace);
2668  if (BTreeTupleIsPivot(itup) && nonpivot)
2669  ereport(ERROR,
2670  (errcode(ERRCODE_INDEX_CORRUPTED),
2671  errmsg_internal("block %u or its right sibling block or child block in index \"%s\" has unexpected pivot tuple",
2672  state->targetblock,
2673  RelationGetRelationName(state->rel))));
2674 
2675  if (!BTreeTupleIsPivot(itup) && !nonpivot)
2676  ereport(ERROR,
2677  (errcode(ERRCODE_INDEX_CORRUPTED),
2678  errmsg_internal("block %u or its right sibling block or child block in index \"%s\" has unexpected non-pivot tuple",
2679  state->targetblock,
2680  RelationGetRelationName(state->rel))));
2681 
2682  htid = BTreeTupleGetHeapTID(itup);
2683  if (!ItemPointerIsValid(htid) && nonpivot)
2684  ereport(ERROR,
2685  (errcode(ERRCODE_INDEX_CORRUPTED),
2686  errmsg("block %u or its right sibling block or child block in index \"%s\" contains non-pivot tuple that lacks a heap TID",
2687  state->targetblock,
2688  RelationGetRelationName(state->rel))));
2689 
2690  return htid;
2691 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
BlockNumber targetblock
Definition: verify_nbtree.c:93
static bool BTreeTupleIsPivot(IndexTuple itup)
Definition: nbtree.h:357
static ItemPointer BTreeTupleGetHeapTID(IndexTuple itup)
Definition: nbtree.h:517
int errcode(int sqlerrcode)
Definition: elog.c:608
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:462
#define ereport(elevel, rest)
Definition: elog.h:141
int errmsg_internal(const char *fmt,...)
Definition: elog.c:909
#define Assert(condition)
Definition: c.h:738
int errmsg(const char *fmt,...)
Definition: elog.c:822

◆ BTreeTupleGetPointsToTID()

static ItemPointer BTreeTupleGetPointsToTID ( IndexTuple  itup)
inlinestatic

Definition at line 2705 of file verify_nbtree.c.

References BTreeTupleGetHeapTID(), BTreeTupleIsPivot(), and IndexTupleData::t_tid.

Referenced by bt_target_page_check().

2706 {
2707  /*
2708  * Rely on the assumption that !heapkeyspace internal page data items will
2709  * correctly return TID with downlink here -- BTreeTupleGetHeapTID() won't
2710  * recognize it as a pivot tuple, but everything still works out because
2711  * the t_tid field is still returned
2712  */
2713  if (!BTreeTupleIsPivot(itup))
2714  return BTreeTupleGetHeapTID(itup);
2715 
2716  /* Pivot tuple returns TID with downlink block (heapkeyspace variant) */
2717  return &itup->t_tid;
2718 }
static bool BTreeTupleIsPivot(IndexTuple itup)
Definition: nbtree.h:357
static ItemPointer BTreeTupleGetHeapTID(IndexTuple itup)
Definition: nbtree.h:517
ItemPointerData t_tid
Definition: itup.h:37

◆ invariant_g_offset()

static bool invariant_g_offset ( BtreeCheckState state,
BTScanInsert  key,
OffsetNumber  lowerbound 
)
inlinestatic

Definition at line 2343 of file verify_nbtree.c.

References _bt_compare(), Assert, cmp(), BTScanInsertData::heapkeyspace, BTScanInsertData::pivotsearch, BtreeCheckState::rel, and BtreeCheckState::target.

Referenced by bt_target_page_check().

2345 {
2346  int32 cmp;
2347 
2348  Assert(key->pivotsearch);
2349 
2350  cmp = _bt_compare(state->rel, key, state->target, lowerbound);
2351 
2352  /* pg_upgrade'd indexes may legally have equal sibling tuples */
2353  if (!key->heapkeyspace)
2354  return cmp >= 0;
2355 
2356  /*
2357  * No need to consider the possibility that scankey has attributes that we
2358  * need to force to be interpreted as negative infinity. _bt_compare() is
2359  * able to determine that scankey is greater than negative infinity. The
2360  * distinction between "==" and "<" isn't interesting here, since
2361  * corruption is indicated either way.
2362  */
2363  return cmp > 0;
2364 }
signed int int32
Definition: c.h:355
int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum)
Definition: nbtsearch.c:649
bool pivotsearch
Definition: nbtree.h:673
#define Assert(condition)
Definition: c.h:738
bool heapkeyspace
Definition: nbtree.h:669
static int cmp(const chr *x, const chr *y, size_t len)
Definition: regc_locale.c:742

◆ invariant_l_nontarget_offset()

static bool invariant_l_nontarget_offset ( BtreeCheckState state,
BTScanInsert  key,
BlockNumber  nontargetblock,
Page  nontarget,
OffsetNumber  upperbound 
)
inlinestatic

Definition at line 2379 of file verify_nbtree.c.

References _bt_compare(), Assert, BTreeTupleGetHeapTIDCareful(), BTreeTupleGetNKeyAtts, cmp(), BTScanInsertData::heapkeyspace, BTScanInsertData::keysz, P_FIRSTDATAKEY, P_ISLEAF, PageGetItem, PageGetItemIdCareful(), PageGetSpecialPointer, BTScanInsertData::pivotsearch, BtreeCheckState::rel, and BTScanInsertData::scantid.

Referenced by bt_downlink_check().

2382 {
2383  ItemId itemid;
2384  int32 cmp;
2385 
2386  Assert(key->pivotsearch);
2387 
2388  /* Verify line pointer before checking tuple */
2389  itemid = PageGetItemIdCareful(state, nontargetblock, nontarget,
2390  upperbound);
2391  cmp = _bt_compare(state->rel, key, nontarget, upperbound);
2392 
2393  /* pg_upgrade'd indexes may legally have equal sibling tuples */
2394  if (!key->heapkeyspace)
2395  return cmp <= 0;
2396 
2397  /* See invariant_l_offset() for an explanation of this extra step */
2398  if (cmp == 0)
2399  {
2400  IndexTuple child;
2401  int uppnkeyatts;
2402  ItemPointer childheaptid;
2403  BTPageOpaque copaque;
2404  bool nonpivot;
2405 
2406  child = (IndexTuple) PageGetItem(nontarget, itemid);
2407  copaque = (BTPageOpaque) PageGetSpecialPointer(nontarget);
2408  nonpivot = P_ISLEAF(copaque) && upperbound >= P_FIRSTDATAKEY(copaque);
2409 
2410  /* Get number of keys + heap TID for child/non-target item */
2411  uppnkeyatts = BTreeTupleGetNKeyAtts(child, state->rel);
2412  childheaptid = BTreeTupleGetHeapTIDCareful(state, child, nonpivot);
2413 
2414  /* Heap TID is tiebreaker key attribute */
2415  if (key->keysz == uppnkeyatts)
2416  return key->scantid == NULL && childheaptid != NULL;
2417 
2418  return key->keysz < uppnkeyatts;
2419  }
2420 
2421  return cmp < 0;
2422 }
ItemPointer scantid
Definition: nbtree.h:674
#define P_FIRSTDATAKEY(opaque)
Definition: nbtree.h:244
BTPageOpaqueData * BTPageOpaque
Definition: nbtree.h:69
signed int int32
Definition: c.h:355
IndexTupleData * IndexTuple
Definition: itup.h:53
int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum)
Definition: nbtsearch.c:649
static ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state, IndexTuple itup, bool nonpivot)
bool pivotsearch
Definition: nbtree.h:673
#define Assert(condition)
Definition: c.h:738
#define BTreeTupleGetNKeyAtts(itup, rel)
Definition: verify_nbtree.c:50
bool heapkeyspace
Definition: nbtree.h:669
#define PageGetSpecialPointer(page)
Definition: bufpage.h:326
static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block, Page page, OffsetNumber offset)
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
static int cmp(const chr *x, const chr *y, size_t len)
Definition: regc_locale.c:742
#define P_ISLEAF(opaque)
Definition: nbtree.h:214

◆ invariant_l_offset()

static bool invariant_l_offset ( BtreeCheckState state,
BTScanInsert  key,
OffsetNumber  upperbound 
)
inlinestatic

Definition at line 2257 of file verify_nbtree.c.

References _bt_compare(), Assert, BTreeTupleGetHeapTIDCareful(), BTreeTupleGetNKeyAtts, cmp(), BTScanInsertData::heapkeyspace, invariant_leq_offset(), BTScanInsertData::keysz, P_FIRSTDATAKEY, P_ISLEAF, PageGetItem, PageGetItemIdCareful(), PageGetSpecialPointer, BTScanInsertData::pivotsearch, BtreeCheckState::rel, BTScanInsertData::scantid, BtreeCheckState::target, and BtreeCheckState::targetblock.

Referenced by bt_target_page_check().

2259 {
2260  ItemId itemid;
2261  int32 cmp;
2262 
2263  Assert(key->pivotsearch);
2264 
2265  /* Verify line pointer before checking tuple */
2266  itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
2267  upperbound);
2268  /* pg_upgrade'd indexes may legally have equal sibling tuples */
2269  if (!key->heapkeyspace)
2270  return invariant_leq_offset(state, key, upperbound);
2271 
2272  cmp = _bt_compare(state->rel, key, state->target, upperbound);
2273 
2274  /*
2275  * _bt_compare() is capable of determining that a scankey with a
2276  * filled-out attribute is greater than pivot tuples where the comparison
2277  * is resolved at a truncated attribute (value of attribute in pivot is
2278  * minus infinity). However, it is not capable of determining that a
2279  * scankey is _less than_ a tuple on the basis of a comparison resolved at
2280  * _scankey_ minus infinity attribute. Complete an extra step to simulate
2281  * having minus infinity values for omitted scankey attribute(s).
2282  */
2283  if (cmp == 0)
2284  {
2285  BTPageOpaque topaque;
2286  IndexTuple ritup;
2287  int uppnkeyatts;
2288  ItemPointer rheaptid;
2289  bool nonpivot;
2290 
2291  ritup = (IndexTuple) PageGetItem(state->target, itemid);
2292  topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
2293  nonpivot = P_ISLEAF(topaque) && upperbound >= P_FIRSTDATAKEY(topaque);
2294 
2295  /* Get number of keys + heap TID for item to the right */
2296  uppnkeyatts = BTreeTupleGetNKeyAtts(ritup, state->rel);
2297  rheaptid = BTreeTupleGetHeapTIDCareful(state, ritup, nonpivot);
2298 
2299  /* Heap TID is tiebreaker key attribute */
2300  if (key->keysz == uppnkeyatts)
2301  return key->scantid == NULL && rheaptid != NULL;
2302 
2303  return key->keysz < uppnkeyatts;
2304  }
2305 
2306  return cmp < 0;
2307 }
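The cmp == 0 branch above (and the matching branch in invariant_l_nontarget_offset()) reduces to a small decision on attribute counts and heap TID presence. The following standalone sketch models that decision with plain C types; it is illustrative only and does not use the backend's BTScanInsert or IndexTuple definitions, and the function name is hypothetical.

/*
 * Standalone model of the cmp == 0 tiebreaker used by invariant_l_offset()
 * and invariant_l_nontarget_offset().  Plain ints/bools stand in for the
 * backend's BTScanInsert and IndexTuple; illustrative only.
 */
#include <assert.h>
#include <stdbool.h>

static bool
scankey_strictly_less_on_tie(int scankey_natts, bool scankey_has_scantid,
                             int tuple_nkeyatts, bool tuple_has_heaptid)
{
    /*
     * Same number of untruncated key attributes: the scankey is strictly
     * less only when it omits the heap TID (which then sorts as minus
     * infinity) while the tuple still carries one.
     */
    if (scankey_natts == tuple_nkeyatts)
        return !scankey_has_scantid && tuple_has_heaptid;

    /*
     * Otherwise the scankey is strictly less iff it has fewer attributes,
     * since its omitted attributes compare as minus infinity.
     */
    return scankey_natts < tuple_nkeyatts;
}

int
main(void)
{
    /* scankey without scantid vs. tuple with equal key atts and a heap TID */
    assert(scankey_strictly_less_on_tie(2, false, 2, true));
    /* neither side supplies a tiebreaker: not strictly less */
    assert(!scankey_strictly_less_on_tie(2, false, 2, false));
    return 0;
}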

◆ invariant_leq_offset()

static bool invariant_leq_offset ( BtreeCheckState *  state,
BTScanInsert  key,
OffsetNumber  upperbound 
)
inline static

Definition at line 2320 of file verify_nbtree.c.

References _bt_compare(), Assert, cmp(), BTScanInsertData::pivotsearch, BtreeCheckState::rel, and BtreeCheckState::target.

Referenced by bt_target_page_check(), and invariant_l_offset().

2322 {
2323  int32 cmp;
2324 
2325  Assert(key->pivotsearch);
2326 
2327  cmp = _bt_compare(state->rel, key, state->target, upperbound);
2328 
2329  return cmp <= 0;
2330 }

◆ offset_is_negative_infinity()

static bool offset_is_negative_infinity ( BTPageOpaque  opaque,
OffsetNumber  offset 
)
inline static

Definition at line 2222 of file verify_nbtree.c.

References P_FIRSTDATAKEY, and P_ISLEAF.

Referenced by bt_downlink_check(), and bt_target_page_check().

2223 {
2224  /*
2225  * For internal pages only, the first item after high key, if any, is
2226  * negative infinity item. Internal pages always have a negative infinity
2227  * item, whereas leaf pages never have one. This implies that negative
2228  * infinity item is either first or second line item, or there is none
2229  * within page.
2230  *
2231  * Negative infinity items are a special case among pivot tuples. They
2232  * always have zero attributes, while all other pivot tuples always have
2233  * nkeyatts attributes.
2234  *
2235  * Right-most pages don't have a high key, but could be said to
2236  * conceptually have a "positive infinity" high key. Thus, there is a
2237  * symmetry between down link items in parent pages, and high keys in
2238  * children. Together, they represent the part of the key space that
2239  * belongs to each page in the index. For example, all children of the
2240  * root page will have negative infinity as a lower bound from root
2241  * negative infinity downlink, and positive infinity as an upper bound
2242  * (implicitly, from "imaginary" positive infinity high key in root).
2243  */
2244  return !P_ISLEAF(opaque) && offset == P_FIRSTDATAKEY(opaque);
2245 }
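Callers use this test to skip key-order checks for the one item that carries no key at all. A standalone sketch of that skip, with plain values standing in for the P_ISLEAF() and P_FIRSTDATAKEY() macros, might look like this (illustrative only, not backend code):

#include <stdbool.h>
#include <stdio.h>

/*
 * Plain-C stand-in for offset_is_negative_infinity(): true only for the
 * first data item on an internal (non-leaf) page.
 */
static bool
offset_is_neg_inf(bool page_is_leaf, unsigned firstdatakey, unsigned offset)
{
    return !page_is_leaf && offset == firstdatakey;
}

int
main(void)
{
    bool        page_is_leaf = false;   /* model an internal page */
    unsigned    firstdatakey = 2;       /* item 1 is the high key */
    unsigned    maxoffset = 5;

    for (unsigned off = firstdatakey; off <= maxoffset; off++)
    {
        if (offset_is_neg_inf(page_is_leaf, firstdatakey, off))
        {
            printf("offset %u: negative infinity downlink, no key to check\n", off);
            continue;
        }
        printf("offset %u: ordinary pivot tuple, key invariants apply\n", off);
    }
    return 0;
}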

◆ PageGetItemIdCareful()

static ItemId PageGetItemIdCareful ( BtreeCheckState *  state,
BlockNumber  block,
Page  page,
OffsetNumber  offset 
)
static

Definition at line 2617 of file verify_nbtree.c.

References ereport, errcode(), errdetail_internal(), errmsg(), ERROR, ItemIdGetFlags, ItemIdGetLength, ItemIdGetOffset, ItemIdIsRedirected, ItemIdIsUsed, PageGetItemId, BtreeCheckState::rel, and RelationGetRelationName.

Referenced by bt_check_level_from_leftmost(), bt_downlink_missing_check(), bt_right_page_check_scankey(), bt_target_page_check(), invariant_l_nontarget_offset(), and invariant_l_offset().

2619 {
2620  ItemId itemid = PageGetItemId(page, offset);
2621 
2622  if (ItemIdGetOffset(itemid) + ItemIdGetLength(itemid) >
2623  BLCKSZ - sizeof(BTPageOpaqueData))
2624  ereport(ERROR,
2625  (errcode(ERRCODE_INDEX_CORRUPTED),
2626  errmsg("line pointer points past end of tuple space in index \"%s\"",
2627  RelationGetRelationName(state->rel)),
2628  errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
2629  block, offset, ItemIdGetOffset(itemid),
2630  ItemIdGetLength(itemid),
2631  ItemIdGetFlags(itemid))));
2632 
2633  /*
2634  * Verify that line pointer isn't LP_REDIRECT or LP_UNUSED, since nbtree
2635  * never uses either. Verify that line pointer has storage, too, since
2636  * even LP_DEAD items should have storage within nbtree.
2637  */
2638  if (ItemIdIsRedirected(itemid) || !ItemIdIsUsed(itemid) ||
2639  ItemIdGetLength(itemid) == 0)
2640  ereport(ERROR,
2641  (errcode(ERRCODE_INDEX_CORRUPTED),
2642  errmsg("invalid line pointer storage in index \"%s\"",
2643  RelationGetRelationName(state->rel)),
2644  errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
2645  block, offset, ItemIdGetOffset(itemid),
2646  ItemIdGetLength(itemid),
2647  ItemIdGetFlags(itemid))));
2648 
2649  return itemid;
2650 }
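The first ereport above fires when a line pointer's data would extend into the btree special space at the end of the page. A standalone model of that bound, using an assumed 8192-byte block and an assumed special-space size rather than the server's BLCKSZ and sizeof(BTPageOpaqueData):

#include <stdbool.h>
#include <stdio.h>

#define MODEL_BLCKSZ        8192u
#define MODEL_SPECIAL_SIZE    16u   /* stand-in for sizeof(BTPageOpaqueData) */

/* True when the pointed-to tuple stays within the page's tuple space. */
static bool
lp_within_tuple_space(unsigned lp_off, unsigned lp_len)
{
    return lp_off + lp_len <= MODEL_BLCKSZ - MODEL_SPECIAL_SIZE;
}

int
main(void)
{
    printf("lp_off=8000 lp_len=100: %s\n",
           lp_within_tuple_space(8000, 100) ? "ok" : "past end of tuple space");
    printf("lp_off=8000 lp_len=200: %s\n",
           lp_within_tuple_space(8000, 200) ? "ok" : "past end of tuple space");
    return 0;
}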

◆ palloc_btree_page()

static Page palloc_btree_page ( BtreeCheckState *  state,
BlockNumber  blocknum 
)
static

Definition at line 2437 of file verify_nbtree.c.

References _bt_checkpage(), BT_READ, BTMetaPageData::btm_magic, BTMetaPageData::btm_version, BTPageGetMeta, BTPageOpaqueData::btpo, BTREE_MAGIC, BTREE_METAPAGE, BTREE_MIN_VERSION, BTREE_VERSION, BufferGetPage, BtreeCheckState::checkstrategy, ereport, errcode(), errhint(), errmsg(), ERROR, BTPageOpaqueData::level, LockBuffer(), MAIN_FORKNUM, MaxIndexTuplesPerPage, P_FIRSTDATAKEY, P_HAS_GARBAGE, P_HIKEY, P_ISDELETED, P_ISHALFDEAD, P_ISLEAF, P_ISMETA, P_RIGHTMOST, PageGetMaxOffsetNumber, PageGetSpecialPointer, palloc(), RBM_NORMAL, ReadBufferExtended(), BtreeCheckState::rel, RelationGetRelationName, and UnlockReleaseBuffer().

Referenced by bt_check_every_level(), bt_check_level_from_leftmost(), bt_downlink_check(), bt_downlink_missing_check(), bt_right_page_check_scankey(), and bt_target_page_check().

2438 {
2439  Buffer buffer;
2440  Page page;
2441  BTPageOpaque opaque;
2442  OffsetNumber maxoffset;
2443 
2444  page = palloc(BLCKSZ);
2445 
2446  /*
2447  * We copy the page into local storage to avoid holding pin on the buffer
2448  * longer than we must.
2449  */
2450  buffer = ReadBufferExtended(state->rel, MAIN_FORKNUM, blocknum, RBM_NORMAL,
2451  state->checkstrategy);
2452  LockBuffer(buffer, BT_READ);
2453 
2454  /*
2455  * Perform the same basic sanity checking that nbtree itself performs for
2456  * every page:
2457  */
2458  _bt_checkpage(state->rel, buffer);
2459 
2460  /* Only use copy of page in palloc()'d memory */
2461  memcpy(page, BufferGetPage(buffer), BLCKSZ);
2462  UnlockReleaseBuffer(buffer);
2463 
2464  opaque = (BTPageOpaque) PageGetSpecialPointer(page);
2465 
2466  if (P_ISMETA(opaque) && blocknum != BTREE_METAPAGE)
2467  ereport(ERROR,
2468  (errcode(ERRCODE_INDEX_CORRUPTED),
2469  errmsg("invalid meta page found at block %u in index \"%s\"",
2470  blocknum, RelationGetRelationName(state->rel))));
2471 
2472  /* Check page from block that ought to be meta page */
2473  if (blocknum == BTREE_METAPAGE)
2474  {
2475  BTMetaPageData *metad = BTPageGetMeta(page);
2476 
2477  if (!P_ISMETA(opaque) ||
2478  metad->btm_magic != BTREE_MAGIC)
2479  ereport(ERROR,
2480  (errcode(ERRCODE_INDEX_CORRUPTED),
2481  errmsg("index \"%s\" meta page is corrupt",
2482  RelationGetRelationName(state->rel))));
2483 
2484  if (metad->btm_version < BTREE_MIN_VERSION ||
2485  metad->btm_version > BTREE_VERSION)
2486  ereport(ERROR,
2487  (errcode(ERRCODE_INDEX_CORRUPTED),
2488  errmsg("version mismatch in index \"%s\": file version %d, "
2489  "current version %d, minimum supported version %d",
2490  RelationGetRelationName(state->rel),
2491  metad->btm_version, BTREE_VERSION,
2492  BTREE_MIN_VERSION)));
2493 
2494  /* Finished with metapage checks */
2495  return page;
2496  }
2497 
2498  /*
2499  * Deleted pages have no sane "level" field, so can only check non-deleted
2500  * page level
2501  */
2502  if (P_ISLEAF(opaque) && !P_ISDELETED(opaque) && opaque->btpo.level != 0)
2503  ereport(ERROR,
2504  (errcode(ERRCODE_INDEX_CORRUPTED),
2505  errmsg("invalid leaf page level %u for block %u in index \"%s\"",
2506  opaque->btpo.level, blocknum, RelationGetRelationName(state->rel))));
2507 
2508  if (!P_ISLEAF(opaque) && !P_ISDELETED(opaque) &&
2509  opaque->btpo.level == 0)
2510  ereport(ERROR,
2511  (errcode(ERRCODE_INDEX_CORRUPTED),
2512  errmsg("invalid internal page level 0 for block %u in index \"%s\"",
2513  blocknum, RelationGetRelationName(state->rel))));
2514 
2515  /*
2516  * Sanity checks for number of items on page.
2517  *
2518  * As noted at the beginning of _bt_binsrch(), an internal page must have
2519  * children, since there must always be a negative infinity downlink
2520  * (there may also be a highkey). In the case of non-rightmost leaf
2521  * pages, there must be at least a highkey.
2522  *
2523  * This is correct when pages are half-dead, since internal pages are
2524  * never half-dead, and leaf pages must have a high key when half-dead
2525  * (the rightmost page can never be deleted). It's also correct with
2526  * fully deleted pages: _bt_unlink_halfdead_page() doesn't change anything
2527  * about the target page other than setting the page as fully dead, and
2528  * setting its xact field. In particular, it doesn't change the sibling
2529  * links in the deletion target itself, since they're required when index
2530  * scans land on the deletion target, and then need to move right (or need
2531  * to move left, in the case of backward index scans).
2532  */
2533  maxoffset = PageGetMaxOffsetNumber(page);
2534  if (maxoffset > MaxIndexTuplesPerPage)
2535  ereport(ERROR,
2536  (errcode(ERRCODE_INDEX_CORRUPTED),
2537  errmsg("Number of items on block %u of index \"%s\" exceeds MaxIndexTuplesPerPage (%u)",
2538  blocknum, RelationGetRelationName(state->rel),
2539  MaxIndexTuplesPerPage)));
2540 
2541  if (!P_ISLEAF(opaque) && maxoffset < P_FIRSTDATAKEY(opaque))
2542  ereport(ERROR,
2543  (errcode(ERRCODE_INDEX_CORRUPTED),
2544  errmsg("internal block %u in index \"%s\" lacks high key and/or at least one downlink",
2545  blocknum, RelationGetRelationName(state->rel))));
2546 
2547  if (P_ISLEAF(opaque) && !P_RIGHTMOST(opaque) && maxoffset < P_HIKEY)
2548  ereport(ERROR,
2549  (errcode(ERRCODE_INDEX_CORRUPTED),
2550  errmsg("non-rightmost leaf block %u in index \"%s\" lacks high key item",
2551  blocknum, RelationGetRelationName(state->rel))));
2552 
2553  /*
2554  * In general, internal pages are never marked half-dead, except on
2555  * versions of Postgres prior to 9.4, where it can be valid transient
2556  * state. This state is nonetheless treated as corruption by VACUUM
2557  * from version 9.4 on, so do the same here. See _bt_pagedel() for full
2558  * details.
2559  *
2560  * Internal pages should never have garbage items, either.
2561  */
2562  if (!P_ISLEAF(opaque) && P_ISHALFDEAD(opaque))
2563  ereport(ERROR,
2564  (errcode(ERRCODE_INDEX_CORRUPTED),
2565  errmsg("internal page block %u in index \"%s\" is half-dead",
2566  blocknum, RelationGetRelationName(state->rel)),
2567  errhint("This can be caused by an interrupted VACUUM in version 9.3 or older, before upgrade. Please REINDEX it.")));
2568 
2569  if (!P_ISLEAF(opaque) && P_HAS_GARBAGE(opaque))
2570  ereport(ERROR,
2571  (errcode(ERRCODE_INDEX_CORRUPTED),
2572  errmsg("internal page block %u in index \"%s\" has garbage items",
2573  blocknum, RelationGetRelationName(state->rel))));
2574 
2575  return page;
2576 }
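Of the checks above, the level rule is simple enough to state on its own: for pages that are not deleted, a leaf page must report level 0 and an internal page must not. A standalone sketch of that rule, with plain booleans standing in for the page-flag macros (illustrative only, not backend code):

#include <stdbool.h>
#include <stdio.h>

/*
 * Plain-C model of palloc_btree_page()'s level sanity rule.  Deleted pages
 * are skipped because their level field is not meaningful.
 */
static const char *
check_page_level(bool is_leaf, bool is_deleted, unsigned level)
{
    if (is_deleted)
        return "deleted page: level not checked";
    if (is_leaf && level != 0)
        return "corrupt: leaf page with nonzero level";
    if (!is_leaf && level == 0)
        return "corrupt: internal page at level 0";
    return "level is consistent with the page type";
}

int
main(void)
{
    printf("%s\n", check_page_level(true, false, 0));   /* ordinary leaf */
    printf("%s\n", check_page_level(false, false, 0));  /* bad internal page */
    printf("%s\n", check_page_level(true, true, 7));    /* deleted, ignored */
    return 0;
}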

◆ PG_FUNCTION_INFO_V1() [1/2]

PG_FUNCTION_INFO_V1 ( bt_index_check  )

◆ PG_FUNCTION_INFO_V1() [2/2]

PG_FUNCTION_INFO_V1 ( bt_index_parent_check  )
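Both declarations register SQL-callable entry points under the version-1 calling convention. As an illustration of that convention only, a minimal module might look like the sketch below; the function name demo_check is hypothetical and is not part of verify_nbtree.c, whose real entry points are bt_index_check() and bt_index_parent_check().

#include "postgres.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(demo_check);

/* Hypothetical version-1 function taking a regclass (index OID) argument. */
Datum
demo_check(PG_FUNCTION_ARGS)
{
	Oid			indrelid = PG_GETARG_OID(0);

	/* A real verification function would open and check the index here. */
	elog(NOTICE, "would check index with OID %u", indrelid);

	PG_RETURN_VOID();
}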

Variable Documentation

◆ PG_MODULE_MAGIC

PG_MODULE_MAGIC

Definition at line 43 of file verify_nbtree.c.