PostgreSQL Source Code — git master
verify_nbtree.c File Reference
#include "postgres.h"
#include "access/htup_details.h"
#include "access/nbtree.h"
#include "access/table.h"
#include "access/tableam.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/index.h"
#include "catalog/pg_am.h"
#include "commands/tablecmds.h"
#include "common/pg_prng.h"
#include "lib/bloomfilter.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
Include dependency graph for verify_nbtree.c:

Go to the source code of this file.

Data Structures

struct  BtreeCheckState
 
struct  BtreeLevel
 

Macros

#define InvalidBtreeLevel   ((uint32) InvalidBlockNumber)
 
#define BTreeTupleGetNKeyAtts(itup, rel)    Min(IndexRelationGetNumberOfKeyAttributes(rel), BTreeTupleGetNAtts(itup, rel))
 

Typedefs

typedef struct BtreeCheckState BtreeCheckState
 
typedef struct BtreeLevel BtreeLevel
 

Functions

 PG_FUNCTION_INFO_V1 (bt_index_check)
 
 PG_FUNCTION_INFO_V1 (bt_index_parent_check)
 
static void bt_index_check_internal (Oid indrelid, bool parentcheck, bool heapallindexed, bool rootdescend)
 
static void btree_index_checkable (Relation rel)
 
static bool btree_index_mainfork_expected (Relation rel)
 
static void bt_check_every_level (Relation rel, Relation heaprel, bool heapkeyspace, bool readonly, bool heapallindexed, bool rootdescend)
 
static BtreeLevel bt_check_level_from_leftmost (BtreeCheckState *state, BtreeLevel level)
 
static void bt_recheck_sibling_links (BtreeCheckState *state, BlockNumber btpo_prev_from_target, BlockNumber leftcurrent)
 
static void bt_target_page_check (BtreeCheckState *state)
 
static BTScanInsert bt_right_page_check_scankey (BtreeCheckState *state)
 
static void bt_child_check (BtreeCheckState *state, BTScanInsert targetkey, OffsetNumber downlinkoffnum)
 
static void bt_child_highkey_check (BtreeCheckState *state, OffsetNumber target_downlinkoffnum, Page loaded_child, uint32 target_level)
 
static void bt_downlink_missing_check (BtreeCheckState *state, bool rightsplit, BlockNumber targetblock, Page target)
 
static void bt_tuple_present_callback (Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *checkstate)
 
static IndexTuple bt_normalize_tuple (BtreeCheckState *state, IndexTuple itup)
 
static IndexTuple bt_posting_plain_tuple (IndexTuple itup, int n)
 
static bool bt_rootdescend (BtreeCheckState *state, IndexTuple itup)
 
static bool offset_is_negative_infinity (BTPageOpaque opaque, OffsetNumber offset)
 
static bool invariant_l_offset (BtreeCheckState *state, BTScanInsert key, OffsetNumber upperbound)
 
static bool invariant_leq_offset (BtreeCheckState *state, BTScanInsert key, OffsetNumber upperbound)
 
static bool invariant_g_offset (BtreeCheckState *state, BTScanInsert key, OffsetNumber lowerbound)
 
static bool invariant_l_nontarget_offset (BtreeCheckState *state, BTScanInsert key, BlockNumber nontargetblock, Page nontarget, OffsetNumber upperbound)
 
static Page palloc_btree_page (BtreeCheckState *state, BlockNumber blocknum)
 
static BTScanInsert bt_mkscankey_pivotsearch (Relation rel, IndexTuple itup)
 
static ItemId PageGetItemIdCareful (BtreeCheckState *state, BlockNumber block, Page page, OffsetNumber offset)
 
static ItemPointer BTreeTupleGetHeapTIDCareful (BtreeCheckState *state, IndexTuple itup, bool nonpivot)
 
static ItemPointer BTreeTupleGetPointsToTID (IndexTuple itup)
 
Datum bt_index_check (PG_FUNCTION_ARGS)
 
Datum bt_index_parent_check (PG_FUNCTION_ARGS)
 
static bool bt_pivot_tuple_identical (bool heapkeyspace, IndexTuple itup1, IndexTuple itup2)
 

Variables

 PG_MODULE_MAGIC
 

Macro Definition Documentation

◆ BTreeTupleGetNKeyAtts

#define BTreeTupleGetNKeyAtts (   itup,
  rel 
)     Min(IndexRelationGetNumberOfKeyAttributes(rel), BTreeTupleGetNAtts(itup, rel))

Definition at line 51 of file verify_nbtree.c.

◆ InvalidBtreeLevel

#define InvalidBtreeLevel   ((uint32) InvalidBlockNumber)

Definition at line 50 of file verify_nbtree.c.

Typedef Documentation

◆ BtreeCheckState

◆ BtreeLevel

typedef struct BtreeLevel BtreeLevel

Function Documentation

◆ bt_check_every_level()

static void bt_check_every_level ( Relation  rel,
Relation  heaprel,
bool  heapkeyspace,
bool  readonly,
bool  heapallindexed,
bool  rootdescend 
)
static

Definition at line 420 of file verify_nbtree.c.

422 {
424  Page metapage;
425  BTMetaPageData *metad;
426  uint32 previouslevel;
427  BtreeLevel current;
428  Snapshot snapshot = SnapshotAny;
429 
430  if (!readonly)
431  elog(DEBUG1, "verifying consistency of tree structure for index \"%s\"",
433  else
434  elog(DEBUG1, "verifying consistency of tree structure for index \"%s\" with cross-level checks",
436 
437  /*
438  * This assertion matches the one in index_getnext_tid(). See page
439  * recycling/"visible to everyone" notes in nbtree README.
440  */
442 
443  /*
444  * Initialize state for entire verification operation
445  */
446  state = palloc0(sizeof(BtreeCheckState));
447  state->rel = rel;
448  state->heaprel = heaprel;
449  state->heapkeyspace = heapkeyspace;
450  state->readonly = readonly;
451  state->heapallindexed = heapallindexed;
452  state->rootdescend = rootdescend;
453 
454  if (state->heapallindexed)
455  {
456  int64 total_pages;
457  int64 total_elems;
458  uint64 seed;
459 
460  /*
461  * Size Bloom filter based on estimated number of tuples in index,
462  * while conservatively assuming that each block must contain at least
463  * MaxTIDsPerBTreePage / 3 "plain" tuples -- see
464  * bt_posting_plain_tuple() for definition, and details of how posting
465  * list tuples are handled.
466  */
467  total_pages = RelationGetNumberOfBlocks(rel);
468  total_elems = Max(total_pages * (MaxTIDsPerBTreePage / 3),
469  (int64) state->rel->rd_rel->reltuples);
470  /* Generate a random seed to avoid repetition */
472  /* Create Bloom filter to fingerprint index */
473  state->filter = bloom_create(total_elems, maintenance_work_mem, seed);
474  state->heaptuplespresent = 0;
475 
476  /*
477  * Register our own snapshot in !readonly case, rather than asking
478  * table_index_build_scan() to do this for us later. This needs to
479  * happen before index fingerprinting begins, so we can later be
480  * certain that index fingerprinting should have reached all tuples
481  * returned by table_index_build_scan().
482  */
483  if (!state->readonly)
484  {
486 
487  /*
488  * GetTransactionSnapshot() always acquires a new MVCC snapshot in
489  * READ COMMITTED mode. A new snapshot is guaranteed to have all
490  * the entries it requires in the index.
491  *
492  * We must defend against the possibility that an old xact
493  * snapshot was returned at higher isolation levels when that
494  * snapshot is not safe for index scans of the target index. This
495  * is possible when the snapshot sees tuples that are before the
496  * index's indcheckxmin horizon. Throwing an error here should be
497  * very rare. It doesn't seem worth using a secondary snapshot to
498  * avoid this.
499  */
500  if (IsolationUsesXactSnapshot() && rel->rd_index->indcheckxmin &&
502  snapshot->xmin))
503  ereport(ERROR,
504  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
505  errmsg("index \"%s\" cannot be verified using transaction snapshot",
506  RelationGetRelationName(rel))));
507  }
508  }
509 
510  Assert(!state->rootdescend || state->readonly);
511  if (state->rootdescend && !state->heapkeyspace)
512  ereport(ERROR,
513  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
514  errmsg("cannot verify that tuples from index \"%s\" can each be found by an independent index search",
516  errhint("Only B-Tree version 4 indexes support rootdescend verification.")));
517 
518  /* Create context for page */
520  "amcheck context",
522  state->checkstrategy = GetAccessStrategy(BAS_BULKREAD);
523 
524  /* Get true root block from meta-page */
526  metad = BTPageGetMeta(metapage);
527 
528  /*
529  * Certain deletion patterns can result in "skinny" B-Tree indexes, where
530  * the fast root and true root differ.
531  *
532  * Start from the true root, not the fast root, unlike conventional index
533  * scans. This approach is more thorough, and removes the risk of
534  * following a stale fast root from the meta page.
535  */
536  if (metad->btm_fastroot != metad->btm_root)
537  ereport(DEBUG1,
538  (errcode(ERRCODE_NO_DATA),
539  errmsg_internal("harmless fast root mismatch in index \"%s\"",
541  errdetail_internal("Fast root block %u (level %u) differs from true root block %u (level %u).",
542  metad->btm_fastroot, metad->btm_fastlevel,
543  metad->btm_root, metad->btm_level)));
544 
545  /*
546  * Starting at the root, verify every level. Move left to right, top to
547  * bottom. Note that there may be no pages other than the meta page (meta
548  * page can indicate that root is P_NONE when the index is totally empty).
549  */
550  previouslevel = InvalidBtreeLevel;
551  current.level = metad->btm_level;
552  current.leftmost = metad->btm_root;
553  current.istruerootlevel = true;
554  while (current.leftmost != P_NONE)
555  {
556  /*
557  * Verify this level, and get left most page for next level down, if
558  * not at leaf level
559  */
560  current = bt_check_level_from_leftmost(state, current);
561 
562  if (current.leftmost == InvalidBlockNumber)
563  ereport(ERROR,
564  (errcode(ERRCODE_INDEX_CORRUPTED),
565  errmsg("index \"%s\" has no valid pages on level below %u or first level",
566  RelationGetRelationName(rel), previouslevel)));
567 
568  previouslevel = current.level;
569  }
570 
571  /*
572  * * Check whether heap contains unindexed/malformed tuples *
573  */
574  if (state->heapallindexed)
575  {
576  IndexInfo *indexinfo = BuildIndexInfo(state->rel);
577  TableScanDesc scan;
578 
579  /*
580  * Create our own scan for table_index_build_scan(), rather than
581  * getting it to do so for us. This is required so that we can
582  * actually use the MVCC snapshot registered earlier in !readonly
583  * case.
584  *
585  * Note that table_index_build_scan() calls heap_endscan() for us.
586  */
587  scan = table_beginscan_strat(state->heaprel, /* relation */
588  snapshot, /* snapshot */
589  0, /* number of keys */
590  NULL, /* scan key */
591  true, /* buffer access strategy OK */
592  true); /* syncscan OK? */
593 
594  /*
595  * Scan will behave as the first scan of a CREATE INDEX CONCURRENTLY
596  * behaves in !readonly case.
597  *
598  * It's okay that we don't actually use the same lock strength for the
599  * heap relation as any other ii_Concurrent caller would in !readonly
600  * case. We have no reason to care about a concurrent VACUUM
601  * operation, since there isn't going to be a second scan of the heap
602  * that needs to be sure that there was no concurrent recycling of
603  * TIDs.
604  */
605  indexinfo->ii_Concurrent = !state->readonly;
606 
607  /*
608  * Don't wait for uncommitted tuple xact commit/abort when index is a
609  * unique index on a catalog (or an index used by an exclusion
610  * constraint). This could otherwise happen in the readonly case.
611  */
612  indexinfo->ii_Unique = false;
613  indexinfo->ii_ExclusionOps = NULL;
614  indexinfo->ii_ExclusionProcs = NULL;
615  indexinfo->ii_ExclusionStrats = NULL;
616 
617  elog(DEBUG1, "verifying that tuples from index \"%s\" are present in \"%s\"",
619  RelationGetRelationName(state->heaprel));
620 
621  table_index_build_scan(state->heaprel, state->rel, indexinfo, true, false,
622  bt_tuple_present_callback, (void *) state, scan);
623 
624  ereport(DEBUG1,
625  (errmsg_internal("finished verifying presence of " INT64_FORMAT " tuples from table \"%s\" with bitset %.2f%% set",
626  state->heaptuplespresent, RelationGetRelationName(heaprel),
627  100.0 * bloom_prop_bits_set(state->filter))));
628 
629  if (snapshot != SnapshotAny)
630  UnregisterSnapshot(snapshot);
631 
632  bloom_free(state->filter);
633  }
634 
635  /* Be tidy: */
636  MemoryContextDelete(state->targetcontext);
637 }
#define InvalidBlockNumber
Definition: block.h:33
void bloom_free(bloom_filter *filter)
Definition: bloomfilter.c:126
bloom_filter * bloom_create(int64 total_elems, int bloom_work_mem, uint64 seed)
Definition: bloomfilter.c:87
double bloom_prop_bits_set(bloom_filter *filter)
Definition: bloomfilter.c:187
@ BAS_BULKREAD
Definition: bufmgr.h:30
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:212
Pointer Page
Definition: bufpage.h:78
unsigned int uint32
Definition: c.h:441
#define Max(x, y)
Definition: c.h:980
#define INT64_FORMAT
Definition: c.h:483
int errmsg_internal(const char *fmt,...)
Definition: elog.c:991
int errdetail_internal(const char *fmt,...)
Definition: elog.c:1064
int errhint(const char *fmt,...)
Definition: elog.c:1151
int errcode(int sqlerrcode)
Definition: elog.c:693
int errmsg(const char *fmt,...)
Definition: elog.c:904
#define DEBUG1
Definition: elog.h:24
#define ERROR
Definition: elog.h:33
#define elog(elevel,...)
Definition: elog.h:218
#define ereport(elevel,...)
Definition: elog.h:143
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition: freelist.c:542
int maintenance_work_mem
Definition: globals.c:126
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:308
IndexInfo * BuildIndexInfo(Relation index)
Definition: index.c:2409
Assert(fmt[strlen(fmt) - 1] !='\n')
void * palloc0(Size size)
Definition: mcxt.c:1093
MemoryContext CurrentMemoryContext
Definition: mcxt.c:42
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:218
#define AllocSetContextCreate
Definition: memutils.h:173
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:195
#define BTPageGetMeta(p)
Definition: nbtree.h:119
#define MaxTIDsPerBTreePage
Definition: nbtree.h:184
#define P_NONE
Definition: nbtree.h:211
#define BTREE_METAPAGE
Definition: nbtree.h:146
uint64 pg_prng_uint64(pg_prng_state *state)
Definition: pg_prng.c:128
pg_prng_state pg_global_prng_state
Definition: pg_prng.c:28
#define RelationGetRelationName(relation)
Definition: rel.h:512
TransactionId RecentXmin
Definition: snapmgr.c:113
Snapshot GetTransactionSnapshot(void)
Definition: snapmgr.c:250
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:867
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:825
#define SnapshotAny
Definition: snapmgr.h:67
uint32 btm_level
Definition: nbtree.h:106
BlockNumber btm_fastroot
Definition: nbtree.h:107
BlockNumber btm_root
Definition: nbtree.h:105
uint32 btm_fastlevel
Definition: nbtree.h:108
bool istruerootlevel
uint32 level
BlockNumber leftmost
HeapTupleHeader t_data
Definition: htup.h:68
bool ii_Unique
Definition: execnodes.h:175
uint16 * ii_ExclusionStrats
Definition: execnodes.h:170
Oid * ii_ExclusionOps
Definition: execnodes.h:168
bool ii_Concurrent
Definition: execnodes.h:179
Oid * ii_ExclusionProcs
Definition: execnodes.h:169
struct HeapTupleData * rd_indextuple
Definition: rel.h:190
Form_pg_index rd_index
Definition: rel.h:188
TransactionId xmin
Definition: snapshot.h:157
Definition: regguts.h:318
static TableScanDesc table_beginscan_strat(Relation rel, Snapshot snapshot, int nkeys, struct ScanKeyData *key, bool allow_strat, bool allow_sync)
Definition: tableam.h:909
static double table_index_build_scan(Relation table_rel, Relation index_rel, struct IndexInfo *index_info, bool allow_sync, bool progress, IndexBuildCallback callback, void *callback_state, TableScanDesc scan)
Definition: tableam.h:1747
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static BtreeLevel bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level)
static void bt_tuple_present_callback(Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *checkstate)
#define InvalidBtreeLevel
Definition: verify_nbtree.c:50
static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum)
#define IsolationUsesXactSnapshot()
Definition: xact.h:51

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert(), BAS_BULKREAD, bloom_create(), bloom_free(), bloom_prop_bits_set(), bt_check_level_from_leftmost(), bt_tuple_present_callback(), BTMetaPageData::btm_fastlevel, BTMetaPageData::btm_fastroot, BTMetaPageData::btm_level, BTMetaPageData::btm_root, BTPageGetMeta, BTREE_METAPAGE, BuildIndexInfo(), CurrentMemoryContext, DEBUG1, elog, ereport, errcode(), errdetail_internal(), errhint(), errmsg(), errmsg_internal(), ERROR, GetAccessStrategy(), GetTransactionSnapshot(), HeapTupleHeaderGetXmin, IndexInfo::ii_Concurrent, IndexInfo::ii_ExclusionOps, IndexInfo::ii_ExclusionProcs, IndexInfo::ii_ExclusionStrats, IndexInfo::ii_Unique, INT64_FORMAT, InvalidBlockNumber, InvalidBtreeLevel, IsolationUsesXactSnapshot, BtreeLevel::istruerootlevel, BtreeLevel::leftmost, BtreeLevel::level, maintenance_work_mem, Max, MaxTIDsPerBTreePage, MemoryContextDelete(), P_NONE, palloc0(), palloc_btree_page(), pg_global_prng_state, pg_prng_uint64(), RelationData::rd_index, RelationData::rd_indextuple, RecentXmin, RegisterSnapshot(), RelationGetNumberOfBlocks, RelationGetRelationName, SnapshotAny, HeapTupleData::t_data, table_beginscan_strat(), table_index_build_scan(), TransactionIdIsValid, TransactionIdPrecedes(), UnregisterSnapshot(), and SnapshotData::xmin.

Referenced by bt_index_check_internal().

◆ bt_check_level_from_leftmost()

static BtreeLevel bt_check_level_from_leftmost ( BtreeCheckState state,
BtreeLevel  level 
)
static

Definition at line 658 of file verify_nbtree.c.

659 {
660  /* State to establish early, concerning entire level */
661  BTPageOpaque opaque;
662  MemoryContext oldcontext;
663  BtreeLevel nextleveldown;
664 
665  /* Variables for iterating across level using right links */
666  BlockNumber leftcurrent = P_NONE;
667  BlockNumber current = level.leftmost;
668 
669  /* Initialize return state */
670  nextleveldown.leftmost = InvalidBlockNumber;
671  nextleveldown.level = InvalidBtreeLevel;
672  nextleveldown.istruerootlevel = false;
673 
674  /* Use page-level context for duration of this call */
675  oldcontext = MemoryContextSwitchTo(state->targetcontext);
676 
677  elog(DEBUG1, "verifying level %u%s", level.level,
678  level.istruerootlevel ?
679  " (true root level)" : level.level == 0 ? " (leaf level)" : "");
680 
681  state->prevrightlink = InvalidBlockNumber;
682  state->previncompletesplit = false;
683 
684  do
685  {
686  /* Don't rely on CHECK_FOR_INTERRUPTS() calls at lower level */
688 
689  /* Initialize state for this iteration */
690  state->targetblock = current;
691  state->target = palloc_btree_page(state, state->targetblock);
692  state->targetlsn = PageGetLSN(state->target);
693 
694  opaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
695 
696  if (P_IGNORE(opaque))
697  {
698  /*
699  * Since there cannot be a concurrent VACUUM operation in readonly
700  * mode, and since a page has no links within other pages
701  * (siblings and parent) once it is marked fully deleted, it
702  * should be impossible to land on a fully deleted page in
703  * readonly mode. See bt_child_check() for further details.
704  *
705  * The bt_child_check() P_ISDELETED() check is repeated here so
706  * that pages that are only reachable through sibling links get
707  * checked.
708  */
709  if (state->readonly && P_ISDELETED(opaque))
710  ereport(ERROR,
711  (errcode(ERRCODE_INDEX_CORRUPTED),
712  errmsg("downlink or sibling link points to deleted block in index \"%s\"",
714  errdetail_internal("Block=%u left block=%u left link from block=%u.",
715  current, leftcurrent, opaque->btpo_prev)));
716 
717  if (P_RIGHTMOST(opaque))
718  ereport(ERROR,
719  (errcode(ERRCODE_INDEX_CORRUPTED),
720  errmsg("block %u fell off the end of index \"%s\"",
721  current, RelationGetRelationName(state->rel))));
722  else
723  ereport(DEBUG1,
724  (errcode(ERRCODE_NO_DATA),
725  errmsg_internal("block %u of index \"%s\" concurrently deleted",
726  current, RelationGetRelationName(state->rel))));
727  goto nextpage;
728  }
729  else if (nextleveldown.leftmost == InvalidBlockNumber)
730  {
731  /*
732  * A concurrent page split could make the caller supplied leftmost
733  * block no longer contain the leftmost page, or no longer be the
734  * true root, but where that isn't possible due to heavyweight
735  * locking, check that the first valid page meets caller's
736  * expectations.
737  */
738  if (state->readonly)
739  {
740  if (!P_LEFTMOST(opaque))
741  ereport(ERROR,
742  (errcode(ERRCODE_INDEX_CORRUPTED),
743  errmsg("block %u is not leftmost in index \"%s\"",
744  current, RelationGetRelationName(state->rel))));
745 
746  if (level.istruerootlevel && !P_ISROOT(opaque))
747  ereport(ERROR,
748  (errcode(ERRCODE_INDEX_CORRUPTED),
749  errmsg("block %u is not true root in index \"%s\"",
750  current, RelationGetRelationName(state->rel))));
751  }
752 
753  /*
754  * Before beginning any non-trivial examination of level, prepare
755  * state for next bt_check_level_from_leftmost() invocation for
 756  * the next level down (if any).
757  *
758  * There should be at least one non-ignorable page per level,
759  * unless this is the leaf level, which is assumed by caller to be
760  * final level.
761  */
762  if (!P_ISLEAF(opaque))
763  {
764  IndexTuple itup;
765  ItemId itemid;
766 
767  /* Internal page -- downlink gets leftmost on next level */
768  itemid = PageGetItemIdCareful(state, state->targetblock,
769  state->target,
770  P_FIRSTDATAKEY(opaque));
771  itup = (IndexTuple) PageGetItem(state->target, itemid);
772  nextleveldown.leftmost = BTreeTupleGetDownLink(itup);
773  nextleveldown.level = opaque->btpo_level - 1;
774  }
775  else
776  {
777  /*
778  * Leaf page -- final level caller must process.
779  *
780  * Note that this could also be the root page, if there has
781  * been no root page split yet.
782  */
783  nextleveldown.leftmost = P_NONE;
784  nextleveldown.level = InvalidBtreeLevel;
785  }
786 
787  /*
788  * Finished setting up state for this call/level. Control will
789  * never end up back here in any future loop iteration for this
790  * level.
791  */
792  }
793 
794  /* Sibling links should be in mutual agreement */
795  if (opaque->btpo_prev != leftcurrent)
796  bt_recheck_sibling_links(state, opaque->btpo_prev, leftcurrent);
797 
798  /* Check level */
799  if (level.level != opaque->btpo_level)
800  ereport(ERROR,
801  (errcode(ERRCODE_INDEX_CORRUPTED),
802  errmsg("leftmost down link for level points to block in index \"%s\" whose level is not one level down",
804  errdetail_internal("Block pointed to=%u expected level=%u level in pointed to block=%u.",
805  current, level.level, opaque->btpo_level)));
806 
807  /* Verify invariants for page */
809 
810 nextpage:
811 
812  /* Try to detect circular links */
813  if (current == leftcurrent || current == opaque->btpo_prev)
814  ereport(ERROR,
815  (errcode(ERRCODE_INDEX_CORRUPTED),
816  errmsg("circular link chain found in block %u of index \"%s\"",
817  current, RelationGetRelationName(state->rel))));
818 
819  leftcurrent = current;
820  current = opaque->btpo_next;
821 
822  if (state->lowkey)
823  {
824  Assert(state->readonly);
825  pfree(state->lowkey);
826  state->lowkey = NULL;
827  }
828 
829  /*
830  * Copy current target high key as the low key of right sibling.
831  * Allocate memory in upper level context, so it would be cleared
832  * after reset of target context.
833  *
834  * We only need the low key in corner cases of checking child high
835  * keys. We use high key only when incomplete split on the child level
836  * falls to the boundary of pages on the target level. See
837  * bt_child_highkey_check() for details. So, typically we won't end
838  * up doing anything with low key, but it's simpler for general case
839  * high key verification to always have it available.
840  *
841  * The correctness of managing low key in the case of concurrent
842  * splits wasn't investigated yet. Thankfully we only need low key
843  * for readonly verification and concurrent splits won't happen.
844  */
845  if (state->readonly && !P_RIGHTMOST(opaque))
846  {
847  IndexTuple itup;
848  ItemId itemid;
849 
850  itemid = PageGetItemIdCareful(state, state->targetblock,
851  state->target, P_HIKEY);
852  itup = (IndexTuple) PageGetItem(state->target, itemid);
853 
854  state->lowkey = MemoryContextAlloc(oldcontext, IndexTupleSize(itup));
855  memcpy(state->lowkey, itup, IndexTupleSize(itup));
856  }
857 
858  /* Free page and associated memory for this iteration */
859  MemoryContextReset(state->targetcontext);
860  }
861  while (current != P_NONE);
862 
863  if (state->lowkey)
864  {
865  Assert(state->readonly);
866  pfree(state->lowkey);
867  state->lowkey = NULL;
868  }
869 
870  /* Don't change context for caller */
871  MemoryContextSwitchTo(oldcontext);
872 
873  return nextleveldown;
874 }
uint32 BlockNumber
Definition: block.h:31
#define PageGetSpecialPointer(page)
Definition: bufpage.h:325
#define PageGetItem(page, itemId)
Definition: bufpage.h:339
#define PageGetLSN(page)
Definition: bufpage.h:365
IndexTupleData * IndexTuple
Definition: itup.h:53
#define IndexTupleSize(itup)
Definition: itup.h:70
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:143
void pfree(void *pointer)
Definition: mcxt.c:1169
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:863
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:120
BTPageOpaqueData * BTPageOpaque
Definition: nbtree.h:71
#define P_ISLEAF(opaque)
Definition: nbtree.h:219
#define P_HIKEY
Definition: nbtree.h:367
#define P_LEFTMOST(opaque)
Definition: nbtree.h:217
#define P_ISDELETED(opaque)
Definition: nbtree.h:221
#define P_FIRSTDATAKEY(opaque)
Definition: nbtree.h:369
#define P_ISROOT(opaque)
Definition: nbtree.h:220
#define P_RIGHTMOST(opaque)
Definition: nbtree.h:218
static BlockNumber BTreeTupleGetDownLink(IndexTuple pivot)
Definition: nbtree.h:549
#define P_IGNORE(opaque)
Definition: nbtree.h:224
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
BlockNumber btpo_next
Definition: nbtree.h:65
BlockNumber btpo_prev
Definition: nbtree.h:64
uint32 btpo_level
Definition: nbtree.h:66
static void bt_target_page_check(BtreeCheckState *state)
static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block, Page page, OffsetNumber offset)
static void bt_recheck_sibling_links(BtreeCheckState *state, BlockNumber btpo_prev_from_target, BlockNumber leftcurrent)

References Assert(), bt_recheck_sibling_links(), bt_target_page_check(), BTPageOpaqueData::btpo_level, BTPageOpaqueData::btpo_next, BTPageOpaqueData::btpo_prev, BTreeTupleGetDownLink(), CHECK_FOR_INTERRUPTS, DEBUG1, elog, ereport, errcode(), errdetail_internal(), errmsg(), errmsg_internal(), ERROR, IndexTupleSize, InvalidBlockNumber, InvalidBtreeLevel, BtreeLevel::istruerootlevel, BtreeLevel::leftmost, BtreeLevel::level, MemoryContextAlloc(), MemoryContextReset(), MemoryContextSwitchTo(), P_FIRSTDATAKEY, P_HIKEY, P_IGNORE, P_ISDELETED, P_ISLEAF, P_ISROOT, P_LEFTMOST, P_NONE, P_RIGHTMOST, PageGetItem, PageGetItemIdCareful(), PageGetLSN, PageGetSpecialPointer, palloc_btree_page(), pfree(), and RelationGetRelationName.

Referenced by bt_check_every_level().

◆ bt_child_check()

static void bt_child_check ( BtreeCheckState state,
BTScanInsert  targetkey,
OffsetNumber  downlinkoffnum 
)
static

Definition at line 2070 of file verify_nbtree.c.

2072 {
2073  ItemId itemid;
2074  IndexTuple itup;
2075  BlockNumber childblock;
2076  OffsetNumber offset;
2077  OffsetNumber maxoffset;
2078  Page child;
2079  BTPageOpaque copaque;
2080  BTPageOpaque topaque;
2081 
2082  itemid = PageGetItemIdCareful(state, state->targetblock,
2083  state->target, downlinkoffnum);
2084  itup = (IndexTuple) PageGetItem(state->target, itemid);
2085  childblock = BTreeTupleGetDownLink(itup);
2086 
2087  /*
2088  * Caller must have ShareLock on target relation, because of
2089  * considerations around page deletion by VACUUM.
2090  *
2091  * NB: In general, page deletion deletes the right sibling's downlink, not
2092  * the downlink of the page being deleted; the deleted page's downlink is
2093  * reused for its sibling. The key space is thereby consolidated between
2094  * the deleted page and its right sibling. (We cannot delete a parent
2095  * page's rightmost child unless it is the last child page, and we intend
2096  * to also delete the parent itself.)
2097  *
2098  * If this verification happened without a ShareLock, the following race
2099  * condition could cause false positives:
2100  *
2101  * In general, concurrent page deletion might occur, including deletion of
2102  * the left sibling of the child page that is examined here. If such a
2103  * page deletion were to occur, closely followed by an insertion into the
2104  * newly expanded key space of the child, a window for the false positive
2105  * opens up: the stale parent/target downlink originally followed to get
2106  * to the child legitimately ceases to be a lower bound on all items in
2107  * the page, since the key space was concurrently expanded "left".
2108  * (Insertion followed the "new" downlink for the child, not our now-stale
2109  * downlink, which was concurrently physically removed in target/parent as
2110  * part of deletion's first phase.)
2111  *
2112  * While we use various techniques elsewhere to perform cross-page
2113  * verification for !readonly callers, a similar trick seems difficult
2114  * here. The tricks used by bt_recheck_sibling_links and by
2115  * bt_right_page_check_scankey both involve verification of a same-level,
2116  * cross-sibling invariant. Cross-level invariants are far more squishy,
2117  * though. The nbtree REDO routines do not actually couple buffer locks
2118  * across levels during page splits, so making any cross-level check work
2119  * reliably in !readonly mode may be impossible.
2120  */
2121  Assert(state->readonly);
2122 
2123  /*
2124  * Verify child page has the downlink key from target page (its parent) as
2125  * a lower bound; downlink must be strictly less than all keys on the
2126  * page.
2127  *
2128  * Check all items, rather than checking just the first and trusting that
2129  * the operator class obeys the transitive law.
2130  */
2131  topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
2132  child = palloc_btree_page(state, childblock);
2133  copaque = (BTPageOpaque) PageGetSpecialPointer(child);
2134  maxoffset = PageGetMaxOffsetNumber(child);
2135 
2136  /*
2137  * Since we've already loaded the child block, combine this check with
2138  * check for downlink connectivity.
2139  */
2140  bt_child_highkey_check(state, downlinkoffnum,
2141  child, topaque->btpo_level);
2142 
2143  /*
2144  * Since there cannot be a concurrent VACUUM operation in readonly mode,
2145  * and since a page has no links within other pages (siblings and parent)
2146  * once it is marked fully deleted, it should be impossible to land on a
2147  * fully deleted page.
2148  *
2149  * It does not quite make sense to enforce that the page cannot even be
2150  * half-dead, despite the fact the downlink is modified at the same stage
2151  * that the child leaf page is marked half-dead. That's incorrect because
2152  * there may occasionally be multiple downlinks from a chain of pages
2153  * undergoing deletion, where multiple successive calls are made to
2154  * _bt_unlink_halfdead_page() by VACUUM before it can finally safely mark
2155  * the leaf page as fully dead. While _bt_mark_page_halfdead() usually
2156  * removes the downlink to the leaf page that is marked half-dead, that's
2157  * not guaranteed, so it's possible we'll land on a half-dead page with a
2158  * downlink due to an interrupted multi-level page deletion.
2159  *
2160  * We go ahead with our checks if the child page is half-dead. It's safe
2161  * to do so because we do not test the child's high key, so it does not
2162  * matter that the original high key will have been replaced by a dummy
2163  * truncated high key within _bt_mark_page_halfdead(). All other page
2164  * items are left intact on a half-dead page, so there is still something
2165  * to test.
2166  */
2167  if (P_ISDELETED(copaque))
2168  ereport(ERROR,
2169  (errcode(ERRCODE_INDEX_CORRUPTED),
2170  errmsg("downlink to deleted page found in index \"%s\"",
2172  errdetail_internal("Parent block=%u child block=%u parent page lsn=%X/%X.",
2173  state->targetblock, childblock,
2174  LSN_FORMAT_ARGS(state->targetlsn))));
2175 
2176  for (offset = P_FIRSTDATAKEY(copaque);
2177  offset <= maxoffset;
2178  offset = OffsetNumberNext(offset))
2179  {
2180  /*
2181  * Skip comparison of target page key against "negative infinity"
2182  * item, if any. Checking it would indicate that it's not a strict
2183  * lower bound, but that's only because of the hard-coding for
2184  * negative infinity items within _bt_compare().
2185  *
2186  * If nbtree didn't truncate negative infinity tuples during internal
2187  * page splits then we'd expect child's negative infinity key to be
2188  * equal to the scankey/downlink from target/parent (it would be a
2189  * "low key" in this hypothetical scenario, and so it would still need
2190  * to be treated as a special case here).
2191  *
2192  * Negative infinity items can be thought of as a strict lower bound
2193  * that works transitively, with the last non-negative-infinity pivot
2194  * followed during a descent from the root as its "true" strict lower
2195  * bound. Only a small number of negative infinity items are truly
2196  * negative infinity; those that are the first items of leftmost
2197  * internal pages. In more general terms, a negative infinity item is
2198  * only negative infinity with respect to the subtree that the page is
2199  * at the root of.
2200  *
2201  * See also: bt_rootdescend(), which can even detect transitive
2202  * inconsistencies on cousin leaf pages.
2203  */
2204  if (offset_is_negative_infinity(copaque, offset))
2205  continue;
2206 
2207  if (!invariant_l_nontarget_offset(state, targetkey, childblock, child,
2208  offset))
2209  ereport(ERROR,
2210  (errcode(ERRCODE_INDEX_CORRUPTED),
2211  errmsg("down-link lower bound invariant violated for index \"%s\"",
2213  errdetail_internal("Parent block=%u child index tid=(%u,%u) parent page lsn=%X/%X.",
2214  state->targetblock, childblock, offset,
2215  LSN_FORMAT_ARGS(state->targetlsn))));
2216  }
2217 
2218  pfree(child);
2219 }
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:356
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
static bool offset_is_negative_infinity(BTPageOpaque opaque, OffsetNumber offset)
static void bt_child_highkey_check(BtreeCheckState *state, OffsetNumber target_downlinkoffnum, Page loaded_child, uint32 target_level)
static bool invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key, BlockNumber nontargetblock, Page nontarget, OffsetNumber upperbound)
#define LSN_FORMAT_ARGS(lsn)
Definition: xlogdefs.h:43

References Assert(), bt_child_highkey_check(), BTPageOpaqueData::btpo_level, BTreeTupleGetDownLink(), ereport, errcode(), errdetail_internal(), errmsg(), ERROR, invariant_l_nontarget_offset(), LSN_FORMAT_ARGS, offset_is_negative_infinity(), OffsetNumberNext, P_FIRSTDATAKEY, P_ISDELETED, PageGetItem, PageGetItemIdCareful(), PageGetMaxOffsetNumber, PageGetSpecialPointer, palloc_btree_page(), pfree(), and RelationGetRelationName.

Referenced by bt_target_page_check().

◆ bt_child_highkey_check()

/*
 * Check high keys on the child level.  Traverse rightlinks from the previous
 * downlink to the current one.  Check that there are no intermediate pages
 * with missing downlinks.
 *
 * If 'loaded_child' is given, it's assumed to be the page pointed to by the
 * downlink referenced by 'target_downlinkoffnum' of the target page, and is
 * not re-read (or freed) here.
 *
 * Traversal state is carried across calls in state->prevrightlink and
 * state->previncompletesplit, so successive downlinks of the target page can
 * be checked in lockstep with a single left-to-right pass over the child
 * level.
 */
static void
bt_child_highkey_check(BtreeCheckState *state,
					   OffsetNumber target_downlinkoffnum,
					   Page loaded_child,
					   uint32 target_level)
{
	BlockNumber blkno = state->prevrightlink;
	Page		page;
	BTPageOpaque opaque;
	bool		rightsplit = state->previncompletesplit;
	bool		first = true;
	ItemId		itemid;
	IndexTuple	itup;
	BlockNumber downlink;

	if (OffsetNumberIsValid(target_downlinkoffnum))
	{
		itemid = PageGetItemIdCareful(state, state->targetblock,
									  state->target, target_downlinkoffnum);
		itup = (IndexTuple) PageGetItem(state->target, itemid);
		downlink = BTreeTupleGetDownLink(itup);
	}
	else
	{
		downlink = P_NONE;
	}

	/*
	 * If no previous rightlink is memorized for current level just below
	 * target page's level, we are about to start from the leftmost page. We
	 * can't follow rightlinks from previous page, because there is no
	 * previous page.  But we still can match high key.
	 *
	 * So we initialize variables for the loop above like there is previous
	 * page referencing current child.  Also we imply previous page to not
	 * have incomplete split flag, that would make us require downlink for
	 * current child.  That's correct, because leftmost page on the level
	 * should always have parent downlink.
	 */
	if (!BlockNumberIsValid(blkno))
	{
		blkno = downlink;
		rightsplit = false;
	}

	/* Move to the right on the child level */
	while (true)
	{
		/*
		 * Did we traverse the whole tree level and this is check for pages to
		 * the right of rightmost downlink?
		 */
		if (blkno == P_NONE && downlink == P_NONE)
		{
			state->prevrightlink = InvalidBlockNumber;
			state->previncompletesplit = false;
			return;
		}

		/* Did we traverse the whole tree level and don't find next downlink? */
		if (blkno == P_NONE)
			ereport(ERROR,
					(errcode(ERRCODE_INDEX_CORRUPTED),
					 errmsg("can't traverse from downlink %u to downlink %u of index \"%s\"",
							state->prevrightlink, downlink,
							RelationGetRelationName(state->rel))));

		/* Load page contents */
		if (blkno == downlink && loaded_child)
			page = loaded_child;
		else
			page = palloc_btree_page(state, blkno);

		opaque = (BTPageOpaque) PageGetSpecialPointer(page);

		/* The first page we visit at the level should be leftmost */
		if (first && !BlockNumberIsValid(state->prevrightlink) &&
			!P_LEFTMOST(opaque))
			ereport(ERROR,
					(errcode(ERRCODE_INDEX_CORRUPTED),
					 errmsg("the first child of leftmost target page is not leftmost of its level in index \"%s\"",
							RelationGetRelationName(state->rel)),
					 errdetail_internal("Target block=%u child block=%u target page lsn=%X/%X.",
										state->targetblock, blkno,
										LSN_FORMAT_ARGS(state->targetlsn))));

		/* Do level sanity check */
		if ((!P_ISDELETED(opaque) || P_HAS_FULLXID(opaque)) &&
			opaque->btpo_level != target_level - 1)
			ereport(ERROR,
					(errcode(ERRCODE_INDEX_CORRUPTED),
					 errmsg("block found while following rightlinks from child of index \"%s\" has invalid level",
							RelationGetRelationName(state->rel)),
					 errdetail_internal("Block pointed to=%u expected level=%u level in pointed to block=%u.",
										blkno, target_level - 1,
										opaque->btpo_level)));

		/* Try to detect circular links */
		if ((!first && blkno == state->prevrightlink) ||
			blkno == opaque->btpo_prev)
			ereport(ERROR,
					(errcode(ERRCODE_INDEX_CORRUPTED),
					 errmsg("circular link chain found in block %u of index \"%s\"",
							blkno, RelationGetRelationName(state->rel))));

		if (blkno != downlink && !P_IGNORE(opaque))
		{
			/* blkno probably has missing parent downlink */
			bt_downlink_missing_check(state, rightsplit, blkno, page);
		}

		rightsplit = P_INCOMPLETE_SPLIT(opaque);

		/*
		 * If we visit page with high key, check that it is equal to the
		 * target key next to corresponding downlink.
		 */
		if (!rightsplit && !P_RIGHTMOST(opaque))
		{
			BTPageOpaque topaque;
			IndexTuple	highkey;
			OffsetNumber pivotkey_offset;

			/* Get high key */
			itemid = PageGetItemIdCareful(state, blkno, page, P_HIKEY);
			highkey = (IndexTuple) PageGetItem(page, itemid);

			/*
			 * There might be two situations when we examine high key.  If
			 * current child page is referenced by given target downlink, we
			 * should look to the next offset number for matching key from
			 * target page.
			 *
			 * Alternatively, we're following rightlinks somewhere in the
			 * middle between page referenced by previous target's downlink
			 * and the page referenced by current target's downlink.  If
			 * current child page hasn't incomplete split flag set, then its
			 * high key should match to the target's key of current offset
			 * number. This happens when a previous call here (to
			 * bt_child_highkey_check()) found an incomplete split, and we
			 * reach a right sibling page without a downlink -- the right
			 * sibling page's high key still needs to be matched to a
			 * separator key on the parent/target level.
			 *
			 * Don't apply OffsetNumberNext() to target_downlinkoffnum when we
			 * already had to step right on the child level. Our traversal of
			 * the child level must try to move in perfect lockstep behind (to
			 * the left of) the target/parent level traversal.
			 */
			if (blkno == downlink)
				pivotkey_offset = OffsetNumberNext(target_downlinkoffnum);
			else
				pivotkey_offset = target_downlinkoffnum;

			topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);

			if (!offset_is_negative_infinity(topaque, pivotkey_offset))
			{
				/*
				 * If we're looking for the next pivot tuple in target page,
				 * but there are no more pivot tuples, then we should match to
				 * high key instead.
				 */
				if (pivotkey_offset > PageGetMaxOffsetNumber(state->target))
				{
					if (P_RIGHTMOST(topaque))
						ereport(ERROR,
								(errcode(ERRCODE_INDEX_CORRUPTED),
								 errmsg("child high key is greater than rightmost pivot key on target level in index \"%s\"",
										RelationGetRelationName(state->rel)),
								 errdetail_internal("Target block=%u child block=%u target page lsn=%X/%X.",
													state->targetblock, blkno,
													LSN_FORMAT_ARGS(state->targetlsn))));
					pivotkey_offset = P_HIKEY;
				}
				itemid = PageGetItemIdCareful(state, state->targetblock,
											  state->target, pivotkey_offset);
				itup = (IndexTuple) PageGetItem(state->target, itemid);
			}
			else
			{
				/*
				 * We cannot try to match child's high key to a negative
				 * infinity key in target, since there is nothing to compare.
				 * However, it's still possible to match child's high key
				 * outside of target page.  The reason why we are here is that
				 * bt_child_highkey_check() was previously called for the
				 * cousin page of 'loaded_child', which is incomplete split.
				 * So, now we traverse to the right of that cousin page and
				 * current child level page under consideration still belongs
				 * to the subtree of target's left sibling.  Thus, we need to
				 * match child's high key to its left uncle page high key.
				 * Thankfully we saved it, it's called a "low key" of target
				 * page.
				 */
				if (!state->lowkey)
					ereport(ERROR,
							(errcode(ERRCODE_INDEX_CORRUPTED),
							 errmsg("can't find left sibling high key in index \"%s\"",
									RelationGetRelationName(state->rel)),
							 errdetail_internal("Target block=%u child block=%u target page lsn=%X/%X.",
												state->targetblock, blkno,
												LSN_FORMAT_ARGS(state->targetlsn))));
				itup = state->lowkey;
			}

			if (!bt_pivot_tuple_identical(state->heapkeyspace, highkey, itup))
			{
				ereport(ERROR,
						(errcode(ERRCODE_INDEX_CORRUPTED),
						 errmsg("mismatch between parent key and child high key in index \"%s\"",
								RelationGetRelationName(state->rel)),
						 errdetail_internal("Target block=%u child block=%u target page lsn=%X/%X.",
											state->targetblock, blkno,
											LSN_FORMAT_ARGS(state->targetlsn))));
			}
		}

		/* Exit if we already found next downlink */
		if (blkno == downlink)
		{
			state->prevrightlink = opaque->btpo_next;
			state->previncompletesplit = rightsplit;
			return;
		}

		/* Traverse to the next page using rightlink */
		blkno = opaque->btpo_next;

		/* Free page contents if it's allocated by us */
		if (page != loaded_child)
			pfree(page);
		first = false;
	}
}
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
#define P_HAS_FULLXID(opaque)
Definition: nbtree.h:227
#define P_INCOMPLETE_SPLIT(opaque)
Definition: nbtree.h:226
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
static bool bt_pivot_tuple_identical(bool heapkeyspace, IndexTuple itup1, IndexTuple itup2)
static void bt_downlink_missing_check(BtreeCheckState *state, bool rightsplit, BlockNumber targetblock, Page target)

References BlockNumberIsValid, bt_downlink_missing_check(), bt_pivot_tuple_identical(), BTPageOpaqueData::btpo_level, BTPageOpaqueData::btpo_next, BTPageOpaqueData::btpo_prev, BTreeTupleGetDownLink(), ereport, errcode(), errdetail_internal(), errmsg(), ERROR, InvalidBlockNumber, LSN_FORMAT_ARGS, offset_is_negative_infinity(), OffsetNumberIsValid, OffsetNumberNext, P_HAS_FULLXID, P_HIKEY, P_IGNORE, P_INCOMPLETE_SPLIT, P_ISDELETED, P_LEFTMOST, P_NONE, P_RIGHTMOST, PageGetItem, PageGetItemIdCareful(), PageGetMaxOffsetNumber, PageGetSpecialPointer, palloc_btree_page(), pfree(), and RelationGetRelationName.

Referenced by bt_child_check(), and bt_target_page_check().

◆ bt_downlink_missing_check()

/*
 * Check that a page lacking a parent downlink is in an expected state: the
 * missing downlink must be explained either by an interrupted page split
 * (left sibling has its incomplete-split flag set, per 'rightsplit') or by an
 * interrupted multi-level page deletion (a half-dead leaf whose top-parent
 * link points back at this page).  Anything else is reported as corruption.
 *
 * Only called in readonly (parentcheck) mode; see Assert below.
 */
static void
bt_downlink_missing_check(BtreeCheckState *state, bool rightsplit,
						  BlockNumber blkno, Page page)
{
	BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
	ItemId		itemid;
	IndexTuple	itup;
	Page		child;
	BTPageOpaque copaque;
	uint32		level;
	BlockNumber childblk;
	XLogRecPtr	pagelsn;

	Assert(state->readonly);
	Assert(!P_IGNORE(opaque));

	/* No next level up with downlinks to fingerprint from the true root */
	if (P_ISROOT(opaque))
		return;

	pagelsn = PageGetLSN(page);

	/*
	 * Incomplete (interrupted) page splits can account for the lack of a
	 * downlink.  Some inserting transaction should eventually complete the
	 * page split in passing, when it notices that the left sibling page is
	 * P_INCOMPLETE_SPLIT().
	 *
	 * In general, VACUUM is not prepared for there to be no downlink to a
	 * page that it deletes.  This is the main reason why the lack of a
	 * downlink can be reported as corruption here.  It's not obvious that an
	 * invalid missing downlink can result in wrong answers to queries,
	 * though, since index scans that land on the child may end up
	 * consistently moving right. The handling of concurrent page splits (and
	 * page deletions) within _bt_moveright() cannot distinguish
	 * inconsistencies that last for a moment from inconsistencies that are
	 * permanent and irrecoverable.
	 *
	 * VACUUM isn't even prepared to delete pages that have no downlink due to
	 * an incomplete page split, but it can detect and reason about that case
	 * by design, so it shouldn't be taken to indicate corruption.  See
	 * _bt_pagedel() for full details.
	 */
	if (rightsplit)
	{
		ereport(DEBUG1,
				(errcode(ERRCODE_NO_DATA),
				 errmsg_internal("harmless interrupted page split detected in index \"%s\"",
								 RelationGetRelationName(state->rel)),
				 errdetail_internal("Block=%u level=%u left sibling=%u page lsn=%X/%X.",
									blkno, opaque->btpo_level,
									opaque->btpo_prev,
									LSN_FORMAT_ARGS(pagelsn))));
		return;
	}

	/*
	 * Page under check is probably the "top parent" of a multi-level page
	 * deletion.  We'll need to descend the subtree to make sure that
	 * descendant pages are consistent with that, though.
	 *
	 * If the page (which must be non-ignorable) is a leaf page, then clearly
	 * it can't be the top parent.  The lack of a downlink is probably a
	 * symptom of a broad problem that could just as easily cause
	 * inconsistencies anywhere else.
	 */
	if (P_ISLEAF(opaque))
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("leaf index block lacks downlink in index \"%s\"",
						RelationGetRelationName(state->rel)),
				 errdetail_internal("Block=%u page lsn=%X/%X.",
									blkno,
									LSN_FORMAT_ARGS(pagelsn))));

	/* Descend from the given page, which is an internal page */
	elog(DEBUG1, "checking for interrupted multi-level deletion due to missing downlink in index \"%s\"",
		 RelationGetRelationName(state->rel));

	level = opaque->btpo_level;
	itemid = PageGetItemIdCareful(state, blkno, page, P_FIRSTDATAKEY(opaque));
	itup = (IndexTuple) PageGetItem(page, itemid);
	childblk = BTreeTupleGetDownLink(itup);
	for (;;)
	{
		CHECK_FOR_INTERRUPTS();

		child = palloc_btree_page(state, childblk);
		copaque = (BTPageOpaque) PageGetSpecialPointer(child);

		if (P_ISLEAF(copaque))
			break;

		/* Do an extra sanity check in passing on internal pages */
		if (copaque->btpo_level != level - 1)
			ereport(ERROR,
					(errcode(ERRCODE_INDEX_CORRUPTED),
					 errmsg_internal("downlink points to block in index \"%s\" whose level is not one level down",
									 RelationGetRelationName(state->rel)),
					 errdetail_internal("Top parent/under check block=%u block pointed to=%u expected level=%u level in pointed to block=%u.",
										blkno, childblk,
										level - 1, copaque->btpo_level)));

		level = copaque->btpo_level;
		itemid = PageGetItemIdCareful(state, childblk, child,
									  P_FIRSTDATAKEY(copaque));
		itup = (IndexTuple) PageGetItem(child, itemid);
		childblk = BTreeTupleGetDownLink(itup);
		/* Be slightly more pro-active in freeing this memory, just in case */
		pfree(child);
	}

	/*
	 * Since there cannot be a concurrent VACUUM operation in readonly mode,
	 * and since a page has no links within other pages (siblings and parent)
	 * once it is marked fully deleted, it should be impossible to land on a
	 * fully deleted page.  See bt_child_check() for further details.
	 *
	 * The bt_child_check() P_ISDELETED() check is repeated here because
	 * bt_child_check() does not visit pages reachable through negative
	 * infinity items.  Besides, bt_child_check() is unwilling to descend
	 * multiple levels.  (The similar bt_child_check() P_ISDELETED() check
	 * within bt_check_level_from_leftmost() won't reach the page either,
	 * since the leaf's live siblings should have their sibling links updated
	 * to bypass the deletion target page when it is marked fully dead.)
	 *
	 * If this error is raised, it might be due to a previous multi-level page
	 * deletion that failed to realize that it wasn't yet safe to mark the
	 * leaf page as fully dead.  A "dangling downlink" will still remain when
	 * this happens.  The fact that the dangling downlink's page (the leaf's
	 * parent/ancestor page) lacked a downlink is incidental.
	 */
	if (P_ISDELETED(copaque))
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg_internal("downlink to deleted leaf page found in index \"%s\"",
								 RelationGetRelationName(state->rel)),
				 errdetail_internal("Top parent/target block=%u leaf block=%u top parent/under check lsn=%X/%X.",
									blkno, childblk,
									LSN_FORMAT_ARGS(pagelsn))));

	/*
	 * Iff leaf page is half-dead, its high key top parent link should point
	 * to what VACUUM considered to be the top parent page at the instant it
	 * was interrupted.  Provided the high key link actually points to the
	 * page under check, the missing downlink we detected is consistent with
	 * there having been an interrupted multi-level page deletion.  This means
	 * that the subtree with the page under check at its root (a page deletion
	 * chain) is in a consistent state, enabling VACUUM to resume deleting the
	 * entire chain the next time it encounters the half-dead leaf page.
	 */
	if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque))
	{
		itemid = PageGetItemIdCareful(state, childblk, child, P_HIKEY);
		itup = (IndexTuple) PageGetItem(child, itemid);
		if (BTreeTupleGetTopParent(itup) == blkno)
			return;
	}

	ereport(ERROR,
			(errcode(ERRCODE_INDEX_CORRUPTED),
			 errmsg("internal index block lacks downlink in index \"%s\"",
					RelationGetRelationName(state->rel)),
			 errdetail_internal("Block=%u level=%u page lsn=%X/%X.",
								blkno, opaque->btpo_level,
								LSN_FORMAT_ARGS(pagelsn))));
}
#define P_ISHALFDEAD(opaque)
Definition: nbtree.h:223
static BlockNumber BTreeTupleGetTopParent(IndexTuple leafhikey)
Definition: nbtree.h:613
uint64 XLogRecPtr
Definition: xlogdefs.h:21

References Assert(), BTPageOpaqueData::btpo_level, BTPageOpaqueData::btpo_prev, BTreeTupleGetDownLink(), BTreeTupleGetTopParent(), CHECK_FOR_INTERRUPTS, DEBUG1, elog, ereport, errcode(), errdetail_internal(), errmsg(), errmsg_internal(), ERROR, LSN_FORMAT_ARGS, P_FIRSTDATAKEY, P_HIKEY, P_IGNORE, P_ISDELETED, P_ISHALFDEAD, P_ISLEAF, P_ISROOT, P_RIGHTMOST, PageGetItem, PageGetItemIdCareful(), PageGetLSN, PageGetSpecialPointer, palloc_btree_page(), pfree(), and RelationGetRelationName.

Referenced by bt_child_highkey_check().

◆ bt_index_check()

Datum bt_index_check ( PG_FUNCTION_ARGS  )

Definition at line 202 of file verify_nbtree.c.

203 {
204  Oid indrelid = PG_GETARG_OID(0);
205  bool heapallindexed = false;
206 
207  if (PG_NARGS() == 2)
208  heapallindexed = PG_GETARG_BOOL(1);
209 
210  bt_index_check_internal(indrelid, false, heapallindexed, false);
211 
212  PG_RETURN_VOID();
213 }
#define PG_RETURN_VOID()
Definition: fmgr.h:349
#define PG_GETARG_OID(n)
Definition: fmgr.h:275
#define PG_NARGS()
Definition: fmgr.h:203
#define PG_GETARG_BOOL(n)
Definition: fmgr.h:274
unsigned int Oid
Definition: postgres_ext.h:31
static void bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed, bool rootdescend)

References bt_index_check_internal(), PG_GETARG_BOOL, PG_GETARG_OID, PG_NARGS, and PG_RETURN_VOID.

◆ bt_index_check_internal()

/*
 * Common implementation for bt_index_check() and bt_index_parent_check().
 *
 * Opens the heap before the index (in lockmode ShareLock for parentcheck,
 * otherwise AccessShareLock), sanity-checks that the relation really is a
 * checkable B-Tree, validates metapage fields, and hands off to
 * bt_check_every_level() for the actual verification.  Locks are released
 * before return.
 */
static void
bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed,
						bool rootdescend)
{
	Oid			heapid;
	Relation	indrel;
	Relation	heaprel;
	LOCKMODE	lockmode;

	/* parentcheck requires excluding concurrent writers */
	if (parentcheck)
		lockmode = ShareLock;
	else
		lockmode = AccessShareLock;

	/*
	 * We must lock table before index to avoid deadlocks.  However, if the
	 * passed indrelid isn't an index then IndexGetRelation() will fail.
	 * Rather than emitting a not-very-helpful error message, postpone
	 * complaining, expecting that the is-it-an-index test below will fail.
	 *
	 * In hot standby mode this will raise an error when parentcheck is true.
	 */
	heapid = IndexGetRelation(indrelid, true);
	if (OidIsValid(heapid))
		heaprel = table_open(heapid, lockmode);
	else
		heaprel = NULL;

	/*
	 * Open the target index relations separately (like relation_openrv(), but
	 * with heap relation locked first to prevent deadlocking).  In hot
	 * standby mode this will raise an error when parentcheck is true.
	 *
	 * There is no need for the usual indcheckxmin usability horizon test
	 * here, even in the heapallindexed case, because index undergoing
	 * verification only needs to have entries for a new transaction snapshot.
	 * (If this is a parentcheck verification, there is no question about
	 * committed or recently dead heap tuples lacking index entries due to
	 * concurrent activity.)
	 */
	indrel = index_open(indrelid, lockmode);

	/*
	 * Since we did the IndexGetRelation call above without any lock, it's
	 * barely possible that a race against an index drop/recreation could have
	 * netted us the wrong table.
	 */
	if (heaprel == NULL || heapid != IndexGetRelation(indrelid, false))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_TABLE),
				 errmsg("could not open parent table of index \"%s\"",
						RelationGetRelationName(indrel))));

	/* Relation suitable for checking as B-Tree? */
	btree_index_checkable(indrel);

	if (btree_index_mainfork_expected(indrel))
	{
		bool		heapkeyspace,
					allequalimage;

		if (!smgrexists(RelationGetSmgr(indrel), MAIN_FORKNUM))
			ereport(ERROR,
					(errcode(ERRCODE_INDEX_CORRUPTED),
					 errmsg("index \"%s\" lacks a main relation fork",
							RelationGetRelationName(indrel))));

		/* Extract metadata from metapage, and sanitize it in passing */
		_bt_metaversion(indrel, &heapkeyspace, &allequalimage);
		if (allequalimage && !heapkeyspace)
			ereport(ERROR,
					(errcode(ERRCODE_INDEX_CORRUPTED),
					 errmsg("index \"%s\" metapage has equalimage field set on unsupported nbtree version",
							RelationGetRelationName(indrel))));
		if (allequalimage && !_bt_allequalimage(indrel, false))
			ereport(ERROR,
					(errcode(ERRCODE_INDEX_CORRUPTED),
					 errmsg("index \"%s\" metapage incorrectly indicates that deduplication is safe",
							RelationGetRelationName(indrel))));

		/* Check index, possibly against table it is an index on */
		bt_check_every_level(indrel, heaprel, heapkeyspace, parentcheck,
							 heapallindexed, rootdescend);
	}

	/*
	 * Release locks early.  That's ok here because nothing in the called
	 * routines will trigger shared cache invalidations to be sent, so we can
	 * relax the usual pattern of only releasing locks after commit.
	 */
	index_close(indrel, lockmode);
	if (heaprel)
		table_close(heaprel, lockmode);
}
#define OidIsValid(objectId)
Definition: c.h:710
Oid IndexGetRelation(Oid indexId, bool missing_ok)
Definition: index.c:3506
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:158
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:132
int LOCKMODE
Definition: lockdefs.h:26
#define AccessShareLock
Definition: lockdefs.h:36
#define ShareLock
Definition: lockdefs.h:40
void _bt_metaversion(Relation rel, bool *heapkeyspace, bool *allequalimage)
Definition: nbtpage.c:736
bool _bt_allequalimage(Relation rel, bool debugmessage)
Definition: nbtutils.c:2692
#define ERRCODE_UNDEFINED_TABLE
Definition: pgbench.c:79
static SMgrRelation RelationGetSmgr(Relation rel)
Definition: rel.h:545
@ MAIN_FORKNUM
Definition: relpath.h:43
bool smgrexists(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:247
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:167
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:39
static void bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace, bool readonly, bool heapallindexed, bool rootdescend)
static bool btree_index_mainfork_expected(Relation rel)
static void btree_index_checkable(Relation rel)

References _bt_allequalimage(), _bt_metaversion(), AccessShareLock, bt_check_every_level(), btree_index_checkable(), btree_index_mainfork_expected(), ereport, errcode(), ERRCODE_UNDEFINED_TABLE, errmsg(), ERROR, index_close(), index_open(), IndexGetRelation(), MAIN_FORKNUM, OidIsValid, RelationGetRelationName, RelationGetSmgr(), ShareLock, smgrexists(), table_close(), and table_open().

Referenced by bt_index_check(), and bt_index_parent_check().

◆ bt_index_parent_check()

Datum bt_index_parent_check ( PG_FUNCTION_ARGS  )

Definition at line 225 of file verify_nbtree.c.

226 {
227  Oid indrelid = PG_GETARG_OID(0);
228  bool heapallindexed = false;
229  bool rootdescend = false;
230 
231  if (PG_NARGS() >= 2)
232  heapallindexed = PG_GETARG_BOOL(1);
233  if (PG_NARGS() == 3)
234  rootdescend = PG_GETARG_BOOL(2);
235 
236  bt_index_check_internal(indrelid, true, heapallindexed, rootdescend);
237 
238  PG_RETURN_VOID();
239 }

References bt_index_check_internal(), PG_GETARG_BOOL, PG_GETARG_OID, PG_NARGS, and PG_RETURN_VOID.

◆ bt_mkscankey_pivotsearch()

/*
 * Build an insertion scan key from itup, with the pivotsearch flag set so
 * that comparisons use "pivot search" semantics.
 */
static inline BTScanInsert
bt_mkscankey_pivotsearch(Relation rel, IndexTuple itup)
{
	BTScanInsert result = _bt_mkscankey(rel, itup);

	result->pivotsearch = true;

	return result;
}
BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup)
Definition: nbtutils.c:90
bool pivotsearch
Definition: nbtree.h:788

References _bt_mkscankey(), and BTScanInsertData::pivotsearch.

Referenced by bt_right_page_check_scankey(), and bt_target_page_check().

◆ bt_normalize_tuple()

/*
 * Normalize an index tuple for heapallindexed fingerprinting.
 *
 * Compressed varlena datums stored in the index would not fingerprint the
 * same as an equivalent tuple formed from uncompressed heap datums, so any
 * compressed attribute is detoasted and the tuple is re-formed.  External
 * (TOAST-pointer) datums are never expected here and are reported as
 * corruption.
 *
 * Returns the original tuple unchanged when no normalization is needed;
 * otherwise returns a newly palloc'd reformed tuple.
 */
static IndexTuple
bt_normalize_tuple(BtreeCheckState *state, IndexTuple itup)
{
	TupleDesc	tupleDescriptor = RelationGetDescr(state->rel);
	Datum		normalized[INDEX_MAX_KEYS];
	bool		isnull[INDEX_MAX_KEYS];
	bool		toast_free[INDEX_MAX_KEYS];
	bool		formnewtup = false;
	IndexTuple	reformed;
	int			i;

	/* Caller should only pass "logical" non-pivot tuples here */
	Assert(!BTreeTupleIsPosting(itup) && !BTreeTupleIsPivot(itup));

	/* Easy case: It's immediately clear that tuple has no varlena datums */
	if (!IndexTupleHasVarwidths(itup))
		return itup;

	for (i = 0; i < tupleDescriptor->natts; i++)
	{
		Form_pg_attribute att;

		att = TupleDescAttr(tupleDescriptor, i);

		/* Assume untoasted/already normalized datum initially */
		toast_free[i] = false;
		normalized[i] = index_getattr(itup, att->attnum,
									  tupleDescriptor,
									  &isnull[i]);
		if (att->attbyval || att->attlen != -1 || isnull[i])
			continue;

		/*
		 * Callers always pass a tuple that could safely be inserted into the
		 * index without further processing, so an external varlena header
		 * should never be encountered here
		 */
		if (VARATT_IS_EXTERNAL(DatumGetPointer(normalized[i])))
			ereport(ERROR,
					(errcode(ERRCODE_INDEX_CORRUPTED),
					 errmsg("external varlena datum in tuple that references heap row (%u,%u) in index \"%s\"",
							ItemPointerGetBlockNumber(&(itup->t_tid)),
							ItemPointerGetOffsetNumber(&(itup->t_tid)),
							RelationGetRelationName(state->rel))));
		else if (VARATT_IS_COMPRESSED(DatumGetPointer(normalized[i])))
		{
			/* Compressed in index: detoast so fingerprint matches heap form */
			formnewtup = true;
			normalized[i] = PointerGetDatum(PG_DETOAST_DATUM(normalized[i]));
			toast_free[i] = true;
		}
	}

	/* Easier case: Tuple has varlena datums, none of which are compressed */
	if (!formnewtup)
		return itup;

	/*
	 * Hard case: Tuple had compressed varlena datums that necessitate
	 * creating normalized version of the tuple from uncompressed input datums
	 * (normalized input datums).  This is rather naive, but shouldn't be
	 * necessary too often.
	 *
	 * Note that we rely on deterministic index_form_tuple() TOAST compression
	 * of normalized input.
	 */
	reformed = index_form_tuple(tupleDescriptor, normalized, isnull);
	reformed->t_tid = itup->t_tid;

	/* Cannot leak memory here */
	for (i = 0; i < tupleDescriptor->natts; i++)
		if (toast_free[i])
			pfree(DatumGetPointer(normalized[i]));

	return reformed;
}
#define PG_DETOAST_DATUM(datum)
Definition: fmgr.h:240
IndexTuple index_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: indextuple.c:47
int i
Definition: isn.c:73
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define index_getattr(tup, attnum, tupleDesc, isnull)
Definition: itup.h:99
#define IndexTupleHasVarwidths(itup)
Definition: itup.h:72
static bool BTreeTupleIsPivot(IndexTuple itup)
Definition: nbtree.h:473
static bool BTreeTupleIsPosting(IndexTuple itup)
Definition: nbtree.h:485
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:207
#define INDEX_MAX_KEYS
uintptr_t Datum
Definition: postgres.h:411
#define DatumGetPointer(X)
Definition: postgres.h:593
#define VARATT_IS_COMPRESSED(PTR)
Definition: postgres.h:325
#define VARATT_IS_EXTERNAL(PTR)
Definition: postgres.h:326
#define PointerGetDatum(X)
Definition: postgres.h:600
#define RelationGetDescr(relation)
Definition: rel.h:504
ItemPointerData t_tid
Definition: itup.h:37
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92

References Assert(), BTreeTupleIsPivot(), BTreeTupleIsPosting(), DatumGetPointer, ereport, errcode(), errmsg(), ERROR, i, index_form_tuple(), index_getattr, INDEX_MAX_KEYS, IndexTupleHasVarwidths, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, TupleDescData::natts, pfree(), PG_DETOAST_DATUM, PointerGetDatum, RelationGetDescr, RelationGetRelationName, IndexTupleData::t_tid, TupleDescAttr, VARATT_IS_COMPRESSED, and VARATT_IS_EXTERNAL.

Referenced by bt_target_page_check(), and bt_tuple_present_callback().

◆ bt_pivot_tuple_identical()

/*
 * Compare two pivot tuples for byte-wise equality.
 *
 * In heapkeyspace indexes the item pointer's offset number carries the
 * number of attributes remaining after suffix truncation, so comparison
 * starts at ip_posid and includes it.  On pg_upgrade'd !heapkeyspace
 * indexes that field is not consistent across levels, so comparison starts
 * just past the item pointer, at t_info.
 */
static bool
bt_pivot_tuple_identical(bool heapkeyspace, IndexTuple itup1, IndexTuple itup2)
{
	Size		tupsize = IndexTupleSize(itup1);

	/* Differently-sized tuples can never be identical */
	if (tupsize != IndexTupleSize(itup2))
		return false;

	if (heapkeyspace)
	{
		/*
		 * Offset number will contain important information in heapkeyspace
		 * indexes: the number of attributes left in the pivot tuple following
		 * suffix truncation.  Don't skip over it (compare it too).
		 */
		return memcmp(&itup1->t_tid.ip_posid, &itup2->t_tid.ip_posid,
					  tupsize -
					  offsetof(ItemPointerData, ip_posid)) == 0;
	}

	/*
	 * Cannot rely on offset number field having consistent value across
	 * levels on pg_upgrade'd !heapkeyspace indexes.  Compare contents of
	 * tuple starting from just after item pointer (i.e. after block number
	 * and offset number).
	 */
	return memcmp(&itup1->t_info, &itup2->t_info,
				  tupsize -
				  offsetof(IndexTupleData, t_info)) == 0;
}
#define offsetof(type, field)
Definition: c.h:727
unsigned short t_info
Definition: itup.h:49
OffsetNumber ip_posid
Definition: itemptr.h:39

References IndexTupleSize, ItemPointerData::ip_posid, offsetof, IndexTupleData::t_info, and IndexTupleData::t_tid.

Referenced by bt_child_highkey_check().

◆ bt_posting_plain_tuple()

static IndexTuple bt_posting_plain_tuple ( IndexTuple  itup,
int  n 
)
inlinestatic

Definition at line 2616 of file verify_nbtree.c.

/*
 * bt_posting_plain_tuple -- build a plain (single heap TID) tuple from
 * the n'th TID of a posting list tuple, via _bt_form_posting().  The
 * result is separately allocated; the caller in bt_target_page_check()
 * frees it after fingerprinting.
 */
2617 {
2618  Assert(BTreeTupleIsPosting(itup));
2619 
2620  /* Returns non-posting-list tuple */
2621  return _bt_form_posting(itup, BTreeTupleGetPostingN(itup, n), 1);
2622 }
IndexTuple _bt_form_posting(IndexTuple base, ItemPointer htids, int nhtids)
Definition: nbtdedup.c:859
static ItemPointer BTreeTupleGetPostingN(IndexTuple posting, int n)
Definition: nbtree.h:537

References _bt_form_posting(), Assert(), BTreeTupleGetPostingN(), and BTreeTupleIsPosting().

Referenced by bt_target_page_check().

◆ bt_recheck_sibling_links()

static void bt_recheck_sibling_links ( BtreeCheckState state,
BlockNumber  btpo_prev_from_target,
BlockNumber  leftcurrent 
)
static

Definition at line 912 of file verify_nbtree.c.

915 {
916  if (!state->readonly)
917  {
918  Buffer lbuf;
919  Buffer newtargetbuf;
920  Page page;
921  BTPageOpaque opaque;
922  BlockNumber newtargetblock;
923 
924  /* Couple locks in the usual order for nbtree: Left to right */
925  lbuf = ReadBufferExtended(state->rel, MAIN_FORKNUM, leftcurrent,
926  RBM_NORMAL, state->checkstrategy);
927  LockBuffer(lbuf, BT_READ);
928  _bt_checkpage(state->rel, lbuf);
929  page = BufferGetPage(lbuf);
930  opaque = (BTPageOpaque) PageGetSpecialPointer(page);
931  if (P_ISDELETED(opaque))
932  {
933  /*
934  * Cannot reason about concurrently deleted page -- the left link
935  * in the page to the right is expected to point to some other
936  * page to the left (not leftcurrent page).
937  *
938  * Note that we deliberately don't give up with a half-dead page.
939  */
940  UnlockReleaseBuffer(lbuf);
941  return;
942  }
943 
944  newtargetblock = opaque->btpo_next;
945  /* Avoid self-deadlock when newtargetblock == leftcurrent */
946  if (newtargetblock != leftcurrent)
947  {
948  newtargetbuf = ReadBufferExtended(state->rel, MAIN_FORKNUM,
949  newtargetblock, RBM_NORMAL,
950  state->checkstrategy);
951  LockBuffer(newtargetbuf, BT_READ);
952  _bt_checkpage(state->rel, newtargetbuf);
953  page = BufferGetPage(newtargetbuf);
954  opaque = (BTPageOpaque) PageGetSpecialPointer(page);
955  /* btpo_prev_from_target may have changed; update it */
956  btpo_prev_from_target = opaque->btpo_prev;
957  }
958  else
959  {
960  /*
961  * leftcurrent right sibling points back to leftcurrent block.
962  * Index is corrupt. Easiest way to handle this is to pretend
963  * that we actually read from a distinct page that has an invalid
964  * block number in its btpo_prev.
965  */
966  newtargetbuf = InvalidBuffer;
967  btpo_prev_from_target = InvalidBlockNumber;
968  }
969 
970  /*
971  * No need to check P_ISDELETED here, since new target block cannot be
972  * marked deleted as long as we hold a lock on lbuf
973  */
974  if (BufferIsValid(newtargetbuf))
975  UnlockReleaseBuffer(newtargetbuf);
976  UnlockReleaseBuffer(lbuf);
977 
978  if (btpo_prev_from_target == leftcurrent)
979  {
980  /* Report split in left sibling, not target (or new target) */
981  ereport(DEBUG1,
982  (errcode(ERRCODE_INTERNAL_ERROR),
983  errmsg_internal("harmless concurrent page split detected in index \"%s\"",
984  RelationGetRelationName(state->rel)),
985  errdetail_internal("Block=%u new right sibling=%u original right sibling=%u.",
986  leftcurrent, newtargetblock,
987  state->targetblock)));
988  return;
989  }
990 
991  /*
992  * Index is corrupt. Make sure that we report correct target page.
993  *
994  * This could have changed in cases where there was a concurrent page
995  * split, as well as index corruption (at least in theory). Note that
996  * btpo_prev_from_target was already updated above.
997  */
998  state->targetblock = newtargetblock;
999  }
1000 
1001  ereport(ERROR,
1002  (errcode(ERRCODE_INDEX_CORRUPTED),
1003  errmsg("left link/right link pair in index \"%s\" not in agreement",
1004  RelationGetRelationName(state->rel)),
1005  errdetail_internal("Block=%u left block=%u left link from block=%u.",
1006  state->targetblock, leftcurrent,
1007  btpo_prev_from_target)));
1008 }
int Buffer
Definition: buf.h:23
#define InvalidBuffer
Definition: buf.h:25
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3780
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3996
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:741
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
@ RBM_NORMAL
Definition: bufmgr.h:39
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
void _bt_checkpage(Relation rel, Buffer buf)
Definition: nbtpage.c:794
#define BT_READ
Definition: nbtree.h:712

References _bt_checkpage(), BT_READ, BTPageOpaqueData::btpo_next, BTPageOpaqueData::btpo_prev, BufferGetPage, BufferIsValid, DEBUG1, ereport, errcode(), errdetail_internal(), errmsg(), errmsg_internal(), ERROR, InvalidBlockNumber, InvalidBuffer, LockBuffer(), MAIN_FORKNUM, P_ISDELETED, PageGetSpecialPointer, RBM_NORMAL, ReadBufferExtended(), RelationGetRelationName, and UnlockReleaseBuffer().

Referenced by bt_check_level_from_leftmost().

◆ bt_right_page_check_scankey()

static BTScanInsert bt_right_page_check_scankey ( BtreeCheckState state)
static

Definition at line 1545 of file verify_nbtree.c.

1546 {
1547  BTPageOpaque opaque;
1548  ItemId rightitem;
1549  IndexTuple firstitup;
1550  BlockNumber targetnext;
1551  Page rightpage;
1552  OffsetNumber nline;
1553 
1554  /* Determine target's next block number */
1555  opaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
1556 
1557  /* If target is already rightmost, no right sibling; nothing to do here */
1558  if (P_RIGHTMOST(opaque))
1559  return NULL;
1560 
1561  /*
1562  * General notes on concurrent page splits and page deletion:
1563  *
1564  * Routines like _bt_search() don't require *any* page split interlock
1565  * when descending the tree, including something very light like a buffer
1566  * pin. That's why it's okay that we don't either. This avoidance of any
1567  * need to "couple" buffer locks is the raison d' etre of the Lehman & Yao
1568  * algorithm, in fact.
1569  *
1570  * That leaves deletion. A deleted page won't actually be recycled by
1571  * VACUUM early enough for us to fail to at least follow its right link
1572  * (or left link, or downlink) and find its sibling, because recycling
1573  * does not occur until no possible index scan could land on the page.
1574  * Index scans can follow links with nothing more than their snapshot as
1575  * an interlock and be sure of at least that much. (See page
1576  * recycling/"visible to everyone" notes in nbtree README.)
1577  *
1578  * Furthermore, it's okay if we follow a rightlink and find a half-dead or
1579  * dead (ignorable) page one or more times. There will either be a
1580  * further right link to follow that leads to a live page before too long
1581  * (before passing by parent's rightmost child), or we will find the end
1582  * of the entire level instead (possible when parent page is itself the
1583  * rightmost on its level).
1584  */
1585  targetnext = opaque->btpo_next;
1586  for (;;)
1587  {
1588  CHECK_FOR_INTERRUPTS();
1589 
1590  rightpage = palloc_btree_page(state, targetnext);
1591  opaque = (BTPageOpaque) PageGetSpecialPointer(rightpage);
1592 
1593  if (!P_IGNORE(opaque) || P_RIGHTMOST(opaque))
1594  break;
1595 
1596  /*
1597  * We landed on a deleted or half-dead sibling page. Step right until
1598  * we locate a live sibling page.
1599  */
1600  ereport(DEBUG2,
1601  (errcode(ERRCODE_NO_DATA),
1602  errmsg_internal("level %u sibling page in block %u of index \"%s\" was found deleted or half dead",
1603  opaque->btpo_level, targetnext, RelationGetRelationName(state->rel)),
1604  errdetail_internal("Deleted page found when building scankey from right sibling.")));
1605 
1606  targetnext = opaque->btpo_next;
1607 
1608  /* Be slightly more pro-active in freeing this memory, just in case */
1609  pfree(rightpage);
1610  }
1611 
1612  /*
1613  * No ShareLock held case -- why it's safe to proceed.
1614  *
1615  * Problem:
1616  *
1617  * We must avoid false positive reports of corruption when caller treats
1618  * item returned here as an upper bound on target's last item. In
1619  * general, false positives are disallowed. Avoiding them here when
1620  * caller is !readonly is subtle.
1621  *
1622  * A concurrent page deletion by VACUUM of the target page can result in
1623  * the insertion of items on to this right sibling page that would
1624  * previously have been inserted on our target page. There might have
1625  * been insertions that followed the target's downlink after it was made
1626  * to point to right sibling instead of target by page deletion's first
1627  * phase. The inserters insert items that would belong on target page.
1628  * This race is very tight, but it's possible. This is our only problem.
1629  *
1630  * Non-problems:
1631  *
1632  * We are not hindered by a concurrent page split of the target; we'll
1633  * never land on the second half of the page anyway. A concurrent split
1634  * of the right page will also not matter, because the first data item
1635  * remains the same within the left half, which we'll reliably land on. If
1636  * we had to skip over ignorable/deleted pages, it cannot matter because
1637  * their key space has already been atomically merged with the first
1638  * non-ignorable page we eventually find (doesn't matter whether the page
1639  * we eventually find is a true sibling or a cousin of target, which we go
1640  * into below).
1641  *
1642  * Solution:
1643  *
1644  * Caller knows that it should reverify that target is not ignorable
1645  * (half-dead or deleted) when cross-page sibling item comparison appears
1646  * to indicate corruption (invariant fails). This detects the single race
1647  * condition that exists for caller. This is correct because the
1648  * continued existence of target block as non-ignorable (not half-dead or
1649  * deleted) implies that target page was not merged into from the right by
1650  * deletion; the key space at or after target never moved left. Target's
1651  * parent either has the same downlink to target as before, or a <
1652  * downlink due to deletion at the left of target. Target either has the
1653  * same highkey as before, or a highkey < before when there is a page
1654  * split. (The rightmost concurrently-split-from-target-page page will
1655  * still have the same highkey as target was originally found to have,
1656  * which for our purposes is equivalent to target's highkey itself never
1657  * changing, since we reliably skip over
1658  * concurrently-split-from-target-page pages.)
1659  *
1660  * In simpler terms, we allow that the key space of the target may expand
1661  * left (the key space can move left on the left side of target only), but
1662  * the target key space cannot expand right and get ahead of us without
1663  * our detecting it. The key space of the target cannot shrink, unless it
1664  * shrinks to zero due to the deletion of the original page, our canary
1665  * condition. (To be very precise, we're a bit stricter than that because
1666  * it might just have been that the target page split and only the
1667  * original target page was deleted. We can be more strict, just not more
1668  * lax.)
1669  *
1670  * Top level tree walk caller moves on to next page (makes it the new
1671  * target) following recovery from this race. (cf. The rationale for
1672  * child/downlink verification needing a ShareLock within
1673  * bt_child_check(), where page deletion is also the main source of
1674  * trouble.)
1675  *
1676  * Note that it doesn't matter if right sibling page here is actually a
1677  * cousin page, because in order for the key space to be readjusted in a
1678  * way that causes us issues in next level up (guiding problematic
1679  * concurrent insertions to the cousin from the grandparent rather than to
1680  * the sibling from the parent), there'd have to be page deletion of
1681  * target's parent page (affecting target's parent's downlink in target's
1682  * grandparent page). Internal page deletion only occurs when there are
1683  * no child pages (they were all fully deleted), and caller is checking
1684  * that the target's parent has at least one non-deleted (so
1685  * non-ignorable) child: the target page. (Note that the first phase of
1686  * deletion atomically marks the page to be deleted half-dead/ignorable at
1687  * the same time downlink in its parent is removed, so caller will
1688  * definitely not fail to detect that this happened.)
1689  *
1690  * This trick is inspired by the method backward scans use for dealing
1691  * with concurrent page splits; concurrent page deletion is a problem that
1692  * similarly receives special consideration sometimes (it's possible that
1693  * the backwards scan will re-read its "original" block after failing to
1694  * find a right-link to it, having already moved in the opposite direction
1695  * (right/"forwards") a few times to try to locate one). Just like us,
1696  * that happens only to determine if there was a concurrent page deletion
1697  * of a reference page, and just like us if there was a page deletion of
1698  * that reference page it means we can move on from caring about the
1699  * reference page. See the nbtree README for a full description of how
1700  * that works.
1701  */
1702  nline = PageGetMaxOffsetNumber(rightpage);
1703 
1704  /*
1705  * Get first data item, if any
1706  */
1707  if (P_ISLEAF(opaque) && nline >= P_FIRSTDATAKEY(opaque))
1708  {
1709  /* Return first data item (if any) */
1710  rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
1711  P_FIRSTDATAKEY(opaque));
1712  }
1713  else if (!P_ISLEAF(opaque) &&
1714  nline >= OffsetNumberNext(P_FIRSTDATAKEY(opaque)))
1715  {
1716  /*
1717  * Return first item after the internal page's "negative infinity"
1718  * item
1719  */
1720  rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
1721  OffsetNumberNext(P_FIRSTDATAKEY(opaque)));
1722  }
1723  else
1724  {
1725  /*
1726  * No first item. Page is probably empty leaf page, but it's also
1727  * possible that it's an internal page with only a negative infinity
1728  * item.
1729  */
1730  ereport(DEBUG2,
1731  (errcode(ERRCODE_NO_DATA),
1732  errmsg_internal("%s block %u of index \"%s\" has no first data item",
1733  P_ISLEAF(opaque) ? "leaf" : "internal", targetnext,
1734  RelationGetRelationName(state->rel))));
1735  return NULL;
1736  }
1737 
1738  /*
1739  * Return first real item scankey. Note that this relies on right page
1740  * memory remaining allocated.
1741  */
1742  firstitup = (IndexTuple) PageGetItem(rightpage, rightitem);
1743  return bt_mkscankey_pivotsearch(state->rel, firstitup);
1744 }
#define DEBUG2
Definition: elog.h:23
static BTScanInsert bt_mkscankey_pivotsearch(Relation rel, IndexTuple itup)

References bt_mkscankey_pivotsearch(), BTPageOpaqueData::btpo_level, BTPageOpaqueData::btpo_next, CHECK_FOR_INTERRUPTS, DEBUG2, ereport, errcode(), errdetail_internal(), errmsg_internal(), OffsetNumberNext, P_FIRSTDATAKEY, P_IGNORE, P_ISLEAF, P_RIGHTMOST, PageGetItem, PageGetItemIdCareful(), PageGetMaxOffsetNumber, PageGetSpecialPointer, palloc_btree_page(), pfree(), and RelationGetRelationName.

Referenced by bt_target_page_check().

◆ bt_rootdescend()

static bool bt_rootdescend ( BtreeCheckState state,
IndexTuple  itup 
)
static

Definition at line 2649 of file verify_nbtree.c.

/*
 * bt_rootdescend -- re-find a leaf tuple by an independent search that
 * starts from the root of the index.  Returns true only when a tuple that
 * compares exactly equal to itup (including its heap TID, carried by
 * key->scantid) is found on the leaf page the search lands on.  Only used
 * with readonly + rootdescend verification (see Assert below).
 */
2650 {
2651  BTScanInsert key;
2652  BTStack stack;
2653  Buffer lbuf;
2654  bool exists;
2655 
2656  key = _bt_mkscankey(state->rel, itup);
2657  Assert(key->heapkeyspace && key->scantid != NULL);
2658 
2659  /*
2660  * Search from root.
2661  *
2662  * Ideally, we would arrange to only move right within _bt_search() when
2663  * an interrupted page split is detected (i.e. when the incomplete split
2664  * bit is found to be set), but for now we accept the possibility that
2665  * that could conceal an inconsistency.
2666  */
2667  Assert(state->readonly && state->rootdescend);
2668  exists = false;
2669  stack = _bt_search(state->rel, key, &lbuf, BT_READ, NULL);
2670 
2671  if (BufferIsValid(lbuf))
2672  {
2673  BTInsertStateData insertstate;
2674  OffsetNumber offnum;
2675  Page page;
2676 
2677  insertstate.itup = itup;
2678  insertstate.itemsz = MAXALIGN(IndexTupleSize(itup));
2679  insertstate.itup_key = key;
2680  insertstate.postingoff = 0;
2681  insertstate.bounds_valid = false;
2682  insertstate.buf = lbuf;
2683 
2684  /* Get matching tuple on leaf page */
2685  offnum = _bt_binsrch_insert(state->rel, &insertstate);
2686  /* Compare first >= matching item on leaf page, if any */
2687  page = BufferGetPage(lbuf);
2688  /* Should match on first heap TID when tuple has a posting list */
2689  if (offnum <= PageGetMaxOffsetNumber(page) &&
2690  insertstate.postingoff <= 0 &&
2691  _bt_compare(state->rel, key, page, offnum) == 0)
2692  exists = true;
2693  _bt_relbuf(state->rel, lbuf);
2694  }
2695 
2696  /* Be tidy: release the descent stack and the insertion scankey */
2697  _bt_freestack(stack);
2698  pfree(key);
2699 
2700  return exists;
2701 }
#define MAXALIGN(LEN)
Definition: c.h:757
void _bt_relbuf(Relation rel, Buffer buf)
Definition: nbtpage.c:1035
OffsetNumber _bt_binsrch_insert(Relation rel, BTInsertState insertstate)
Definition: nbtsearch.c:442
int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum)
Definition: nbtsearch.c:656
BTStack _bt_search(Relation rel, BTScanInsert key, Buffer *bufP, int access, Snapshot snapshot)
Definition: nbtsearch.c:96
void _bt_freestack(BTStack stack)
Definition: nbtutils.c:175
bool bounds_valid
Definition: nbtree.h:821
IndexTuple itup
Definition: nbtree.h:809
BTScanInsert itup_key
Definition: nbtree.h:811

References _bt_binsrch_insert(), _bt_compare(), _bt_freestack(), _bt_mkscankey(), _bt_relbuf(), _bt_search(), Assert(), BTInsertStateData::bounds_valid, BT_READ, BTInsertStateData::buf, BufferGetPage, BufferIsValid, IndexTupleSize, BTInsertStateData::itemsz, BTInsertStateData::itup, BTInsertStateData::itup_key, sort-test::key, MAXALIGN, PageGetMaxOffsetNumber, pfree(), and BTInsertStateData::postingoff.

Referenced by bt_target_page_check().

◆ bt_target_page_check()

static void bt_target_page_check ( BtreeCheckState state)
static

Definition at line 1046 of file verify_nbtree.c.

1047 {
1048  OffsetNumber offset;
1049  OffsetNumber max;
1050  BTPageOpaque topaque;
1051 
1052  topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
1053  max = PageGetMaxOffsetNumber(state->target);
1054 
1055  elog(DEBUG2, "verifying %u items on %s block %u", max,
1056  P_ISLEAF(topaque) ? "leaf" : "internal", state->targetblock);
1057 
1058  /*
1059  * Check the number of attributes in high key. Note, rightmost page
1060  * doesn't contain a high key, so nothing to check
1061  */
1062  if (!P_RIGHTMOST(topaque))
1063  {
1064  ItemId itemid;
1065  IndexTuple itup;
1066 
1067  /* Verify line pointer before checking tuple */
1068  itemid = PageGetItemIdCareful(state, state->targetblock,
1069  state->target, P_HIKEY);
1070  if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
1071  P_HIKEY))
1072  {
1073  itup = (IndexTuple) PageGetItem(state->target, itemid);
1074  ereport(ERROR,
1075  (errcode(ERRCODE_INDEX_CORRUPTED),
1076  errmsg("wrong number of high key index tuple attributes in index \"%s\"",
1077  RelationGetRelationName(state->rel)),
1078  errdetail_internal("Index block=%u natts=%u block type=%s page lsn=%X/%X.",
1079  state->targetblock,
1080  BTreeTupleGetNAtts(itup, state->rel),
1081  P_ISLEAF(topaque) ? "heap" : "index",
1082  LSN_FORMAT_ARGS(state->targetlsn))));
1083  }
1084  }
1085 
1086  /*
1087  * Loop over page items, starting from first non-highkey item, not high
1088  * key (if any). Most tests are not performed for the "negative infinity"
1089  * real item (if any).
1090  */
1091  for (offset = P_FIRSTDATAKEY(topaque);
1092  offset <= max;
1093  offset = OffsetNumberNext(offset))
1094  {
1095  ItemId itemid;
1096  IndexTuple itup;
1097  size_t tupsize;
1098  BTScanInsert skey;
1099  bool lowersizelimit;
1100  ItemPointer scantid;
1101 
1102  CHECK_FOR_INTERRUPTS();
1103 
1104  itemid = PageGetItemIdCareful(state, state->targetblock,
1105  state->target, offset);
1106  itup = (IndexTuple) PageGetItem(state->target, itemid);
1107  tupsize = IndexTupleSize(itup);
1108 
1109  /*
1110  * lp_len should match the IndexTuple reported length exactly, since
1111  * lp_len is completely redundant in indexes, and both sources of
1112  * tuple length are MAXALIGN()'d. nbtree does not use lp_len all that
1113  * frequently, and is surprisingly tolerant of corrupt lp_len fields.
1114  */
1115  if (tupsize != ItemIdGetLength(itemid))
1116  ereport(ERROR,
1117  (errcode(ERRCODE_INDEX_CORRUPTED),
1118  errmsg("index tuple size does not equal lp_len in index \"%s\"",
1119  RelationGetRelationName(state->rel)),
1120  errdetail_internal("Index tid=(%u,%u) tuple size=%zu lp_len=%u page lsn=%X/%X.",
1121  state->targetblock, offset,
1122  tupsize, ItemIdGetLength(itemid),
1123  LSN_FORMAT_ARGS(state->targetlsn)),
1124  errhint("This could be a torn page problem.")));
1125 
1126  /* Check the number of index tuple attributes */
1127  if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
1128  offset))
1129  {
1130  ItemPointer tid;
1131  char *itid,
1132  *htid;
1133 
1134  itid = psprintf("(%u,%u)", state->targetblock, offset);
1135  tid = BTreeTupleGetPointsToTID(itup);
1136  htid = psprintf("(%u,%u)",
1137  ItemPointerGetBlockNumberNoCheck(tid),
1138  ItemPointerGetOffsetNumberNoCheck(tid));
1139 
1140  ereport(ERROR,
1141  (errcode(ERRCODE_INDEX_CORRUPTED),
1142  errmsg("wrong number of index tuple attributes in index \"%s\"",
1143  RelationGetRelationName(state->rel)),
1144  errdetail_internal("Index tid=%s natts=%u points to %s tid=%s page lsn=%X/%X.",
1145  itid,
1146  BTreeTupleGetNAtts(itup, state->rel),
1147  P_ISLEAF(topaque) ? "heap" : "index",
1148  htid,
1149  LSN_FORMAT_ARGS(state->targetlsn))));
1150  }
1151 
1152  /*
1153  * Don't try to generate scankey using "negative infinity" item on
1154  * internal pages. They are always truncated to zero attributes.
1155  */
1156  if (offset_is_negative_infinity(topaque, offset))
1157  {
1158  /*
1159  * We don't call bt_child_check() for "negative infinity" items.
1160  * But if we're performing downlink connectivity check, we do it
1161  * for every item including "negative infinity" one.
1162  */
1163  if (!P_ISLEAF(topaque) && state->readonly)
1164  {
1165  bt_child_highkey_check(state,
1166  offset,
1167  NULL,
1168  topaque->btpo_level);
1169  }
1170  continue;
1171  }
1172 
1173  /*
1174  * Readonly callers may optionally verify that non-pivot tuples can
1175  * each be found by an independent search that starts from the root.
1176  * Note that we deliberately don't do individual searches for each
1177  * TID, since the posting list itself is validated by other checks.
1178  */
1179  if (state->rootdescend && P_ISLEAF(topaque) &&
1180  !bt_rootdescend(state, itup))
1181  {
1182  ItemPointer tid = BTreeTupleGetPointsToTID(itup);
1183  char *itid,
1184  *htid;
1185 
1186  itid = psprintf("(%u,%u)", state->targetblock, offset);
1187  htid = psprintf("(%u,%u)", ItemPointerGetBlockNumber(tid),
1188  ItemPointerGetOffsetNumber(tid));
1189 
1190  ereport(ERROR,
1191  (errcode(ERRCODE_INDEX_CORRUPTED),
1192  errmsg("could not find tuple using search from root page in index \"%s\"",
1193  RelationGetRelationName(state->rel)),
1194  errdetail_internal("Index tid=%s points to heap tid=%s page lsn=%X/%X.",
1195  itid, htid,
1196  LSN_FORMAT_ARGS(state->targetlsn))));
1197  }
1198 
1199  /*
1200  * If tuple is a posting list tuple, make sure posting list TIDs are
1201  * in order
1202  */
1203  if (BTreeTupleIsPosting(itup))
1204  {
1205  ItemPointerData last;
1206  ItemPointer current;
1207 
1208  ItemPointerCopy(BTreeTupleGetHeapTID(itup), &last);
1209 
1210  for (int i = 1; i < BTreeTupleGetNPosting(itup); i++)
1211  {
1212 
1213  current = BTreeTupleGetPostingN(itup, i);
1214 
1215  if (ItemPointerCompare(current, &last) <= 0)
1216  {
1217  char *itid = psprintf("(%u,%u)", state->targetblock, offset);
1218 
1219  ereport(ERROR,
1220  (errcode(ERRCODE_INDEX_CORRUPTED),
1221  errmsg_internal("posting list contains misplaced TID in index \"%s\"",
1222  RelationGetRelationName(state->rel)),
1223  errdetail_internal("Index tid=%s posting list offset=%d page lsn=%X/%X.",
1224  itid, i,
1225  LSN_FORMAT_ARGS(state->targetlsn))));
1226  }
1227 
1228  ItemPointerCopy(current, &last);
1229  }
1230  }
1231 
1232  /* Build insertion scankey for current page offset */
1233  skey = bt_mkscankey_pivotsearch(state->rel, itup);
1234 
1235  /*
1236  * Make sure tuple size does not exceed the relevant BTREE_VERSION
1237  * specific limit.
1238  *
1239  * BTREE_VERSION 4 (which introduced heapkeyspace rules) requisitioned
1240  * a small amount of space from BTMaxItemSize() in order to ensure
1241  * that suffix truncation always has enough space to add an explicit
1242  * heap TID back to a tuple -- we pessimistically assume that every
1243  * newly inserted tuple will eventually need to have a heap TID
1244  * appended during a future leaf page split, when the tuple becomes
1245  * the basis of the new high key (pivot tuple) for the leaf page.
1246  *
1247  * Since the reclaimed space is reserved for that purpose, we must not
1248  * enforce the slightly lower limit when the extra space has been used
1249  * as intended. In other words, there is only a cross-version
1250  * difference in the limit on tuple size within leaf pages.
1251  *
1252  * Still, we're particular about the details within BTREE_VERSION 4
1253  * internal pages. Pivot tuples may only use the extra space for its
1254  * designated purpose. Enforce the lower limit for pivot tuples when
1255  * an explicit heap TID isn't actually present. (In all other cases
1256  * suffix truncation is guaranteed to generate a pivot tuple that's no
1257  * larger than the firstright tuple provided to it by its caller.)
1258  */
1259  lowersizelimit = skey->heapkeyspace &&
1260  (P_ISLEAF(topaque) || BTreeTupleGetHeapTID(itup) == NULL);
1261  if (tupsize > (lowersizelimit ? BTMaxItemSize(state->target) :
1262  BTMaxItemSizeNoHeapTid(state->target)))
1263  {
1264  ItemPointer tid = BTreeTupleGetPointsToTID(itup);
1265  char *itid,
1266  *htid;
1267 
1268  itid = psprintf("(%u,%u)", state->targetblock, offset);
1269  htid = psprintf("(%u,%u)",
1270  ItemPointerGetBlockNumberNoCheck(tid),
1271  ItemPointerGetOffsetNumberNoCheck(tid));
1272 
1273  ereport(ERROR,
1274  (errcode(ERRCODE_INDEX_CORRUPTED),
1275  errmsg("index row size %zu exceeds maximum for index \"%s\"",
1276  tupsize, RelationGetRelationName(state->rel)),
1277  errdetail_internal("Index tid=%s points to %s tid=%s page lsn=%X/%X.",
1278  itid,
1279  P_ISLEAF(topaque) ? "heap" : "index",
1280  htid,
1281  LSN_FORMAT_ARGS(state->targetlsn))));
1282  }
1283 
1284  /* Fingerprint leaf page tuples (those that point to the heap) */
1285  if (state->heapallindexed && P_ISLEAF(topaque) && !ItemIdIsDead(itemid))
1286  {
1287  IndexTuple norm;
1288 
1289  if (BTreeTupleIsPosting(itup))
1290  {
1291  /* Fingerprint all elements as distinct "plain" tuples */
1292  for (int i = 0; i < BTreeTupleGetNPosting(itup); i++)
1293  {
1294  IndexTuple logtuple;
1295 
1296  logtuple = bt_posting_plain_tuple(itup, i);
1297  norm = bt_normalize_tuple(state, logtuple);
1298  bloom_add_element(state->filter, (unsigned char *) norm,
1299  IndexTupleSize(norm));
1300  /* Be tidy */
1301  if (norm != logtuple)
1302  pfree(norm);
1303  pfree(logtuple);
1304  }
1305  }
1306  else
1307  {
1308  norm = bt_normalize_tuple(state, itup);
1309  bloom_add_element(state->filter, (unsigned char *) norm,
1310  IndexTupleSize(norm));
1311  /* Be tidy */
1312  if (norm != itup)
1313  pfree(norm);
1314  }
1315  }
1316 
1317  /*
1318  * * High key check *
1319  *
1320  * If there is a high key (if this is not the rightmost page on its
1321  * entire level), check that high key actually is upper bound on all
1322  * page items. If this is a posting list tuple, we'll need to set
1323  * scantid to be highest TID in posting list.
1324  *
1325  * We prefer to check all items against high key rather than checking
1326  * just the last and trusting that the operator class obeys the
1327  * transitive law (which implies that all previous items also
1328  * respected the high key invariant if they pass the item order
1329  * check).
1330  *
1331  * Ideally, we'd compare every item in the index against every other
1332  * item in the index, and not trust opclass obedience of the
1333  * transitive law to bridge the gap between children and their
1334  * grandparents (as well as great-grandparents, and so on). We don't
1335  * go to those lengths because that would be prohibitively expensive,
1336  * and probably not markedly more effective in practice.
1337  *
1338  * On the leaf level, we check that the key is <= the highkey.
1339  * However, on non-leaf levels we check that the key is < the highkey,
1340  * because the high key is "just another separator" rather than a copy
1341  * of some existing key item; we expect it to be unique among all keys
1342  * on the same level. (Suffix truncation will sometimes produce a
1343  * leaf highkey that is an untruncated copy of the lastleft item, but
1344  * never any other item, which necessitates weakening the leaf level
1345  * check to <=.)
1346  *
1347  * Full explanation for why a highkey is never truly a copy of another
1348  * item from the same level on internal levels:
1349  *
1350  * While the new left page's high key is copied from the first offset
1351  * on the right page during an internal page split, that's not the
1352  * full story. In effect, internal pages are split in the middle of
1353  * the firstright tuple, not between the would-be lastleft and
1354  * firstright tuples: the firstright key ends up on the left side as
1355  * left's new highkey, and the firstright downlink ends up on the
1356  * right side as right's new "negative infinity" item. The negative
1357  * infinity tuple is truncated to zero attributes, so we're only left
1358  * with the downlink. In other words, the copying is just an
1359  * implementation detail of splitting in the middle of a (pivot)
1360  * tuple. (See also: "Notes About Data Representation" in the nbtree
1361  * README.)
1362  */
1363  scantid = skey->scantid;
1364  if (state->heapkeyspace && BTreeTupleIsPosting(itup))
1365  skey->scantid = BTreeTupleGetMaxHeapTID(itup);
1366 
1367  if (!P_RIGHTMOST(topaque) &&
1368  !(P_ISLEAF(topaque) ? invariant_leq_offset(state, skey, P_HIKEY) :
1369  invariant_l_offset(state, skey, P_HIKEY)))
1370  {
1371  ItemPointer tid = BTreeTupleGetPointsToTID(itup);
1372  char *itid,
1373  *htid;
1374 
1375  itid = psprintf("(%u,%u)", state->targetblock, offset);
1376  htid = psprintf("(%u,%u)",
1377  ItemPointerGetBlockNumberNoCheck(tid),
1378  ItemPointerGetOffsetNumberNoCheck(tid));
1379 
1380  ereport(ERROR,
1381  (errcode(ERRCODE_INDEX_CORRUPTED),
1382  errmsg("high key invariant violated for index \"%s\"",
1383  RelationGetRelationName(state->rel)),
1384  errdetail_internal("Index tid=%s points to %s tid=%s page lsn=%X/%X.",
1385  itid,
1386  P_ISLEAF(topaque) ? "heap" : "index",
1387  htid,
1388  LSN_FORMAT_ARGS(state->targetlsn))));
1389  }
1390  /* Reset, in case scantid was set to (itup) posting tuple's max TID */
1391  skey->scantid = scantid;
1392 
1393  /*
1394  * * Item order check *
1395  *
1396  * Check that items are stored on page in logical order, by checking
1397  * current item is strictly less than next item (if any).
1398  */
1399  if (OffsetNumberNext(offset) <= max &&
1400  !invariant_l_offset(state, skey, OffsetNumberNext(offset)))
1401  {
1402  ItemPointer tid;
1403  char *itid,
1404  *htid,
1405  *nitid,
1406  *nhtid;
1407 
1408  itid = psprintf("(%u,%u)", state->targetblock, offset);
1409  tid = BTreeTupleGetPointsToTID(itup);
1410  htid = psprintf("(%u,%u)",
1411  ItemPointerGetBlockNumberNoCheck(tid),
1412  ItemPointerGetOffsetNumberNoCheck(tid));
1413  nitid = psprintf("(%u,%u)", state->targetblock,
1414  OffsetNumberNext(offset));
1415 
1416  /* Reuse itup to get pointed-to heap location of second item */
1417  itemid = PageGetItemIdCareful(state, state->targetblock,
1418  state->target,
1419  OffsetNumberNext(offset));
1420  itup = (IndexTuple) PageGetItem(state->target, itemid);
1421  tid = BTreeTupleGetPointsToTID(itup);
1422  nhtid = psprintf("(%u,%u)",
1423  ItemPointerGetBlockNumberNoCheck(tid),
1424  ItemPointerGetOffsetNumberNoCheck(tid));
1425 
1426  ereport(ERROR,
1427  (errcode(ERRCODE_INDEX_CORRUPTED),
1428  errmsg("item order invariant violated for index \"%s\"",
1429  RelationGetRelationName(state->rel)),
1430  errdetail_internal("Lower index tid=%s (points to %s tid=%s) "
1431  "higher index tid=%s (points to %s tid=%s) "
1432  "page lsn=%X/%X.",
1433  itid,
1434  P_ISLEAF(topaque) ? "heap" : "index",
1435  htid,
1436  nitid,
1437  P_ISLEAF(topaque) ? "heap" : "index",
1438  nhtid,
1439  LSN_FORMAT_ARGS(state->targetlsn))));
1440  }
1441 
1442  /*
1443  * * Last item check *
1444  *
1445  * Check last item against next/right page's first data item's when
1446  * last item on page is reached. This additional check will detect
1447  * transposed pages iff the supposed right sibling page happens to
1448  * belong before target in the key space. (Otherwise, a subsequent
1449  * heap verification will probably detect the problem.)
1450  *
1451  * This check is similar to the item order check that will have
1452  * already been performed for every other "real" item on target page
1453  * when last item is checked. The difference is that the next item
1454  * (the item that is compared to target's last item) needs to come
1455  * from the next/sibling page. There may not be such an item
1456  * available from sibling for various reasons, though (e.g., target is
1457  * the rightmost page on level).
1458  */
1459  else if (offset == max)
1460  {
1461  BTScanInsert rightkey;
1462 
1463  /* Get item in next/right page */
1464  rightkey = bt_right_page_check_scankey(state);
1465 
1466  if (rightkey &&
1467  !invariant_g_offset(state, rightkey, max))
1468  {
1469  /*
1470  * As explained at length in bt_right_page_check_scankey(),
1471  * there is a known !readonly race that could account for
1472  * apparent violation of invariant, which we must check for
1473  * before actually proceeding with raising error. Our canary
1474  * condition is that target page was deleted.
1475  */
1476  if (!state->readonly)
1477  {
1478  /* Get fresh copy of target page */
1479  state->target = palloc_btree_page(state, state->targetblock);
1480  /* Note that we deliberately do not update target LSN */
1481  topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
1482 
1483  /*
1484  * All !readonly checks now performed; just return
1485  */
1486  if (P_IGNORE(topaque))
1487  return;
1488  }
1489 
1490  ereport(ERROR,
1491  (errcode(ERRCODE_INDEX_CORRUPTED),
1492  errmsg("cross page item order invariant violated for index \"%s\"",
1493  RelationGetRelationName(state->rel)),
1494  errdetail_internal("Last item on page tid=(%u,%u) page lsn=%X/%X.",
1495  state->targetblock, offset,
1496  LSN_FORMAT_ARGS(state->targetlsn))));
1497  }
1498  }
1499 
1500  /*
1501  * * Downlink check *
1502  *
1503  * Additional check of child items iff this is an internal page and
1504  * caller holds a ShareLock. This happens for every downlink (item)
1505  * in target excluding the negative-infinity downlink (again, this is
1506  * because it has no useful value to compare).
1507  */
1508  if (!P_ISLEAF(topaque) && state->readonly)
1509  bt_child_check(state, skey, offset);
1510  }
1511 
1512  /*
1513  * Special case bt_child_highkey_check() call
1514  *
1515  * We don't pass a real downlink, but we've to finish the level
1516  * processing. If condition is satisfied, we've already processed all the
1517  * downlinks from the target level. But there still might be pages to the
1518  * right of the child page pointer to by our rightmost downlink. And they
1519  * might have missing downlinks. This final call checks for them.
1520  */
1521  if (!P_ISLEAF(topaque) && P_RIGHTMOST(topaque) && state->readonly)
1522  {
1523  bt_child_highkey_check(state, InvalidOffsetNumber,
1524  NULL, topaque->btpo_level);
1525  }
1526 }
void bloom_add_element(bloom_filter *filter, unsigned char *elem, size_t len)
Definition: bloomfilter.c:135
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
int32 ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
Definition: itemptr.c:52
#define ItemPointerGetBlockNumberNoCheck(pointer)
Definition: itemptr.h:89
#define ItemPointerCopy(fromPointer, toPointer)
Definition: itemptr.h:161
#define ItemPointerGetOffsetNumberNoCheck(pointer)
Definition: itemptr.h:108
static uint16 BTreeTupleGetNPosting(IndexTuple posting)
Definition: nbtree.h:511
#define BTMaxItemSizeNoHeapTid(page)
Definition: nbtree.h:168
#define BTMaxItemSize(page)
Definition: nbtree.h:162
static ItemPointer BTreeTupleGetMaxHeapTID(IndexTuple itup)
Definition: nbtree.h:657
static ItemPointer BTreeTupleGetHeapTID(IndexTuple itup)
Definition: nbtree.h:631
#define BTreeTupleGetNAtts(itup, rel)
Definition: nbtree.h:570
bool _bt_check_natts(Relation rel, bool heapkeyspace, Page page, OffsetNumber offnum)
Definition: nbtutils.c:2466
#define InvalidOffsetNumber
Definition: off.h:26
char * psprintf(const char *fmt,...)
Definition: psprintf.c:46
ItemPointer scantid
Definition: nbtree.h:789
bool heapkeyspace
Definition: nbtree.h:784
static bool invariant_l_offset(BtreeCheckState *state, BTScanInsert key, OffsetNumber upperbound)
static ItemPointer BTreeTupleGetPointsToTID(IndexTuple itup)
static IndexTuple bt_posting_plain_tuple(IndexTuple itup, int n)
static bool invariant_leq_offset(BtreeCheckState *state, BTScanInsert key, OffsetNumber upperbound)
static bool bt_rootdescend(BtreeCheckState *state, IndexTuple itup)
static BTScanInsert bt_right_page_check_scankey(BtreeCheckState *state)
static bool invariant_g_offset(BtreeCheckState *state, BTScanInsert key, OffsetNumber lowerbound)
static IndexTuple bt_normalize_tuple(BtreeCheckState *state, IndexTuple itup)
static void bt_child_check(BtreeCheckState *state, BTScanInsert targetkey, OffsetNumber downlinkoffnum)

References _bt_check_natts(), bloom_add_element(), bt_child_check(), bt_child_highkey_check(), bt_mkscankey_pivotsearch(), bt_normalize_tuple(), bt_posting_plain_tuple(), bt_right_page_check_scankey(), bt_rootdescend(), BTMaxItemSize, BTMaxItemSizeNoHeapTid, BTPageOpaqueData::btpo_level, BTreeTupleGetHeapTID(), BTreeTupleGetMaxHeapTID(), BTreeTupleGetNAtts, BTreeTupleGetNPosting(), BTreeTupleGetPointsToTID(), BTreeTupleGetPostingN(), BTreeTupleIsPosting(), CHECK_FOR_INTERRUPTS, DEBUG2, elog, ereport, errcode(), errdetail_internal(), errhint(), errmsg(), errmsg_internal(), ERROR, BTScanInsertData::heapkeyspace, i, IndexTupleSize, InvalidOffsetNumber, invariant_g_offset(), invariant_l_offset(), invariant_leq_offset(), ItemIdGetLength, ItemIdIsDead, ItemPointerCompare(), ItemPointerCopy, ItemPointerGetBlockNumber, ItemPointerGetBlockNumberNoCheck, ItemPointerGetOffsetNumber, ItemPointerGetOffsetNumberNoCheck, LSN_FORMAT_ARGS, offset_is_negative_infinity(), OffsetNumberNext, P_FIRSTDATAKEY, P_HIKEY, P_IGNORE, P_ISLEAF, P_RIGHTMOST, PageGetItem, PageGetItemIdCareful(), PageGetMaxOffsetNumber, PageGetSpecialPointer, palloc_btree_page(), pfree(), psprintf(), RelationGetRelationName, and BTScanInsertData::scantid.

Referenced by bt_check_level_from_leftmost().

◆ bt_tuple_present_callback()

static void bt_tuple_present_callback ( Relation  index,
ItemPointer  tid,
Datum values,
bool isnull,
bool  tupleIsAlive,
void *  checkstate 
)
static

Definition at line 2458 of file verify_nbtree.c.

2460 {
2461  BtreeCheckState *state = (BtreeCheckState *) checkstate;
2462  IndexTuple itup,
2463  norm;
2464 
2465  Assert(state->heapallindexed);
2466 
2467  /* Generate a normalized index tuple for fingerprinting */
2468  itup = index_form_tuple(RelationGetDescr(index), values, isnull);
2469  itup->t_tid = *tid;
2470  norm = bt_normalize_tuple(state, itup);
2471 
2472  /* Probe Bloom filter -- tuple should be present */
2473  if (bloom_lacks_element(state->filter, (unsigned char *) norm,
2474  IndexTupleSize(norm)))
2475  ereport(ERROR,
2477  errmsg("heap tuple (%u,%u) from table \"%s\" lacks matching index tuple within index \"%s\"",
2478  ItemPointerGetBlockNumber(&(itup->t_tid)),
2480  RelationGetRelationName(state->heaprel),
2482  !state->readonly
2483  ? errhint("Retrying verification using the function bt_index_parent_check() might provide a more specific error.")
2484  : 0));
2485 
2486  state->heaptuplespresent++;
2487  pfree(itup);
2488  /* Cannot leak memory here */
2489  if (norm != itup)
2490  pfree(norm);
2491 }
bool bloom_lacks_element(bloom_filter *filter, unsigned char *elem, size_t len)
Definition: bloomfilter.c:157
static Datum values[MAXATTR]
Definition: bootstrap.c:156
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:42
Definition: type.h:90

References Assert(), bloom_lacks_element(), bt_normalize_tuple(), ereport, errcode(), ERRCODE_DATA_CORRUPTED, errhint(), errmsg(), ERROR, index_form_tuple(), IndexTupleSize, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, pfree(), RelationGetDescr, RelationGetRelationName, IndexTupleData::t_tid, and values.

Referenced by bt_check_every_level().

◆ btree_index_checkable()

static void btree_index_checkable ( Relation  rel)
inlinestatic

Definition at line 348 of file verify_nbtree.c.

349 {
350  if (rel->rd_rel->relkind != RELKIND_INDEX ||
351  rel->rd_rel->relam != BTREE_AM_OID)
352  ereport(ERROR,
353  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
354  errmsg("only B-Tree indexes are supported as targets for verification"),
355  errdetail("Relation \"%s\" is not a B-Tree index.",
356  RelationGetRelationName(rel))));
357 
358  if (RELATION_IS_OTHER_TEMP(rel))
359  ereport(ERROR,
360  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
361  errmsg("cannot access temporary tables of other sessions"),
362  errdetail("Index \"%s\" is associated with temporary relation.",
363  RelationGetRelationName(rel))));
364 
365  if (!rel->rd_index->indisvalid)
366  ereport(ERROR,
367  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
368  errmsg("cannot check index \"%s\"",
370  errdetail("Index is not valid.")));
371 }
int errdetail(const char *fmt,...)
Definition: elog.c:1037
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:632
Form_pg_class rd_rel
Definition: rel.h:109

References ereport, errcode(), errdetail(), errmsg(), ERROR, RelationData::rd_index, RelationData::rd_rel, RELATION_IS_OTHER_TEMP, and RelationGetRelationName.

Referenced by bt_index_check_internal().

◆ btree_index_mainfork_expected()

static bool btree_index_mainfork_expected ( Relation  rel)
inlinestatic

Definition at line 382 of file verify_nbtree.c.

383 {
384  if (rel->rd_rel->relpersistence != RELPERSISTENCE_UNLOGGED ||
386  return true;
387 
388  ereport(DEBUG1,
389  (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
390  errmsg("cannot verify unlogged index \"%s\" during recovery, skipping",
391  RelationGetRelationName(rel))));
392 
393  return false;
394 }
bool RecoveryInProgress(void)
Definition: xlog.c:8404

References DEBUG1, ereport, errcode(), errmsg(), RelationData::rd_rel, RecoveryInProgress(), and RelationGetRelationName.

Referenced by bt_index_check_internal().

◆ BTreeTupleGetHeapTIDCareful()

static ItemPointer BTreeTupleGetHeapTIDCareful ( BtreeCheckState state,
IndexTuple  itup,
bool  nonpivot 
)
inlinestatic

Definition at line 3172 of file verify_nbtree.c.

3174 {
3175  ItemPointer htid;
3176 
3177  /*
3178  * Caller determines whether this is supposed to be a pivot or non-pivot
3179  * tuple using page type and item offset number. Verify that tuple
3180  * metadata agrees with this.
3181  */
3182  Assert(state->heapkeyspace);
3183  if (BTreeTupleIsPivot(itup) && nonpivot)
3184  ereport(ERROR,
3185  (errcode(ERRCODE_INDEX_CORRUPTED),
3186  errmsg_internal("block %u or its right sibling block or child block in index \"%s\" has unexpected pivot tuple",
3187  state->targetblock,
3188  RelationGetRelationName(state->rel))));
3189 
3190  if (!BTreeTupleIsPivot(itup) && !nonpivot)
3191  ereport(ERROR,
3192  (errcode(ERRCODE_INDEX_CORRUPTED),
3193  errmsg_internal("block %u or its right sibling block or child block in index \"%s\" has unexpected non-pivot tuple",
3194  state->targetblock,
3195  RelationGetRelationName(state->rel))));
3196 
3197  htid = BTreeTupleGetHeapTID(itup);
3198  if (!ItemPointerIsValid(htid) && nonpivot)
3199  ereport(ERROR,
3200  (errcode(ERRCODE_INDEX_CORRUPTED),
3201  errmsg("block %u or its right sibling block or child block in index \"%s\" contains non-pivot tuple that lacks a heap TID",
3202  state->targetblock,
3203  RelationGetRelationName(state->rel))));
3204 
3205  return htid;
3206 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82

References Assert(), BTreeTupleGetHeapTID(), BTreeTupleIsPivot(), ereport, errcode(), errmsg(), errmsg_internal(), ERROR, ItemPointerIsValid, and RelationGetRelationName.

Referenced by invariant_l_nontarget_offset(), and invariant_l_offset().

◆ BTreeTupleGetPointsToTID()

static ItemPointer BTreeTupleGetPointsToTID ( IndexTuple  itup)
inlinestatic

Definition at line 3220 of file verify_nbtree.c.

3221 {
3222  /*
3223  * Rely on the assumption that !heapkeyspace internal page data items will
3224  * correctly return TID with downlink here -- BTreeTupleGetHeapTID() won't
3225  * recognize it as a pivot tuple, but everything still works out because
3226  * the t_tid field is still returned
3227  */
3228  if (!BTreeTupleIsPivot(itup))
3229  return BTreeTupleGetHeapTID(itup);
3230 
3231  /* Pivot tuple returns TID with downlink block (heapkeyspace variant) */
3232  return &itup->t_tid;
3233 }

References BTreeTupleGetHeapTID(), BTreeTupleIsPivot(), and IndexTupleData::t_tid.

Referenced by bt_target_page_check().

◆ invariant_g_offset()

static bool invariant_g_offset ( BtreeCheckState state,
BTScanInsert  key,
OffsetNumber  lowerbound 
)
inlinestatic

Definition at line 2834 of file verify_nbtree.c.

2836 {
2837  int32 cmp;
2838 
2839  Assert(key->pivotsearch);
2840 
2841  cmp = _bt_compare(state->rel, key, state->target, lowerbound);
2842 
2843  /* pg_upgrade'd indexes may legally have equal sibling tuples */
2844  if (!key->heapkeyspace)
2845  return cmp >= 0;
2846 
2847  /*
2848  * No need to consider the possibility that scankey has attributes that we
2849  * need to force to be interpreted as negative infinity. _bt_compare() is
2850  * able to determine that scankey is greater than negative infinity. The
2851  * distinction between "==" and "<" isn't interesting here, since
2852  * corruption is indicated either way.
2853  */
2854  return cmp > 0;
2855 }
signed int int32
Definition: c.h:429
static int cmp(const chr *x, const chr *y, size_t len)
Definition: regc_locale.c:747

References _bt_compare(), Assert(), cmp(), and sort-test::key.

Referenced by bt_target_page_check().

◆ invariant_l_nontarget_offset()

static bool invariant_l_nontarget_offset ( BtreeCheckState state,
BTScanInsert  key,
BlockNumber  nontargetblock,
Page  nontarget,
OffsetNumber  upperbound 
)
inlinestatic

Definition at line 2870 of file verify_nbtree.c.

2873 {
2874  ItemId itemid;
2875  int32 cmp;
2876 
2877  Assert(key->pivotsearch);
2878 
2879  /* Verify line pointer before checking tuple */
2880  itemid = PageGetItemIdCareful(state, nontargetblock, nontarget,
2881  upperbound);
2882  cmp = _bt_compare(state->rel, key, nontarget, upperbound);
2883 
2884  /* pg_upgrade'd indexes may legally have equal sibling tuples */
2885  if (!key->heapkeyspace)
2886  return cmp <= 0;
2887 
2888  /* See invariant_l_offset() for an explanation of this extra step */
2889  if (cmp == 0)
2890  {
2891  IndexTuple child;
2892  int uppnkeyatts;
2893  ItemPointer childheaptid;
2894  BTPageOpaque copaque;
2895  bool nonpivot;
2896 
2897  child = (IndexTuple) PageGetItem(nontarget, itemid);
2898  copaque = (BTPageOpaque) PageGetSpecialPointer(nontarget);
2899  nonpivot = P_ISLEAF(copaque) && upperbound >= P_FIRSTDATAKEY(copaque);
2900 
2901  /* Get number of keys + heap TID for child/non-target item */
2902  uppnkeyatts = BTreeTupleGetNKeyAtts(child, state->rel);
2903  childheaptid = BTreeTupleGetHeapTIDCareful(state, child, nonpivot);
2904 
2905  /* Heap TID is tiebreaker key attribute */
2906  if (key->keysz == uppnkeyatts)
2907  return key->scantid == NULL && childheaptid != NULL;
2908 
2909  return key->keysz < uppnkeyatts;
2910  }
2911 
2912  return cmp < 0;
2913 }
static ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state, IndexTuple itup, bool nonpivot)
#define BTreeTupleGetNKeyAtts(itup, rel)
Definition: verify_nbtree.c:51

References _bt_compare(), Assert(), BTreeTupleGetHeapTIDCareful(), BTreeTupleGetNKeyAtts, cmp(), sort-test::key, P_FIRSTDATAKEY, P_ISLEAF, PageGetItem, PageGetItemIdCareful(), and PageGetSpecialPointer.

Referenced by bt_child_check().

◆ invariant_l_offset()

static bool invariant_l_offset ( BtreeCheckState state,
BTScanInsert  key,
OffsetNumber  upperbound 
)
inlinestatic

Definition at line 2748 of file verify_nbtree.c.

2750 {
2751  ItemId itemid;
2752  int32 cmp;
2753 
2754  Assert(key->pivotsearch);
2755 
2756  /* Verify line pointer before checking tuple */
2757  itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
2758  upperbound);
2759  /* pg_upgrade'd indexes may legally have equal sibling tuples */
2760  if (!key->heapkeyspace)
2761  return invariant_leq_offset(state, key, upperbound);
2762 
2763  cmp = _bt_compare(state->rel, key, state->target, upperbound);
2764 
2765  /*
2766  * _bt_compare() is capable of determining that a scankey with a
2767  * filled-out attribute is greater than pivot tuples where the comparison
2768  * is resolved at a truncated attribute (value of attribute in pivot is
2769  * minus infinity). However, it is not capable of determining that a
2770  * scankey is _less than_ a tuple on the basis of a comparison resolved at
2771  * _scankey_ minus infinity attribute. Complete an extra step to simulate
2772  * having minus infinity values for omitted scankey attribute(s).
2773  */
2774  if (cmp == 0)
2775  {
2776  BTPageOpaque topaque;
2777  IndexTuple ritup;
2778  int uppnkeyatts;
2779  ItemPointer rheaptid;
2780  bool nonpivot;
2781 
2782  ritup = (IndexTuple) PageGetItem(state->target, itemid);
2783  topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
2784  nonpivot = P_ISLEAF(topaque) && upperbound >= P_FIRSTDATAKEY(topaque);
2785 
2786  /* Get number of keys + heap TID for item to the right */
2787  uppnkeyatts = BTreeTupleGetNKeyAtts(ritup, state->rel);
2788  rheaptid = BTreeTupleGetHeapTIDCareful(state, ritup, nonpivot);
2789 
2790  /* Heap TID is tiebreaker key attribute */
2791  if (key->keysz == uppnkeyatts)
2792  return key->scantid == NULL && rheaptid != NULL;
2793 
2794  return key->keysz < uppnkeyatts;
2795  }
2796 
2797  return cmp < 0;
2798 }

References _bt_compare(), Assert(), BTreeTupleGetHeapTIDCareful(), BTreeTupleGetNKeyAtts, cmp(), invariant_leq_offset(), sort-test::key, P_FIRSTDATAKEY, P_ISLEAF, PageGetItem, PageGetItemIdCareful(), and PageGetSpecialPointer.

Referenced by bt_target_page_check().

◆ invariant_leq_offset()

static bool invariant_leq_offset ( BtreeCheckState state,
BTScanInsert  key,
OffsetNumber  upperbound 
)
inlinestatic

Definition at line 2811 of file verify_nbtree.c.

2813 {
2814  int32 cmp;
2815 
2816  Assert(key->pivotsearch);
2817 
2818  cmp = _bt_compare(state->rel, key, state->target, upperbound);
2819 
2820  return cmp <= 0;
2821 }

References _bt_compare(), Assert(), cmp(), and sort-test::key.

Referenced by bt_target_page_check(), and invariant_l_offset().

◆ offset_is_negative_infinity()

static bool offset_is_negative_infinity ( BTPageOpaque  opaque,
OffsetNumber  offset 
)
inlinestatic

Definition at line 2713 of file verify_nbtree.c.

2714 {
2715  /*
2716  * For internal pages only, the first item after high key, if any, is
2717  * negative infinity item. Internal pages always have a negative infinity
2718  * item, whereas leaf pages never have one. This implies that negative
2719  * infinity item is either first or second line item, or there is none
2720  * within page.
2721  *
2722  * Negative infinity items are a special case among pivot tuples. They
2723  * always have zero attributes, while all other pivot tuples always have
2724  * nkeyatts attributes.
2725  *
2726  * Right-most pages don't have a high key, but could be said to
2727  * conceptually have a "positive infinity" high key. Thus, there is a
2728  * symmetry between down link items in parent pages, and high keys in
2729  * children. Together, they represent the part of the key space that
2730  * belongs to each page in the index. For example, all children of the
2731  * root page will have negative infinity as a lower bound from root
2732  * negative infinity downlink, and positive infinity as an upper bound
2733  * (implicitly, from "imaginary" positive infinity high key in root).
2734  */
2735  return !P_ISLEAF(opaque) && offset == P_FIRSTDATAKEY(opaque);
2736 }

References P_FIRSTDATAKEY, and P_ISLEAF.

Referenced by bt_child_check(), bt_child_highkey_check(), and bt_target_page_check().

◆ PageGetItemIdCareful()

static ItemId PageGetItemIdCareful ( BtreeCheckState state,
BlockNumber  block,
Page  page,
OffsetNumber  offset 
)
static

Definition at line 3132 of file verify_nbtree.c.

3134 {
3135  ItemId itemid = PageGetItemId(page, offset);
3136 
3137  if (ItemIdGetOffset(itemid) + ItemIdGetLength(itemid) >
3138  BLCKSZ - MAXALIGN(sizeof(BTPageOpaqueData)))
3139  ereport(ERROR,
3140  (errcode(ERRCODE_INDEX_CORRUPTED),
3141  errmsg("line pointer points past end of tuple space in index \"%s\"",
3143  errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
3144  block, offset, ItemIdGetOffset(itemid),
3145  ItemIdGetLength(itemid),
3146  ItemIdGetFlags(itemid))));
3147 
3148  /*
3149  * Verify that line pointer isn't LP_REDIRECT or LP_UNUSED, since nbtree
3150  * never uses either. Verify that line pointer has storage, too, since
3151  * even LP_DEAD items should have storage within nbtree.
3152  */
3153  if (ItemIdIsRedirected(itemid) || !ItemIdIsUsed(itemid) ||
3154  ItemIdGetLength(itemid) == 0)
3155  ereport(ERROR,
3156  (errcode(ERRCODE_INDEX_CORRUPTED),
3157  errmsg("invalid line pointer storage in index \"%s\"",
3159  errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
3160  block, offset, ItemIdGetOffset(itemid),
3161  ItemIdGetLength(itemid),
3162  ItemIdGetFlags(itemid))));
3163 
3164  return itemid;
3165 }
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:234
#define ItemIdGetOffset(itemId)
Definition: itemid.h:65
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
#define ItemIdGetFlags(itemId)
Definition: itemid.h:71

References ereport, errcode(), errdetail_internal(), errmsg(), ERROR, ItemIdGetFlags, ItemIdGetLength, ItemIdGetOffset, ItemIdIsRedirected, ItemIdIsUsed, MAXALIGN, PageGetItemId, and RelationGetRelationName.

Referenced by bt_check_level_from_leftmost(), bt_child_check(), bt_child_highkey_check(), bt_downlink_missing_check(), bt_right_page_check_scankey(), bt_target_page_check(), invariant_l_nontarget_offset(), and invariant_l_offset().

◆ palloc_btree_page()

static Page palloc_btree_page ( BtreeCheckState state,
BlockNumber  blocknum 
)
static

Definition at line 2930 of file verify_nbtree.c.

2931 {
2932  Buffer buffer;
2933  Page page;
2934  BTPageOpaque opaque;
2935  OffsetNumber maxoffset;
2936 
2937  page = palloc(BLCKSZ);
2938 
2939  /*
2940  * We copy the page into local storage to avoid holding pin on the buffer
2941  * longer than we must.
2942  */
2943  buffer = ReadBufferExtended(state->rel, MAIN_FORKNUM, blocknum, RBM_NORMAL,
2944  state->checkstrategy);
2945  LockBuffer(buffer, BT_READ);
2946 
2947  /*
2948  * Perform the same basic sanity checking that nbtree itself performs for
2949  * every page:
2950  */
2951  _bt_checkpage(state->rel, buffer);
2952 
2953  /* Only use copy of page in palloc()'d memory */
2954  memcpy(page, BufferGetPage(buffer), BLCKSZ);
2955  UnlockReleaseBuffer(buffer);
2956 
2957  opaque = (BTPageOpaque) PageGetSpecialPointer(page);
2958 
2959  if (P_ISMETA(opaque) && blocknum != BTREE_METAPAGE)
2960  ereport(ERROR,
2961  (errcode(ERRCODE_INDEX_CORRUPTED),
2962  errmsg("invalid meta page found at block %u in index \"%s\"",
2963  blocknum, RelationGetRelationName(state->rel))));
2964 
2965  /* Check page from block that ought to be meta page */
2966  if (blocknum == BTREE_METAPAGE)
2967  {
2968  BTMetaPageData *metad = BTPageGetMeta(page);
2969 
2970  if (!P_ISMETA(opaque) ||
2971  metad->btm_magic != BTREE_MAGIC)
2972  ereport(ERROR,
2973  (errcode(ERRCODE_INDEX_CORRUPTED),
2974  errmsg("index \"%s\" meta page is corrupt",
2975  RelationGetRelationName(state->rel))));
2976 
2977  if (metad->btm_version < BTREE_MIN_VERSION ||
2978  metad->btm_version > BTREE_VERSION)
2979  ereport(ERROR,
2980  (errcode(ERRCODE_INDEX_CORRUPTED),
2981  errmsg("version mismatch in index \"%s\": file version %d, "
2982  "current version %d, minimum supported version %d",
2984  metad->btm_version, BTREE_VERSION,
2985  BTREE_MIN_VERSION)));
2986 
2987  /* Finished with metapage checks */
2988  return page;
2989  }
2990 
2991  /*
2992  * Deleted pages that still use the old 32-bit XID representation have no
2993  * sane "level" field because they type pun the field, but all other pages
2994  * (including pages deleted on Postgres 14+) have a valid value.
2995  */
2996  if (!P_ISDELETED(opaque) || P_HAS_FULLXID(opaque))
2997  {
2998  /* Okay, no reason not to trust btpo_level field from page */
2999 
3000  if (P_ISLEAF(opaque) && opaque->btpo_level != 0)
3001  ereport(ERROR,
3002  (errcode(ERRCODE_INDEX_CORRUPTED),
3003  errmsg_internal("invalid leaf page level %u for block %u in index \"%s\"",
3004  opaque->btpo_level, blocknum,
3005  RelationGetRelationName(state->rel))));
3006 
3007  if (!P_ISLEAF(opaque) && opaque->btpo_level == 0)
3008  ereport(ERROR,
3009  (errcode(ERRCODE_INDEX_CORRUPTED),
3010  errmsg_internal("invalid internal page level 0 for block %u in index \"%s\"",
3011  blocknum,
3012  RelationGetRelationName(state->rel))));
3013  }
3014 
3015  /*
3016  * Sanity checks for number of items on page.
3017  *
3018  * As noted at the beginning of _bt_binsrch(), an internal page must have
3019  * children, since there must always be a negative infinity downlink
3020  * (there may also be a highkey). In the case of non-rightmost leaf
3021  * pages, there must be at least a highkey. The exceptions are deleted
3022  * pages, which contain no items.
3023  *
3024  * This is correct when pages are half-dead, since internal pages are
3025  * never half-dead, and leaf pages must have a high key when half-dead
3026  * (the rightmost page can never be deleted). It's also correct with
3027  * fully deleted pages: _bt_unlink_halfdead_page() doesn't change anything
3028  * about the target page other than setting the page as fully dead, and
3029  * setting its xact field. In particular, it doesn't change the sibling
3030  * links in the deletion target itself, since they're required when index
3031  * scans land on the deletion target, and then need to move right (or need
3032  * to move left, in the case of backward index scans).
3033  */
3034  maxoffset = PageGetMaxOffsetNumber(page);
3035  if (maxoffset > MaxIndexTuplesPerPage)
3036  ereport(ERROR,
3037  (errcode(ERRCODE_INDEX_CORRUPTED),
3038  errmsg("Number of items on block %u of index \"%s\" exceeds MaxIndexTuplesPerPage (%u)",
3039  blocknum, RelationGetRelationName(state->rel),
3041 
3042  if (!P_ISLEAF(opaque) && !P_ISDELETED(opaque) && maxoffset < P_FIRSTDATAKEY(opaque))
3043  ereport(ERROR,
3044  (errcode(ERRCODE_INDEX_CORRUPTED),
3045  errmsg("internal block %u in index \"%s\" lacks high key and/or at least one downlink",
3046  blocknum, RelationGetRelationName(state->rel))));
3047 
3048  if (P_ISLEAF(opaque) && !P_ISDELETED(opaque) && !P_RIGHTMOST(opaque) && maxoffset < P_HIKEY)
3049  ereport(ERROR,
3050  (errcode(ERRCODE_INDEX_CORRUPTED),
3051  errmsg("non-rightmost leaf block %u in index \"%s\" lacks high key item",
3052  blocknum, RelationGetRelationName(state->rel))));
3053 
3054  /*
3055  * In general, internal pages are never marked half-dead, except on
3056  * versions of Postgres prior to 9.4, where it can be valid transient
3057  * state. This state is nonetheless treated as corruption by VACUUM
3058  * from version 9.4 on, so do the same here. See _bt_pagedel() for full
3059  * details.
3060  */
3061  if (!P_ISLEAF(opaque) && P_ISHALFDEAD(opaque))
3062  ereport(ERROR,
3063  (errcode(ERRCODE_INDEX_CORRUPTED),
3064  errmsg("internal page block %u in index \"%s\" is half-dead",
3065  blocknum, RelationGetRelationName(state->rel)),
3066  errhint("This can be caused by an interrupted VACUUM in version 9.3 or older, before upgrade. Please REINDEX it.")));
3067 
3068  /*
3069  * Check that internal pages have no garbage items, and that no page has
3070  * an invalid combination of deletion-related page level flags
3071  */
3072  if (!P_ISLEAF(opaque) && P_HAS_GARBAGE(opaque))
3073  ereport(ERROR,
3074  (errcode(ERRCODE_INDEX_CORRUPTED),
3075  errmsg_internal("internal page block %u in index \"%s\" has garbage items",
3076  blocknum, RelationGetRelationName(state->rel))));
3077 
3078  if (P_HAS_FULLXID(opaque) && !P_ISDELETED(opaque))
3079  ereport(ERROR,
3080  (errcode(ERRCODE_INDEX_CORRUPTED),
3081  errmsg_internal("full transaction id page flag appears in non-deleted block %u in index \"%s\"",
3082  blocknum, RelationGetRelationName(state->rel))));
3083 
3084  if (P_ISDELETED(opaque) && P_ISHALFDEAD(opaque))
3085  ereport(ERROR,
3086  (errcode(ERRCODE_INDEX_CORRUPTED),
3087  errmsg_internal("deleted page block %u in index \"%s\" is half-dead",
3088  blocknum, RelationGetRelationName(state->rel))));
3089 
3090  return page;
3091 }
#define MaxIndexTuplesPerPage
Definition: itup.h:144
void * palloc(Size size)
Definition: mcxt.c:1062
#define BTREE_MIN_VERSION
Definition: nbtree.h:149
#define P_HAS_GARBAGE(opaque)
Definition: nbtree.h:225
#define P_ISMETA(opaque)
Definition: nbtree.h:222
#define BTREE_MAGIC
Definition: nbtree.h:147
#define BTREE_VERSION
Definition: nbtree.h:148
uint32 btm_version
Definition: nbtree.h:104
uint32 btm_magic
Definition: nbtree.h:103

References _bt_checkpage(), BT_READ, BTMetaPageData::btm_magic, BTMetaPageData::btm_version, BTPageGetMeta, BTPageOpaqueData::btpo_level, BTREE_MAGIC, BTREE_METAPAGE, BTREE_MIN_VERSION, BTREE_VERSION, BufferGetPage, ereport, errcode(), errhint(), errmsg(), errmsg_internal(), ERROR, LockBuffer(), MAIN_FORKNUM, MaxIndexTuplesPerPage, P_FIRSTDATAKEY, P_HAS_FULLXID, P_HAS_GARBAGE, P_HIKEY, P_ISDELETED, P_ISHALFDEAD, P_ISLEAF, P_ISMETA, P_RIGHTMOST, PageGetMaxOffsetNumber, PageGetSpecialPointer, palloc(), RBM_NORMAL, ReadBufferExtended(), RelationGetRelationName, and UnlockReleaseBuffer().

Referenced by bt_check_every_level(), bt_check_level_from_leftmost(), bt_child_check(), bt_child_highkey_check(), bt_downlink_missing_check(), bt_right_page_check_scankey(), and bt_target_page_check().

◆ PG_FUNCTION_INFO_V1() [1/2]

PG_FUNCTION_INFO_V1 ( bt_index_check  )

◆ PG_FUNCTION_INFO_V1() [2/2]

PG_FUNCTION_INFO_V1 ( bt_index_parent_check  )

Variable Documentation

◆ PG_MODULE_MAGIC

PG_MODULE_MAGIC

Definition at line 44 of file verify_nbtree.c.