verify_nbtree.c
1 /*-------------------------------------------------------------------------
2  *
3  * verify_nbtree.c
4  * Verifies the integrity of nbtree indexes based on invariants.
5  *
6  * For B-Tree indexes, verification includes checking that each page in the
7  * target index has items in logical order as reported by an insertion scankey
8  * (the insertion scankey sort-wise NULL semantics are needed for
9  * verification).
10  *
11  * When index-to-heap verification is requested, a Bloom filter is used to
12  * fingerprint all tuples in the target index, as the index is traversed to
13  * verify its structure. A heap scan later uses Bloom filter probes to verify
14  * that every visible heap tuple has a matching index tuple.
15  *
16  *
17  * Copyright (c) 2017-2019, PostgreSQL Global Development Group
18  *
19  * IDENTIFICATION
20  * contrib/amcheck/verify_nbtree.c
21  *
22  *-------------------------------------------------------------------------
23  */
24 #include "postgres.h"
25 
26 #include "access/htup_details.h"
27 #include "access/nbtree.h"
28 #include "access/table.h"
29 #include "access/tableam.h"
30 #include "access/transam.h"
31 #include "access/xact.h"
32 #include "catalog/index.h"
33 #include "catalog/pg_am.h"
34 #include "commands/tablecmds.h"
35 #include "lib/bloomfilter.h"
36 #include "miscadmin.h"
37 #include "storage/lmgr.h"
38 #include "storage/smgr.h"
39 #include "utils/memutils.h"
40 #include "utils/snapmgr.h"
41 
42 
43 PG_MODULE_MAGIC;
44 
45 /*
46  * A B-Tree cannot possibly have this many levels, since there must be one
47  * block per level, which is bound by the range of BlockNumber:
48  */
49 #define InvalidBtreeLevel ((uint32) InvalidBlockNumber)
50 #define BTreeTupleGetNKeyAtts(itup, rel) \
51  Min(IndexRelationGetNumberOfKeyAttributes(rel), BTreeTupleGetNAtts(itup, rel))
52 
53 /*
54  * State associated with verifying a B-Tree index
55  *
56  * target is the point of reference for a verification operation.
57  *
58  * Other B-Tree pages may be allocated, but those are always auxiliary (e.g.,
59  * they are the current target's child pages). Conceptually, problems are only
60  * ever found in the current target page (or for a particular heap tuple during
61  * heapallindexed verification). Each page found by verification's left/right,
62  * top/bottom scan becomes the target exactly once.
63  */
64 typedef struct BtreeCheckState
65 {
66  /*
67  * Unchanging state, established at start of verification:
68  */
69 
70  /* B-Tree Index Relation and associated heap relation */
71  Relation rel;
72  Relation heaprel;
73  /* rel is heapkeyspace index? */
74  bool heapkeyspace;
75  /* ShareLock held on heap/index, rather than AccessShareLock? */
76  bool readonly;
77  /* Also verifying heap has no unindexed tuples? */
78  bool heapallindexed;
79  /* Also making sure non-pivot tuples can be found by new search? */
80  bool rootdescend;
81  /* Per-page context */
82  MemoryContext targetcontext;
83  /* Buffer access strategy */
84  BufferAccessStrategy checkstrategy;
85 
86  /*
87  * Mutable state, for verification of particular page:
88  */
89 
90  /* Current target page */
91  Page target;
92  /* Target block number */
93  BlockNumber targetblock;
94  /* Target page's LSN */
95  XLogRecPtr targetlsn;
96 
97  /*
98  * Mutable state, for optional heapallindexed verification:
99  */
100 
101  /* Bloom filter fingerprints B-Tree index */
102  bloom_filter *filter;
103  /* Bloom filter fingerprints downlink blocks within tree */
104  bloom_filter *downlinkfilter;
105  /* Right half of incomplete split marker */
106  bool rightsplit;
107  /* Debug counter */
108  int64 heaptuplespresent;
109 } BtreeCheckState;
110 
111 /*
112  * Starting point for verifying an entire B-Tree index level
113  */
114 typedef struct BtreeLevel
115 {
116  /* Level number (0 is leaf page level). */
117  uint32 level;
118 
119  /* Left most block on level. Scan of level begins here. */
120  BlockNumber leftmost;
121 
122  /* Is this level reported as "true" root level by meta page? */
123  bool istruerootlevel;
124 } BtreeLevel;
125 
126 PG_FUNCTION_INFO_V1(bt_index_check);
127 PG_FUNCTION_INFO_V1(bt_index_parent_check);
128 
129 static void bt_index_check_internal(Oid indrelid, bool parentcheck,
130  bool heapallindexed, bool rootdescend);
131 static inline void btree_index_checkable(Relation rel);
132 static inline bool btree_index_mainfork_expected(Relation rel);
133 static void bt_check_every_level(Relation rel, Relation heaprel,
134  bool heapkeyspace, bool readonly, bool heapallindexed,
135  bool rootdescend);
136 static BtreeLevel bt_check_level_from_leftmost(BtreeCheckState *state,
137  BtreeLevel level);
138 static void bt_target_page_check(BtreeCheckState *state);
139 static BTScanInsert bt_right_page_check_scankey(BtreeCheckState *state);
140 static void bt_downlink_check(BtreeCheckState *state, BTScanInsert targetkey,
141  BlockNumber childblock);
142 static void bt_downlink_missing_check(BtreeCheckState *state);
143 static void bt_tuple_present_callback(Relation index, HeapTuple htup,
144  Datum *values, bool *isnull,
145  bool tupleIsAlive, void *checkstate);
146 static IndexTuple bt_normalize_tuple(BtreeCheckState *state,
147  IndexTuple itup);
148 static bool bt_rootdescend(BtreeCheckState *state, IndexTuple itup);
149 static inline bool offset_is_negative_infinity(BTPageOpaque opaque,
150  OffsetNumber offset);
151 static inline bool invariant_l_offset(BtreeCheckState *state, BTScanInsert key,
152  OffsetNumber upperbound);
153 static inline bool invariant_leq_offset(BtreeCheckState *state,
154  BTScanInsert key,
155  OffsetNumber upperbound);
156 static inline bool invariant_g_offset(BtreeCheckState *state, BTScanInsert key,
157  OffsetNumber lowerbound);
158 static inline bool invariant_l_nontarget_offset(BtreeCheckState *state,
159  BTScanInsert key,
160  BlockNumber nontargetblock,
161  Page nontarget,
162  OffsetNumber upperbound);
163 static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum);
164 static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel,
165  IndexTuple itup);
166 static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block,
167  Page page, OffsetNumber offset);
168 static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state,
169  IndexTuple itup, bool nonpivot);
170 
171 /*
172  * bt_index_check(index regclass, heapallindexed boolean)
173  *
174  * Verify integrity of B-Tree index.
175  *
176  * Acquires AccessShareLock on heap & index relations. Does not consider
177  * invariants that exist between parent/child pages. Optionally verifies
178  * that heap does not contain any unindexed or incorrectly indexed tuples.
179  */
180 Datum
181 bt_index_check(PG_FUNCTION_ARGS)
182 {
183  Oid indrelid = PG_GETARG_OID(0);
184  bool heapallindexed = false;
185 
186  if (PG_NARGS() == 2)
187  heapallindexed = PG_GETARG_BOOL(1);
188 
189  bt_index_check_internal(indrelid, false, heapallindexed, false);
190 
191  PG_RETURN_VOID();
192 }
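/*
 * Illustrative SQL-level usage sketch (not part of the file itself; the
 * index name is hypothetical). Because bt_index_check() only takes
 * AccessShareLock, the same lock ordinary SELECTs take, it can run
 * alongside normal reads and writes:
 *
 *   CREATE EXTENSION amcheck;
 *   SELECT bt_index_check('my_table_pkey'::regclass, true);
 */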
193 
194 /*
195  * bt_index_parent_check(index regclass, heapallindexed boolean)
196  *
197  * Verify integrity of B-Tree index.
198  *
199  * Acquires ShareLock on heap & index relations. Verifies that downlinks in
200  * parent pages are valid lower bounds on child pages. Optionally verifies
201  * that heap does not contain any unindexed or incorrectly indexed tuples.
202  */
203 Datum
204 bt_index_parent_check(PG_FUNCTION_ARGS)
205 {
206  Oid indrelid = PG_GETARG_OID(0);
207  bool heapallindexed = false;
208  bool rootdescend = false;
209 
210  if (PG_NARGS() >= 2)
211  heapallindexed = PG_GETARG_BOOL(1);
212  if (PG_NARGS() == 3)
213  rootdescend = PG_GETARG_BOOL(2);
214 
215  bt_index_check_internal(indrelid, true, heapallindexed, rootdescend);
216 
217  PG_RETURN_VOID();
218 }
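/*
 * Illustrative usage sketch (hypothetical index name), with the optional
 * heapallindexed and rootdescend arguments both enabled. Note that the
 * ShareLock acquired here blocks concurrent writers, and that the check
 * raises an error when run in hot standby:
 *
 *   SELECT bt_index_parent_check('my_table_pkey'::regclass, true, true);
 */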
219 
220 /*
221  * Helper for bt_index_[parent_]check, coordinating the bulk of the work.
222  */
223 static void
224 bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed,
225  bool rootdescend)
226 {
227  Oid heapid;
228  Relation indrel;
229  Relation heaprel;
230  LOCKMODE lockmode;
231 
232  if (parentcheck)
233  lockmode = ShareLock;
234  else
235  lockmode = AccessShareLock;
236 
237  /*
238  * We must lock table before index to avoid deadlocks. However, if the
239  * passed indrelid isn't an index then IndexGetRelation() will fail.
240  * Rather than emitting a not-very-helpful error message, postpone
241  * complaining, expecting that the is-it-an-index test below will fail.
242  *
243  * In hot standby mode this will raise an error when parentcheck is true.
244  */
245  heapid = IndexGetRelation(indrelid, true);
246  if (OidIsValid(heapid))
247  heaprel = table_open(heapid, lockmode);
248  else
249  heaprel = NULL;
250 
251  /*
252  * Open the target index relations separately (like relation_openrv(), but
253  * with heap relation locked first to prevent deadlocking). In hot
254  * standby mode this will raise an error when parentcheck is true.
255  *
256  * There is no need for the usual indcheckxmin usability horizon test
257  * here, even in the heapallindexed case, because index undergoing
258  * verification only needs to have entries for a new transaction snapshot.
259  * (If this is a parentcheck verification, there is no question about
260  * committed or recently dead heap tuples lacking index entries due to
261  * concurrent activity.)
262  */
263  indrel = index_open(indrelid, lockmode);
264 
265  /*
266  * Since we did the IndexGetRelation call above without any lock, it's
267  * barely possible that a race against an index drop/recreation could have
268  * netted us the wrong table.
269  */
270  if (heaprel == NULL || heapid != IndexGetRelation(indrelid, false))
271  ereport(ERROR,
272  (errcode(ERRCODE_UNDEFINED_TABLE),
273  errmsg("could not open parent table of index %s",
274  RelationGetRelationName(indrel))));
275 
276  /* Relation suitable for checking as B-Tree? */
277  btree_index_checkable(indrel);
278 
279  if (btree_index_mainfork_expected(indrel))
280  {
281  bool heapkeyspace;
282 
283  RelationOpenSmgr(indrel);
284  if (!smgrexists(indrel->rd_smgr, MAIN_FORKNUM))
285  ereport(ERROR,
286  (errcode(ERRCODE_INDEX_CORRUPTED),
287  errmsg("index \"%s\" lacks a main relation fork",
288  RelationGetRelationName(indrel))));
289 
290  /* Check index, possibly against table it is an index on */
291  heapkeyspace = _bt_heapkeyspace(indrel);
292  bt_check_every_level(indrel, heaprel, heapkeyspace, parentcheck,
293  heapallindexed, rootdescend);
294  }
295 
296  /*
297  * Release locks early. That's ok here because nothing in the called
298  * routines will trigger shared cache invalidations to be sent, so we can
299  * relax the usual pattern of only releasing locks after commit.
300  */
301  index_close(indrel, lockmode);
302  if (heaprel)
303  table_close(heaprel, lockmode);
304 }
305 
306 /*
307  * Basic checks about the suitability of a relation for checking as a B-Tree
308  * index.
309  *
310  * NB: Intentionally not checking permissions, the function is normally not
311  * callable by non-superusers. If granted, it's useful to be able to check a
312  * whole cluster.
313  */
314 static inline void
315 btree_index_checkable(Relation rel)
316 {
317  if (rel->rd_rel->relkind != RELKIND_INDEX ||
318  rel->rd_rel->relam != BTREE_AM_OID)
319  ereport(ERROR,
320  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
321  errmsg("only B-Tree indexes are supported as targets for verification"),
322  errdetail("Relation \"%s\" is not a B-Tree index.",
323  RelationGetRelationName(rel))));
324 
325  if (RELATION_IS_OTHER_TEMP(rel))
326  ereport(ERROR,
327  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
328  errmsg("cannot access temporary tables of other sessions"),
329  errdetail("Index \"%s\" is associated with temporary relation.",
330  RelationGetRelationName(rel))));
331 
332  if (!rel->rd_index->indisvalid)
333  ereport(ERROR,
334  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
335  errmsg("cannot check index \"%s\"",
336  RelationGetRelationName(rel)),
337  errdetail("Index is not valid.")));
338 }
339 
340 /*
341  * Check if B-Tree index relation should have a file for its main relation
342  * fork. Verification uses this to skip unlogged indexes when in hot standby
343  * mode, where there is simply nothing to verify.
344  *
345  * NB: Caller should call btree_index_checkable() before calling here.
346  */
347 static inline bool
348 btree_index_mainfork_expected(Relation rel)
349 {
350  if (rel->rd_rel->relpersistence != RELPERSISTENCE_UNLOGGED ||
351  !RecoveryInProgress())
352  return true;
353 
354  ereport(NOTICE,
355  (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
356  errmsg("cannot verify unlogged index \"%s\" during recovery, skipping",
357  RelationGetRelationName(rel))));
358 
359  return false;
360 }
361 
362 /*
363  * Main entry point for B-Tree SQL-callable functions. Walks the B-Tree in
364  * logical order, verifying invariants as it goes. Optionally, verification
365  * checks if the heap relation contains any tuples that are not represented in
366  * the index but should be.
367  *
368  * It is the caller's responsibility to acquire appropriate heavyweight lock on
369  * the index relation, and advise us if extra checks are safe when a ShareLock
370  * is held. (A lock of the same type must also have been acquired on the heap
371  * relation.)
372  *
373  * A ShareLock is generally assumed to prevent any kind of physical
374  * modification to the index structure, including modifications that VACUUM may
375  * make. This does not include setting of the LP_DEAD bit by concurrent index
376  * scans, although that is just metadata that is not able to directly affect
377  * any check performed here. Any concurrent process that might act on the
378  * LP_DEAD bit being set (recycle space) requires a heavyweight lock that
379  * cannot be held while we hold a ShareLock. (Besides, even if that could
380  * happen, the ad-hoc recycling when a page might otherwise split is performed
381  * per-page, and requires an exclusive buffer lock, which wouldn't cause us
382  * trouble. _bt_delitems_vacuum() may only delete leaf items, and so the extra
383  * parent/child check cannot be affected.)
384  */
385 static void
386 bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace,
387  bool readonly, bool heapallindexed, bool rootdescend)
388 {
389  BtreeCheckState *state;
390  Page metapage;
391  BTMetaPageData *metad;
392  uint32 previouslevel;
393  BtreeLevel current;
394  Snapshot snapshot = SnapshotAny;
395 
396  /*
397  * RecentGlobalXmin assertion matches index_getnext_tid(). See note on
398  * RecentGlobalXmin/B-Tree page deletion.
399  */
400  Assert(TransactionIdIsValid(RecentGlobalXmin));
401 
402  /*
403  * Initialize state for entire verification operation
404  */
405  state = palloc0(sizeof(BtreeCheckState));
406  state->rel = rel;
407  state->heaprel = heaprel;
408  state->heapkeyspace = heapkeyspace;
409  state->readonly = readonly;
410  state->heapallindexed = heapallindexed;
411  state->rootdescend = rootdescend;
412 
413  if (state->heapallindexed)
414  {
415  int64 total_pages;
416  int64 total_elems;
417  uint64 seed;
418 
419  /*
420  * Size Bloom filter based on estimated number of tuples in index,
421  * while conservatively assuming that each block must contain at least
422  * MaxIndexTuplesPerPage / 5 non-pivot tuples. (Non-leaf pages cannot
423  * contain non-pivot tuples. That's okay because they generally make
424  * up no more than about 1% of all pages in the index.)
425  */
426  total_pages = RelationGetNumberOfBlocks(rel);
427  total_elems = Max(total_pages * (MaxIndexTuplesPerPage / 5),
428  (int64) state->rel->rd_rel->reltuples);
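 /*
 * Worked example of the sizing arithmetic above (assuming the default
 * 8KB block size): a ~1GB index spans ~131,072 blocks, so total_elems
 * comes out to at least 131072 * (MaxIndexTuplesPerPage / 5), unless
 * the reltuples estimate is larger still.
 */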
429  /* Random seed relies on backend srandom() call to avoid repetition */
430  seed = random();
431  /* Create Bloom filter to fingerprint index */
432  state->filter = bloom_create(total_elems, maintenance_work_mem, seed);
433  state->heaptuplespresent = 0;
434 
435  /*
436  * Register our own snapshot in !readonly case, rather than asking
437  * table_index_build_scan() to do this for us later. This needs to
438  * happen before index fingerprinting begins, so we can later be
439  * certain that index fingerprinting should have reached all tuples
440  * returned by table_index_build_scan().
441  *
442  * In readonly case, we also check for problems with missing
443  * downlinks. A second Bloom filter is used for this.
444  */
445  if (!state->readonly)
446  {
447  snapshot = RegisterSnapshot(GetTransactionSnapshot());
448 
449  /*
450  * GetTransactionSnapshot() always acquires a new MVCC snapshot in
451  * READ COMMITTED mode. A new snapshot is guaranteed to have all
452  * the entries it requires in the index.
453  *
454  * We must defend against the possibility that an old xact
455  * snapshot was returned at higher isolation levels when that
456  * snapshot is not safe for index scans of the target index. This
457  * is possible when the snapshot sees tuples that are before the
458  * index's indcheckxmin horizon. Throwing an error here should be
459  * very rare. It doesn't seem worth using a secondary snapshot to
460  * avoid this.
461  */
462  if (IsolationUsesXactSnapshot() && rel->rd_index->indcheckxmin &&
463  !TransactionIdPrecedes(HeapTupleHeaderGetXmin(rel->rd_indextuple->t_data),
464  snapshot->xmin))
465  ereport(ERROR,
466  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
467  errmsg("index \"%s\" cannot be verified using transaction snapshot",
468  RelationGetRelationName(rel))));
469  }
470  else
471  {
472  /*
473  * Extra readonly downlink check.
474  *
475  * In readonly case, we know that there cannot be a concurrent
476  * page split or a concurrent page deletion, which gives us the
477  * opportunity to verify that every non-ignorable page had a
478  * downlink one level up. We must be tolerant of interrupted page
479  * splits and page deletions, though. This is taken care of in
480  * bt_downlink_missing_check().
481  */
482  state->downlinkfilter = bloom_create(total_pages, work_mem, seed);
483  }
484  }
485 
486  Assert(!state->rootdescend || state->readonly);
487  if (state->rootdescend && !state->heapkeyspace)
488  ereport(ERROR,
489  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
490  errmsg("cannot verify that tuples from index \"%s\" can each be found by an independent index search",
491  RelationGetRelationName(rel)),
492  errhint("Only B-Tree version 4 indexes support rootdescend verification.")));
493 
494  /* Create context for page */
495  state->targetcontext = AllocSetContextCreate(CurrentMemoryContext,
496  "amcheck context",
497  ALLOCSET_DEFAULT_SIZES);
498  state->checkstrategy = GetAccessStrategy(BAS_BULKREAD);
499 
500  /* Get true root block from meta-page */
501  metapage = palloc_btree_page(state, BTREE_METAPAGE);
502  metad = BTPageGetMeta(metapage);
503 
504  /*
505  * Certain deletion patterns can result in "skinny" B-Tree indexes, where
506  * the fast root and true root differ.
507  *
508  * Start from the true root, not the fast root, unlike conventional index
509  * scans. This approach is more thorough, and removes the risk of
510  * following a stale fast root from the meta page.
511  */
512  if (metad->btm_fastroot != metad->btm_root)
513  ereport(DEBUG1,
514  (errcode(ERRCODE_NO_DATA),
515  errmsg("harmless fast root mismatch in index %s",
516  RelationGetRelationName(rel)),
517  errdetail_internal("Fast root block %u (level %u) differs from true root block %u (level %u).",
518  metad->btm_fastroot, metad->btm_fastlevel,
519  metad->btm_root, metad->btm_level)));
520 
521  /*
522  * Starting at the root, verify every level. Move left to right, top to
523  * bottom. Note that there may be no pages other than the meta page (meta
524  * page can indicate that root is P_NONE when the index is totally empty).
525  */
526  previouslevel = InvalidBtreeLevel;
527  current.level = metad->btm_level;
528  current.leftmost = metad->btm_root;
529  current.istruerootlevel = true;
530  while (current.leftmost != P_NONE)
531  {
532  /*
533  * Leftmost page on level cannot be right half of incomplete split.
534  * This can go stale immediately in !readonly case.
535  */
536  state->rightsplit = false;
537 
538  /*
539  * Verify this level, and get left most page for next level down, if
540  * not at leaf level
541  */
542  current = bt_check_level_from_leftmost(state, current);
543 
544  if (current.leftmost == InvalidBlockNumber)
545  ereport(ERROR,
546  (errcode(ERRCODE_INDEX_CORRUPTED),
547  errmsg("index \"%s\" has no valid pages on level below %u or first level",
548  RelationGetRelationName(rel), previouslevel)));
549 
550  previouslevel = current.level;
551  }
552 
553  /*
554  * * Check whether heap contains unindexed/malformed tuples *
555  */
556  if (state->heapallindexed)
557  {
558  IndexInfo *indexinfo = BuildIndexInfo(state->rel);
559  TableScanDesc scan;
560 
561  /* Report on extra downlink checks performed in readonly case */
562  if (state->readonly)
563  {
564  ereport(DEBUG1,
565  (errmsg_internal("finished verifying presence of downlink blocks within index \"%s\" with bitset %.2f%% set",
566  RelationGetRelationName(rel),
567  100.0 * bloom_prop_bits_set(state->downlinkfilter))));
568  bloom_free(state->downlinkfilter);
569  }
570 
571  /*
572  * Create our own scan for table_index_build_scan(), rather than
573  * getting it to do so for us. This is required so that we can
574  * actually use the MVCC snapshot registered earlier in !readonly
575  * case.
576  *
577  * Note that table_index_build_scan() calls heap_endscan() for us.
578  */
579  scan = table_beginscan_strat(state->heaprel, /* relation */
580  snapshot, /* snapshot */
581  0, /* number of keys */
582  NULL, /* scan key */
583  true, /* buffer access strategy OK */
584  true); /* syncscan OK? */
585 
586  /*
587  * Scan will behave as the first scan of a CREATE INDEX CONCURRENTLY
588  * behaves in !readonly case.
589  *
590  * It's okay that we don't actually use the same lock strength for the
591  * heap relation as any other ii_Concurrent caller would in !readonly
592  * case. We have no reason to care about a concurrent VACUUM
593  * operation, since there isn't going to be a second scan of the heap
594  * that needs to be sure that there was no concurrent recycling of
595  * TIDs.
596  */
597  indexinfo->ii_Concurrent = !state->readonly;
598 
599  /*
600  * Don't wait for uncommitted tuple xact commit/abort when index is a
601  * unique index on a catalog (or an index used by an exclusion
602  * constraint). This could otherwise happen in the readonly case.
603  */
604  indexinfo->ii_Unique = false;
605  indexinfo->ii_ExclusionOps = NULL;
606  indexinfo->ii_ExclusionProcs = NULL;
607  indexinfo->ii_ExclusionStrats = NULL;
608 
609  elog(DEBUG1, "verifying that tuples from index \"%s\" are present in \"%s\"",
610  RelationGetRelationName(state->rel),
611  RelationGetRelationName(state->heaprel));
612 
613  table_index_build_scan(state->heaprel, state->rel, indexinfo, true, false,
614  bt_tuple_present_callback, (void *) state, scan);
615 
616  ereport(DEBUG1,
617  (errmsg_internal("finished verifying presence of " INT64_FORMAT " tuples from table \"%s\" with bitset %.2f%% set",
618  state->heaptuplespresent, RelationGetRelationName(heaprel),
619  100.0 * bloom_prop_bits_set(state->filter))));
620 
621  if (snapshot != SnapshotAny)
622  UnregisterSnapshot(snapshot);
623 
624  bloom_free(state->filter);
625  }
626 
627  /* Be tidy: */
628  MemoryContextDelete(state->targetcontext);
629 }
630 
631 /*
632  * Given a left-most block at some level, move right, verifying each page
633  * individually (with more verification across pages for "readonly"
634  * callers). Caller should pass the true root page as the leftmost initially,
635  * working their way down by passing what is returned for the last call here
636  * until level 0 (leaf page level) is reached.
637  *
638  * Returns state for next call, if any. This includes left-most block number
639  * one level lower that should be passed on next level/call, which is set to
640  * P_NONE on last call here (when leaf level is verified). Level numbers
641  * follow the nbtree convention: higher levels have higher numbers, because new
642  * levels are added only due to a root page split. Note that prior to the
643  * first root page split, the root is also a leaf page, so there is always a
644  * level 0 (leaf level), and it's always the last level processed.
645  *
646  * Note on memory management: State's per-page context is reset here, between
647  * each call to bt_target_page_check().
648  */
649 static BtreeLevel
650 bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level)
651 {
652  /* State to establish early, concerning entire level */
653  BTPageOpaque opaque;
654  MemoryContext oldcontext;
655  BtreeLevel nextleveldown;
656 
657  /* Variables for iterating across level using right links */
658  BlockNumber leftcurrent = P_NONE;
659  BlockNumber current = level.leftmost;
660 
661  /* Initialize return state */
662  nextleveldown.leftmost = InvalidBlockNumber;
663  nextleveldown.level = InvalidBtreeLevel;
664  nextleveldown.istruerootlevel = false;
665 
666  /* Use page-level context for duration of this call */
667  oldcontext = MemoryContextSwitchTo(state->targetcontext);
668 
669  elog(DEBUG2, "verifying level %u%s", level.level,
670  level.istruerootlevel ?
671  " (true root level)" : level.level == 0 ? " (leaf level)" : "");
672 
673  do
674  {
675  /* Don't rely on CHECK_FOR_INTERRUPTS() calls at lower level */
676  CHECK_FOR_INTERRUPTS();
677 
678  /* Initialize state for this iteration */
679  state->targetblock = current;
680  state->target = palloc_btree_page(state, state->targetblock);
681  state->targetlsn = PageGetLSN(state->target);
682 
683  opaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
684 
685  if (P_IGNORE(opaque))
686  {
687  /*
688  * Since there cannot be a concurrent VACUUM operation in readonly
689  * mode, and since a page has no links within other pages
690  * (siblings and parent) once it is marked fully deleted, it
691  * should be impossible to land on a fully deleted page in
692  * readonly mode. See bt_downlink_check() for further details.
693  *
694  * The bt_downlink_check() P_ISDELETED() check is repeated here so
695  * that pages that are only reachable through sibling links get
696  * checked.
697  */
698  if (state->readonly && P_ISDELETED(opaque))
699  ereport(ERROR,
700  (errcode(ERRCODE_INDEX_CORRUPTED),
701  errmsg("downlink or sibling link points to deleted block in index \"%s\"",
702  RelationGetRelationName(state->rel)),
703  errdetail_internal("Block=%u left block=%u left link from block=%u.",
704  current, leftcurrent, opaque->btpo_prev)));
705 
706  if (P_RIGHTMOST(opaque))
707  ereport(ERROR,
708  (errcode(ERRCODE_INDEX_CORRUPTED),
709  errmsg("block %u fell off the end of index \"%s\"",
710  current, RelationGetRelationName(state->rel))));
711  else
712  ereport(DEBUG1,
713  (errcode(ERRCODE_NO_DATA),
714  errmsg("block %u of index \"%s\" ignored",
715  current, RelationGetRelationName(state->rel))));
716  goto nextpage;
717  }
718  else if (nextleveldown.leftmost == InvalidBlockNumber)
719  {
720  /*
721  * A concurrent page split could make the caller supplied leftmost
722  * block no longer contain the leftmost page, or no longer be the
723  * true root, but where that isn't possible due to heavyweight
724  * locking, check that the first valid page meets caller's
725  * expectations.
726  */
727  if (state->readonly)
728  {
729  if (!P_LEFTMOST(opaque))
730  ereport(ERROR,
731  (errcode(ERRCODE_INDEX_CORRUPTED),
732  errmsg("block %u is not leftmost in index \"%s\"",
733  current, RelationGetRelationName(state->rel))));
734 
735  if (level.istruerootlevel && !P_ISROOT(opaque))
736  ereport(ERROR,
737  (errcode(ERRCODE_INDEX_CORRUPTED),
738  errmsg("block %u is not true root in index \"%s\"",
739  current, RelationGetRelationName(state->rel))));
740  }
741 
742  /*
743  * Before beginning any non-trivial examination of level, prepare
744  * state for the next bt_check_level_from_leftmost() invocation for
745  * the next level down (if any).
746  *
747  * There should be at least one non-ignorable page per level,
748  * unless this is the leaf level, which is assumed by caller to be
749  * final level.
750  */
751  if (!P_ISLEAF(opaque))
752  {
753  IndexTuple itup;
754  ItemId itemid;
755 
756  /* Internal page -- downlink gets leftmost on next level */
757  itemid = PageGetItemIdCareful(state, state->targetblock,
758  state->target,
759  P_FIRSTDATAKEY(opaque));
760  itup = (IndexTuple) PageGetItem(state->target, itemid);
761  nextleveldown.leftmost = BTreeInnerTupleGetDownLink(itup);
762  nextleveldown.level = opaque->btpo.level - 1;
763  }
764  else
765  {
766  /*
767  * Leaf page -- final level caller must process.
768  *
769  * Note that this could also be the root page, if there has
770  * been no root page split yet.
771  */
772  nextleveldown.leftmost = P_NONE;
773  nextleveldown.level = InvalidBtreeLevel;
774  }
775 
776  /*
777  * Finished setting up state for this call/level. Control will
778  * never end up back here in any future loop iteration for this
779  * level.
780  */
781  }
782 
783  /*
784  * readonly mode can only ever land on live pages and half-dead pages,
785  * so sibling pointers should always be in mutual agreement
786  */
787  if (state->readonly && opaque->btpo_prev != leftcurrent)
788  ereport(ERROR,
789  (errcode(ERRCODE_INDEX_CORRUPTED),
790  errmsg("left link/right link pair in index \"%s\" not in agreement",
791  RelationGetRelationName(state->rel)),
792  errdetail_internal("Block=%u left block=%u left link from block=%u.",
793  current, leftcurrent, opaque->btpo_prev)));
794 
795  /* Check level, which must be valid for non-ignorable page */
796  if (level.level != opaque->btpo.level)
797  ereport(ERROR,
798  (errcode(ERRCODE_INDEX_CORRUPTED),
799  errmsg("leftmost down link for level points to block in index \"%s\" whose level is not one level down",
800  RelationGetRelationName(state->rel)),
801  errdetail_internal("Block pointed to=%u expected level=%u level in pointed to block=%u.",
802  current, level.level, opaque->btpo.level)));
803 
804  /* Verify invariants for page */
805  bt_target_page_check(state);
806 
807 nextpage:
808 
809  /* Try to detect circular links */
810  if (current == leftcurrent || current == opaque->btpo_prev)
811  ereport(ERROR,
812  (errcode(ERRCODE_INDEX_CORRUPTED),
813  errmsg("circular link chain found in block %u of index \"%s\"",
814  current, RelationGetRelationName(state->rel))));
815 
816  /*
817  * Record if page that is about to become target is the right half of
818  * an incomplete page split. This can go stale immediately in
819  * !readonly case.
820  */
821  state->rightsplit = P_INCOMPLETE_SPLIT(opaque);
822 
823  leftcurrent = current;
824  current = opaque->btpo_next;
825 
826  /* Free page and associated memory for this iteration */
827  MemoryContextReset(state->targetcontext);
828  }
829  while (current != P_NONE);
830 
831  /* Don't change context for caller */
832  MemoryContextSwitchTo(oldcontext);
833 
834  return nextleveldown;
835 }
836 
837 /*
838  * Function performs the following checks on target page, or pages ancillary to
839  * target page:
840  *
841  * - That every "real" data item is less than or equal to the high key, which
842  * is an upper bound on the items on the page. Data items should be
843  * strictly less than the high key when the page is an internal page.
844  *
845  * - That within the page, every data item is strictly less than the item
846  * immediately to its right, if any (i.e., that the items are in order
847  * within the page, so that the binary searches performed by index scans are
848  * sane).
849  *
850  * - That the last data item stored on the page is strictly less than the
851  * first data item on the page to the right (when such a first item is
852  * available).
853  *
854  * - Various checks on the structure of tuples themselves. For example, check
855  * that non-pivot tuples have no truncated attributes.
856  *
857  * Furthermore, when state passed shows ShareLock held, function also checks:
858  *
859  * - That all child pages respect strict lower bound from parent's pivot
860  * tuple.
861  *
862  * - That downlink to block was encountered in parent where that's expected.
863  * (Limited to heapallindexed readonly callers.)
864  *
865  * This is also where heapallindexed callers use their Bloom filter to
866  * fingerprint IndexTuples for later table_index_build_scan() verification.
867  *
868  * Note: Memory allocated in this routine is expected to be released by caller
869  * resetting state->targetcontext.
870  */
871 static void
872 bt_target_page_check(BtreeCheckState *state)
873 {
874  OffsetNumber offset;
875  OffsetNumber max;
876  BTPageOpaque topaque;
877 
878  topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
879  max = PageGetMaxOffsetNumber(state->target);
880 
881  elog(DEBUG2, "verifying %u items on %s block %u", max,
882  P_ISLEAF(topaque) ? "leaf" : "internal", state->targetblock);
883 
884  /*
885  * Check the number of attributes in high key. Note, rightmost page
886  * doesn't contain a high key, so nothing to check
887  */
888  if (!P_RIGHTMOST(topaque))
889  {
890  ItemId itemid;
891  IndexTuple itup;
892 
893  /* Verify line pointer before checking tuple */
894  itemid = PageGetItemIdCareful(state, state->targetblock,
895  state->target, P_HIKEY);
896  if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
897  P_HIKEY))
898  {
899  itup = (IndexTuple) PageGetItem(state->target, itemid);
900  ereport(ERROR,
901  (errcode(ERRCODE_INDEX_CORRUPTED),
902  errmsg("wrong number of high key index tuple attributes in index \"%s\"",
903  RelationGetRelationName(state->rel)),
904  errdetail_internal("Index block=%u natts=%u block type=%s page lsn=%X/%X.",
905  state->targetblock,
906  BTreeTupleGetNAtts(itup, state->rel),
907  P_ISLEAF(topaque) ? "heap" : "index",
908  (uint32) (state->targetlsn >> 32),
909  (uint32) state->targetlsn)));
910  }
911  }
912 
913  /*
914  * Loop over page items, starting from first non-highkey item, not high
915  * key (if any). Most tests are not performed for the "negative infinity"
916  * real item (if any).
917  */
918  for (offset = P_FIRSTDATAKEY(topaque);
919  offset <= max;
920  offset = OffsetNumberNext(offset))
921  {
922  ItemId itemid;
923  IndexTuple itup;
924  size_t tupsize;
925  BTScanInsert skey;
926  bool lowersizelimit;
927 
928  CHECK_FOR_INTERRUPTS();
929 
930  itemid = PageGetItemIdCareful(state, state->targetblock,
931  state->target, offset);
932  itup = (IndexTuple) PageGetItem(state->target, itemid);
933  tupsize = IndexTupleSize(itup);
934 
935  /*
936  * lp_len should match the IndexTuple reported length exactly, since
937  * lp_len is completely redundant in indexes, and both sources of
938  * tuple length are MAXALIGN()'d. nbtree does not use lp_len all that
939  * frequently, and is surprisingly tolerant of corrupt lp_len fields.
940  */
941  if (tupsize != ItemIdGetLength(itemid))
942  ereport(ERROR,
943  (errcode(ERRCODE_INDEX_CORRUPTED),
944  errmsg("index tuple size does not equal lp_len in index \"%s\"",
945  RelationGetRelationName(state->rel)),
946  errdetail_internal("Index tid=(%u,%u) tuple size=%zu lp_len=%u page lsn=%X/%X.",
947  state->targetblock, offset,
948  tupsize, ItemIdGetLength(itemid),
949  (uint32) (state->targetlsn >> 32),
950  (uint32) state->targetlsn),
951  errhint("This could be a torn page problem.")));
952 
953  /* Check the number of index tuple attributes */
954  if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
955  offset))
956  {
957  char *itid,
958  *htid;
959 
960  itid = psprintf("(%u,%u)", state->targetblock, offset);
961  htid = psprintf("(%u,%u)",
962  ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)),
963  ItemPointerGetOffsetNumberNoCheck(&(itup->t_tid)));
964 
965  ereport(ERROR,
966  (errcode(ERRCODE_INDEX_CORRUPTED),
967  errmsg("wrong number of index tuple attributes in index \"%s\"",
968  RelationGetRelationName(state->rel)),
969  errdetail_internal("Index tid=%s natts=%u points to %s tid=%s page lsn=%X/%X.",
970  itid,
971  BTreeTupleGetNAtts(itup, state->rel),
972  P_ISLEAF(topaque) ? "heap" : "index",
973  htid,
974  (uint32) (state->targetlsn >> 32),
975  (uint32) state->targetlsn)));
976  }
977 
978  /* Fingerprint downlink blocks in heapallindexed + readonly case */
979  if (state->heapallindexed && state->readonly && !P_ISLEAF(topaque))
980  {
981  BlockNumber childblock = BTreeInnerTupleGetDownLink(itup);
982 
983  bloom_add_element(state->downlinkfilter,
984  (unsigned char *) &childblock,
985  sizeof(BlockNumber));
986  }
987 
988  /*
989  * Don't try to generate scankey using "negative infinity" item on
990  * internal pages. They are always truncated to zero attributes.
991  */
992  if (offset_is_negative_infinity(topaque, offset))
993  continue;
994 
995  /*
996  * Readonly callers may optionally verify that non-pivot tuples can
997  * each be found by an independent search that starts from the root
998  */
999  if (state->rootdescend && P_ISLEAF(topaque) &&
1000  !bt_rootdescend(state, itup))
1001  {
1002  char *itid,
1003  *htid;
1004 
1005  itid = psprintf("(%u,%u)", state->targetblock, offset);
1006  htid = psprintf("(%u,%u)",
1007  ItemPointerGetBlockNumber(&(itup->t_tid)),
1008  ItemPointerGetOffsetNumber(&(itup->t_tid)));
1009 
1010  ereport(ERROR,
1011  (errcode(ERRCODE_INDEX_CORRUPTED),
1012  errmsg("could not find tuple using search from root page in index \"%s\"",
1013  RelationGetRelationName(state->rel)),
1014  errdetail_internal("Index tid=%s points to heap tid=%s page lsn=%X/%X.",
1015  itid, htid,
1016  (uint32) (state->targetlsn >> 32),
1017  (uint32) state->targetlsn)));
1018  }
1019 
1020  /* Build insertion scankey for current page offset */
1021  skey = bt_mkscankey_pivotsearch(state->rel, itup);
1022 
1023  /*
1024  * Make sure tuple size does not exceed the relevant BTREE_VERSION
1025  * specific limit.
1026  *
1027  * BTREE_VERSION 4 (which introduced heapkeyspace rules) requisitioned
1028  * a small amount of space from BTMaxItemSize() in order to ensure
1029  * that suffix truncation always has enough space to add an explicit
1030  * heap TID back to a tuple -- we pessimistically assume that every
1031  * newly inserted tuple will eventually need to have a heap TID
1032  * appended during a future leaf page split, when the tuple becomes
1033  * the basis of the new high key (pivot tuple) for the leaf page.
1034  *
1035  * Since the reclaimed space is reserved for that purpose, we must not
1036  * enforce the slightly lower limit when the extra space has been used
1037  * as intended. In other words, there is only a cross-version
1038  * difference in the limit on tuple size within leaf pages.
1039  *
1040  * Still, we're particular about the details within BTREE_VERSION 4
1041  * internal pages. Pivot tuples may only use the extra space for its
1042  * designated purpose. Enforce the lower limit for pivot tuples when
1043  * an explicit heap TID isn't actually present. (In all other cases
1044  * suffix truncation is guaranteed to generate a pivot tuple that's no
1045  * larger than the first right tuple provided to it by its caller.)
1046  */
1047  lowersizelimit = skey->heapkeyspace &&
1048  (P_ISLEAF(topaque) || BTreeTupleGetHeapTID(itup) == NULL);
1049  if (tupsize > (lowersizelimit ? BTMaxItemSize(state->target) :
1050  BTMaxItemSizeNoHeapTid(state->target)))
1051  {
1052  char *itid,
1053  *htid;
1054 
1055  itid = psprintf("(%u,%u)", state->targetblock, offset);
1056  htid = psprintf("(%u,%u)",
1057  ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)),
1058  ItemPointerGetOffsetNumberNoCheck(&(itup->t_tid)));
1059 
1060  ereport(ERROR,
1061  (errcode(ERRCODE_INDEX_CORRUPTED),
1062  errmsg("index row size %zu exceeds maximum for index \"%s\"",
1063  tupsize, RelationGetRelationName(state->rel)),
1064  errdetail_internal("Index tid=%s points to %s tid=%s page lsn=%X/%X.",
1065  itid,
1066  P_ISLEAF(topaque) ? "heap" : "index",
1067  htid,
1068  (uint32) (state->targetlsn >> 32),
1069  (uint32) state->targetlsn)));
1070  }
1071 
1072  /* Fingerprint leaf page tuples (those that point to the heap) */
1073  if (state->heapallindexed && P_ISLEAF(topaque) && !ItemIdIsDead(itemid))
1074  {
1075  IndexTuple norm;
1076 
1077  norm = bt_normalize_tuple(state, itup);
1078  bloom_add_element(state->filter, (unsigned char *) norm,
1079  IndexTupleSize(norm));
1080  /* Be tidy */
1081  if (norm != itup)
1082  pfree(norm);
1083  }
1084 
1085  /*
1086  * * High key check *
1087  *
1088  * If there is a high key (if this is not the rightmost page on its
1089  * entire level), check that high key actually is upper bound on all
1090  * page items.
1091  *
1092  * We prefer to check all items against high key rather than checking
1093  * just the last and trusting that the operator class obeys the
1094  * transitive law (which implies that all previous items also
1095  * respected the high key invariant if they pass the item order
1096  * check).
1097  *
1098  * Ideally, we'd compare every item in the index against every other
1099  * item in the index, and not trust opclass obedience of the
1100  * transitive law to bridge the gap between children and their
1101  * grandparents (as well as great-grandparents, and so on). We don't
1102  * go to those lengths because that would be prohibitively expensive,
1103  * and probably not markedly more effective in practice.
1104  *
1105  * On the leaf level, we check that the key is <= the highkey.
1106  * However, on non-leaf levels we check that the key is < the highkey,
1107  * because the high key is "just another separator" rather than a copy
1108  * of some existing key item; we expect it to be unique among all keys
1109  * on the same level. (Suffix truncation will sometimes produce a
1110  * leaf highkey that is an untruncated copy of the lastleft item, but
1111  * never any other item, which necessitates weakening the leaf level
1112  * check to <=.)
1113  *
1114  * Full explanation for why a highkey is never truly a copy of another
1115  * item from the same level on internal levels:
1116  *
1117  * While the new left page's high key is copied from the first offset
1118  * on the right page during an internal page split, that's not the
1119  * full story. In effect, internal pages are split in the middle of
1120  * the firstright tuple, not between the would-be lastleft and
1121  * firstright tuples: the firstright key ends up on the left side as
1122  * left's new highkey, and the firstright downlink ends up on the
1123  * right side as right's new "negative infinity" item. The negative
1124  * infinity tuple is truncated to zero attributes, so we're only left
1125  * with the downlink. In other words, the copying is just an
1126  * implementation detail of splitting in the middle of a (pivot)
1127  * tuple. (See also: "Notes About Data Representation" in the nbtree
1128  * README.)
1129  */
1130  if (!P_RIGHTMOST(topaque) &&
1131  !(P_ISLEAF(topaque) ? invariant_leq_offset(state, skey, P_HIKEY) :
1132  invariant_l_offset(state, skey, P_HIKEY)))
1133  {
1134  char *itid,
1135  *htid;
1136 
1137  itid = psprintf("(%u,%u)", state->targetblock, offset);
1138  htid = psprintf("(%u,%u)",
1139  ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)),
1140  ItemPointerGetOffsetNumberNoCheck(&(itup->t_tid)));
1141 
1142  ereport(ERROR,
1143  (errcode(ERRCODE_INDEX_CORRUPTED),
1144  errmsg("high key invariant violated for index \"%s\"",
1145  RelationGetRelationName(state->rel)),
1146  errdetail_internal("Index tid=%s points to %s tid=%s page lsn=%X/%X.",
1147  itid,
1148  P_ISLEAF(topaque) ? "heap" : "index",
1149  htid,
1150  (uint32) (state->targetlsn >> 32),
1151  (uint32) state->targetlsn)));
1152  }
1153 
1154  /*
1155  * * Item order check *
1156  *
1157  * Check that items are stored on page in logical order, by checking
1158  * current item is strictly less than next item (if any).
1159  */
1160  if (OffsetNumberNext(offset) <= max &&
1161  !invariant_l_offset(state, skey, OffsetNumberNext(offset)))
1162  {
1163  char *itid,
1164  *htid,
1165  *nitid,
1166  *nhtid;
1167 
1168  itid = psprintf("(%u,%u)", state->targetblock, offset);
1169  htid = psprintf("(%u,%u)",
1170  ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)),
1171  ItemPointerGetOffsetNumberNoCheck(&(itup->t_tid)));
1172  nitid = psprintf("(%u,%u)", state->targetblock,
1173  OffsetNumberNext(offset));
1174 
1175  /* Reuse itup to get pointed-to heap location of second item */
1176  itemid = PageGetItemIdCareful(state, state->targetblock,
1177  state->target,
1178  OffsetNumberNext(offset));
1179  itup = (IndexTuple) PageGetItem(state->target, itemid);
1180  nhtid = psprintf("(%u,%u)",
1181  ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)),
1182  ItemPointerGetOffsetNumberNoCheck(&(itup->t_tid)));
1183 
1184  ereport(ERROR,
1185  (errcode(ERRCODE_INDEX_CORRUPTED),
1186  errmsg("item order invariant violated for index \"%s\"",
1187  RelationGetRelationName(state->rel)),
1188  errdetail_internal("Lower index tid=%s (points to %s tid=%s) "
1189  "higher index tid=%s (points to %s tid=%s) "
1190  "page lsn=%X/%X.",
1191  itid,
1192  P_ISLEAF(topaque) ? "heap" : "index",
1193  htid,
1194  nitid,
1195  P_ISLEAF(topaque) ? "heap" : "index",
1196  nhtid,
1197  (uint32) (state->targetlsn >> 32),
1198  (uint32) state->targetlsn)));
1199  }
1200 
1201  /*
1202  * * Last item check *
1203  *
1204  * Check last item against next/right page's first data item when
1205  * last item on page is reached. This additional check will detect
1206  * transposed pages iff the supposed right sibling page happens to
1207  * belong before target in the key space. (Otherwise, a subsequent
1208  * heap verification will probably detect the problem.)
1209  *
1210  * This check is similar to the item order check that will have
1211  * already been performed for every other "real" item on target page
1212  * when last item is checked. The difference is that the next item
1213  * (the item that is compared to target's last item) needs to come
1214  * from the next/sibling page. There may not be such an item
1215  * available from sibling for various reasons, though (e.g., target is
1216  * the rightmost page on level).
1217  */
1218  else if (offset == max)
1219  {
1220  BTScanInsert rightkey;
1221 
1222  /* Get item in next/right page */
1223  rightkey = bt_right_page_check_scankey(state);
1224 
1225  if (rightkey &&
1226  !invariant_g_offset(state, rightkey, max))
1227  {
1228  /*
1229  * As explained at length in bt_right_page_check_scankey(),
1230  * there is a known !readonly race that could account for
1231  * apparent violation of invariant, which we must check for
1232  * before actually proceeding with raising error. Our canary
1233  * condition is that target page was deleted.
1234  */
1235  if (!state->readonly)
1236  {
1237  /* Get fresh copy of target page */
1238  state->target = palloc_btree_page(state, state->targetblock);
1239  /* Note that we deliberately do not update target LSN */
1240  topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
1241 
1242  /*
1243  * All !readonly checks now performed; just return
1244  */
1245  if (P_IGNORE(topaque))
1246  return;
1247  }
1248 
1249  ereport(ERROR,
1250  (errcode(ERRCODE_INDEX_CORRUPTED),
1251  errmsg("cross page item order invariant violated for index \"%s\"",
1252  RelationGetRelationName(state->rel)),
1253  errdetail_internal("Last item on page tid=(%u,%u) page lsn=%X/%X.",
1254  state->targetblock, offset,
1255  (uint32) (state->targetlsn >> 32),
1256  (uint32) state->targetlsn)));
1257  }
1258  }
1259 
1260  /*
1261  * * Downlink check *
1262  *
1263  * Additional check of child items iff this is an internal page and
1264  * caller holds a ShareLock. This happens for every downlink (item)
1265  * in target excluding the negative-infinity downlink (again, this is
1266  * because it has no useful value to compare).
1267  */
1268  if (!P_ISLEAF(topaque) && state->readonly)
1269  {
1270  BlockNumber childblock = BTreeInnerTupleGetDownLink(itup);
1271 
1272  bt_downlink_check(state, skey, childblock);
1273  }
1274  }
1275 
1276  /*
1277  * * Check if page has a downlink in parent *
1278  *
1279  * This can only be checked in heapallindexed + readonly case.
1280  */
1281  if (state->heapallindexed && state->readonly)
1282  bt_downlink_missing_check(state);
1283 }
1284 
1285 /*
1286  * Return a scankey for an item on page to right of current target (or the
1287  * first non-ignorable page), sufficient to check ordering invariant on last
1288  * item in current target page. Returned scankey relies on local memory
1289  * allocated for the child page, which caller cannot pfree(). Caller's memory
1290  * context should be reset between calls here.
1291  *
1292  * This is the first data item, and so all adjacent items are checked against
1293  * their immediate sibling item (which may be on a sibling page, or even a
1294  * "cousin" page at parent boundaries where target's rightlink points to page
1295  * with different parent page). If no such valid item is available, return
1296  * NULL instead.
1297  *
1298  * Note that !readonly callers must reverify that target page has not
1299  * been concurrently deleted.
1300  */
1301 static BTScanInsert
1302 bt_right_page_check_scankey(BtreeCheckState *state)
1303 {
1304  BTPageOpaque opaque;
1305  ItemId rightitem;
1306  IndexTuple firstitup;
1307  BlockNumber targetnext;
1308  Page rightpage;
1309  OffsetNumber nline;
1310 
1311  /* Determine target's next block number */
1312  opaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
1313 
1314  /* If target is already rightmost, no right sibling; nothing to do here */
1315  if (P_RIGHTMOST(opaque))
1316  return NULL;
1317 
1318  /*
1319  * General notes on concurrent page splits and page deletion:
1320  *
1321  * Routines like _bt_search() don't require *any* page split interlock
1322  * when descending the tree, including something very light like a buffer
1323  * pin. That's why it's okay that we don't either. This avoidance of any
1324  * need to "couple" buffer locks is the raison d' etre of the Lehman & Yao
1325  * algorithm, in fact.
1326  *
1327  * That leaves deletion. A deleted page won't actually be recycled by
1328  * VACUUM early enough for us to fail to at least follow its right link
1329  * (or left link, or downlink) and find its sibling, because recycling
1330  * does not occur until no possible index scan could land on the page.
1331  * Index scans can follow links with nothing more than their snapshot as
1332  * an interlock and be sure of at least that much. (See page
1333  * recycling/RecentGlobalXmin notes in nbtree README.)
1334  *
1335  * Furthermore, it's okay if we follow a rightlink and find a half-dead or
1336  * dead (ignorable) page one or more times. There will either be a
1337  * further right link to follow that leads to a live page before too long
1338  * (before passing by parent's rightmost child), or we will find the end
1339  * of the entire level instead (possible when parent page is itself the
1340  * rightmost on its level).
1341  */
1342  targetnext = opaque->btpo_next;
1343  for (;;)
1344  {
1345  CHECK_FOR_INTERRUPTS();
1346 
1347  rightpage = palloc_btree_page(state, targetnext);
1348  opaque = (BTPageOpaque) PageGetSpecialPointer(rightpage);
1349 
1350  if (!P_IGNORE(opaque) || P_RIGHTMOST(opaque))
1351  break;
1352 
1353  /* We landed on a deleted page, so step right to find a live page */
1354  targetnext = opaque->btpo_next;
1355  ereport(DEBUG1,
1356  (errcode(ERRCODE_NO_DATA),
1357  errmsg("level %u leftmost page of index \"%s\" was found deleted or half dead",
1358  opaque->btpo.level, RelationGetRelationName(state->rel)),
1359  errdetail_internal("Deleted page found when building scankey from right sibling.")));
1360 
1361  /* Be slightly more pro-active in freeing this memory, just in case */
1362  pfree(rightpage);
1363  }
1364 
1365  /*
1366  * No ShareLock held case -- why it's safe to proceed.
1367  *
1368  * Problem:
1369  *
1370  * We must avoid false positive reports of corruption when caller treats
1371  * item returned here as an upper bound on target's last item. In
1372  * general, false positives are disallowed. Avoiding them here when
1373  * caller is !readonly is subtle.
1374  *
1375  * A concurrent page deletion by VACUUM of the target page can result in
1376  * the insertion of items on to this right sibling page that would
1377  * previously have been inserted on our target page. There might have
1378  * been insertions that followed the target's downlink after it was made
1379  * to point to right sibling instead of target by page deletion's first
1380  * phase. The inserters insert items that would belong on target page.
1381  * This race is very tight, but it's possible. This is our only problem.
1382  *
1383  * Non-problems:
1384  *
1385  * We are not hindered by a concurrent page split of the target; we'll
1386  * never land on the second half of the page anyway. A concurrent split
1387  * of the right page will also not matter, because the first data item
1388  * remains the same within the left half, which we'll reliably land on. If
1389  * we had to skip over ignorable/deleted pages, it cannot matter because
1390  * their key space has already been atomically merged with the first
1391  * non-ignorable page we eventually find (doesn't matter whether the page
1392  * we eventually find is a true sibling or a cousin of target, which we go
1393  * into below).
1394  *
1395  * Solution:
1396  *
1397  * Caller knows that it should reverify that target is not ignorable
1398  * (half-dead or deleted) when cross-page sibling item comparison appears
1399  * to indicate corruption (invariant fails). This detects the single race
1400  * condition that exists for caller. This is correct because the
1401  * continued existence of target block as non-ignorable (not half-dead or
1402  * deleted) implies that target page was not merged into from the right by
1403  * deletion; the key space at or after target never moved left. Target's
1404  * parent either has the same downlink to target as before, or a <
1405  * downlink due to deletion at the left of target. Target either has the
1406  * same highkey as before, or a highkey < before when there is a page
1407  * split. (The rightmost concurrently-split-from-target-page page will
1408  * still have the same highkey as target was originally found to have,
1409  * which for our purposes is equivalent to target's highkey itself never
1410  * changing, since we reliably skip over
1411  * concurrently-split-from-target-page pages.)
1412  *
1413  * In simpler terms, we allow that the key space of the target may expand
1414  * left (the key space can move left on the left side of target only), but
1415  * the target key space cannot expand right and get ahead of us without
1416  * our detecting it. The key space of the target cannot shrink, unless it
1417  * shrinks to zero due to the deletion of the original page, our canary
1418  * condition. (To be very precise, we're a bit stricter than that because
1419  * it might just have been that the target page split and only the
1420  * original target page was deleted. We can be more strict, just not more
1421  * lax.)
1422  *
1423  * Top level tree walk caller moves on to next page (makes it the new
1424  * target) following recovery from this race. (cf. The rationale for
1425  * child/downlink verification needing a ShareLock within
1426  * bt_downlink_check(), where page deletion is also the main source of
1427  * trouble.)
1428  *
1429  * Note that it doesn't matter if right sibling page here is actually a
1430  * cousin page, because in order for the key space to be readjusted in a
1431  * way that causes us issues in next level up (guiding problematic
1432  * concurrent insertions to the cousin from the grandparent rather than to
1433  * the sibling from the parent), there'd have to be page deletion of
1434  * target's parent page (affecting target's parent's downlink in target's
1435  * grandparent page). Internal page deletion only occurs when there are
1436  * no child pages (they were all fully deleted), and caller is checking
1437  * that the target's parent has at least one non-deleted (so
1438  * non-ignorable) child: the target page. (Note that the first phase of
1439  * deletion atomically marks the page to be deleted half-dead/ignorable at
1440  * the same time downlink in its parent is removed, so caller will
1441  * definitely not fail to detect that this happened.)
1442  *
1443  * This trick is inspired by the method backward scans use for dealing
1444  * with concurrent page splits; concurrent page deletion is a problem that
1445  * similarly receives special consideration sometimes (it's possible that
1446  * the backwards scan will re-read its "original" block after failing to
1447  * find a right-link to it, having already moved in the opposite direction
1448  * (right/"forwards") a few times to try to locate one). Just like us,
1449  * that happens only to determine if there was a concurrent page deletion
1450  * of a reference page, and just like us if there was a page deletion of
1451  * that reference page it means we can move on from caring about the
1452  * reference page. See the nbtree README for a full description of how
1453  * that works.
1454  */
1455  nline = PageGetMaxOffsetNumber(rightpage);
1456 
1457  /*
1458  * Get first data item, if any
1459  */
1460  if (P_ISLEAF(opaque) && nline >= P_FIRSTDATAKEY(opaque))
1461  {
1462  /* Return first data item (if any) */
1463  rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
1464  P_FIRSTDATAKEY(opaque));
1465  }
1466  else if (!P_ISLEAF(opaque) &&
1467  nline >= OffsetNumberNext(P_FIRSTDATAKEY(opaque)))
1468  {
1469  /*
1470  * Return first item after the internal page's "negative infinity"
1471  * item
1472  */
1473  rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
1474  OffsetNumberNext(P_FIRSTDATAKEY(opaque)));
1475  }
1476  else
1477  {
1478  /*
1479  * No first item. Page is probably empty leaf page, but it's also
1480  * possible that it's an internal page with only a negative infinity
1481  * item.
1482  */
1483  ereport(DEBUG1,
1484  (errcode(ERRCODE_NO_DATA),
1485  errmsg("%s block %u of index \"%s\" has no first data item",
1486  P_ISLEAF(opaque) ? "leaf" : "internal", targetnext,
1487  RelationGetRelationName(state->rel))));
1488  return NULL;
1489  }
1490 
1491  /*
1492  * Return first real item scankey. Note that this relies on right page
1493  * memory remaining allocated.
1494  */
1495  firstitup = (IndexTuple) PageGetItem(rightpage, rightitem);
1496  return bt_mkscankey_pivotsearch(state->rel, firstitup);
1497 }
1498 
1499 /*
1500  * Checks one of target's downlink against its child page.
1501  *
1502  * Conceptually, the target page continues to be what is checked here. The
1503  * target block is still blamed in the event of finding an invariant violation.
1504  * The downlink insertion into the target is probably where any problem raised
1505  * here arises, and there is no such thing as a parent link, so doing the
1506  * verification this way around is much more practical.
1507  */
1508 static void
1509 bt_downlink_check(BtreeCheckState *state, BTScanInsert targetkey,
1510  BlockNumber childblock)
1511 {
1512  OffsetNumber offset;
1513  OffsetNumber maxoffset;
1514  Page child;
1515  BTPageOpaque copaque;
1516 
1517  /*
1518  * Caller must have ShareLock on target relation, because of
1519  * considerations around page deletion by VACUUM.
1520  *
1521  * NB: In general, page deletion deletes the right sibling's downlink, not
1522  * the downlink of the page being deleted; the deleted page's downlink is
1523  * reused for its sibling. The key space is thereby consolidated between
1524  * the deleted page and its right sibling. (We cannot delete a parent
1525  * page's rightmost child unless it is the last child page, and we intend
1526  * to also delete the parent itself.)
1527  *
1528  * If this verification happened without a ShareLock, the following race
1529  * condition could cause false positives:
1530  *
1531  * In general, concurrent page deletion might occur, including deletion of
1532  * the left sibling of the child page that is examined here. If such a
1533  * page deletion were to occur, closely followed by an insertion into the
1534  * newly expanded key space of the child, a window for the false positive
1535  * opens up: the stale parent/target downlink originally followed to get
1536  * to the child legitimately ceases to be a lower bound on all items in
1537  * the page, since the key space was concurrently expanded "left".
1538  * (Insertion followed the "new" downlink for the child, not our now-stale
1539  * downlink, which was concurrently physically removed in target/parent as
1540  * part of deletion's first phase.)
1541  *
1542  * Note that while the cross-page-same-level last item check uses a trick
1543  * that allows it to perform verification for !readonly callers, a similar
1544  * trick seems difficult here. The trick that the other check uses is,
1545  * in essence, to lock down race conditions to those that occur due to
1546  * concurrent page deletion of the target; that's a race that can be
1547  * reliably detected before actually reporting corruption.
1548  *
1549  * On the other hand, we'd need to lock down race conditions involving
1550  * deletion of child's left page, for long enough to read the child page
1551  * into memory (in other words, a scheme with concurrently held buffer
1552  * locks on both child and left-of-child pages). That's unacceptable for
1553  * amcheck functions on general principle, though.
1554  */
1555  Assert(state->readonly);
1556 
1557  /*
1558  * Verify child page has the downlink key from target page (its parent) as
1559  * a lower bound; downlink must be strictly less than all keys on the
1560  * page.
1561  *
1562  * Check all items, rather than checking just the first and trusting that
1563  * the operator class obeys the transitive law.
1564  */
1565  child = palloc_btree_page(state, childblock);
1566  copaque = (BTPageOpaque) PageGetSpecialPointer(child);
1567  maxoffset = PageGetMaxOffsetNumber(child);
1568 
1569  /*
1570  * Since there cannot be a concurrent VACUUM operation in readonly mode,
1571  * and since a page has no links within other pages (siblings and parent)
1572  * once it is marked fully deleted, it should be impossible to land on a
1573  * fully deleted page.
1574  *
1575  * It does not quite make sense to enforce that the page cannot even be
1576  * half-dead, despite the fact that the downlink is modified at the same
1577  * stage that the child leaf page is marked half-dead. Such a check would be
1578  * incorrect because there may occasionally be multiple downlinks from a chain of pages
1579  * undergoing deletion, where multiple successive calls are made to
1580  * _bt_unlink_halfdead_page() by VACUUM before it can finally safely mark
1581  * the leaf page as fully dead. While _bt_mark_page_halfdead() usually
1582  * removes the downlink to the leaf page that is marked half-dead, that's
1583  * not guaranteed, so it's possible we'll land on a half-dead page with a
1584  * downlink due to an interrupted multi-level page deletion.
1585  *
1586  * We go ahead with our checks if the child page is half-dead. It's safe
1587  * to do so because we do not test the child's high key, so it does not
1588  * matter that the original high key will have been replaced by a dummy
1589  * truncated high key within _bt_mark_page_halfdead(). All other page
1590  * items are left intact on a half-dead page, so there is still something
1591  * to test.
1592  */
1593  if (P_ISDELETED(copaque))
1594  ereport(ERROR,
1595  (errcode(ERRCODE_INDEX_CORRUPTED),
1596  errmsg("downlink to deleted page found in index \"%s\"",
1597  RelationGetRelationName(state->rel)),
1598  errdetail_internal("Parent block=%u child block=%u parent page lsn=%X/%X.",
1599  state->targetblock, childblock,
1600  (uint32) (state->targetlsn >> 32),
1601  (uint32) state->targetlsn)));
1602 
1603  for (offset = P_FIRSTDATAKEY(copaque);
1604  offset <= maxoffset;
1605  offset = OffsetNumberNext(offset))
1606  {
1607  /*
1608  * Skip comparison of target page key against "negative infinity"
1609  * item, if any. Checking it would indicate that it's not a strict
1610  * lower bound, but that's only because of the hard-coding for
1611  * negative infinity items within _bt_compare().
1612  *
1613  * If nbtree didn't truncate negative infinity tuples during internal
1614  * page splits then we'd expect child's negative infinity key to be
1615  * equal to the scankey/downlink from target/parent (it would be a
1616  * "low key" in this hypothetical scenario, and so it would still need
1617  * to be treated as a special case here).
1618  *
1619  * Negative infinity items can be thought of as a strict lower bound
1620  * that works transitively, with the last non-negative-infinity pivot
1621  * followed during a descent from the root as its "true" strict lower
1622  * bound. Only a small number of negative infinity items are truly
1623  * negative infinity: those that are the first items of leftmost
1624  * internal pages. In more general terms, a negative infinity item is
1625  * only negative infinity with respect to the subtree that the page is
1626  * at the root of.
1627  *
1628  * See also: bt_rootdescend(), which can even detect transitive
1629  * inconsistencies on cousin leaf pages.
1630  */
1631  if (offset_is_negative_infinity(copaque, offset))
1632  continue;
1633 
1634  if (!invariant_l_nontarget_offset(state, targetkey, childblock, child,
1635  offset))
1636  ereport(ERROR,
1637  (errcode(ERRCODE_INDEX_CORRUPTED),
1638  errmsg("down-link lower bound invariant violated for index \"%s\"",
1639  RelationGetRelationName(state->rel)),
1640  errdetail_internal("Parent block=%u child index tid=(%u,%u) parent page lsn=%X/%X.",
1641  state->targetblock, childblock, offset,
1642  (uint32) (state->targetlsn >> 32),
1643  (uint32) state->targetlsn)));
1644  }
1645 
1646  pfree(child);
1647 }
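
A hedged sketch of the corresponding call site, modeled on the internal-page item loop in bt_target_page_check(); here targetkey would be the insertion scankey built from the downlink's pivot tuple via bt_mkscankey_pivotsearch():

/* Sketch: check each downlink on an internal target page */
if (!P_ISLEAF(topaque) && state->readonly)
{
    BlockNumber childblock = BTreeInnerTupleGetDownLink(itup);

    /* Downlink must be a strict lower bound on all items in the child */
    bt_downlink_check(state, targetkey, childblock);
}
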
1648 
1649 /*
1650  * Checks if page is missing a downlink that it should have.
1651  *
1652  * A page that lacks a downlink/parent may indicate corruption. However, we
1653  * must account for the fact that a missing downlink can occasionally be
1654  * encountered in a non-corrupt index. This can be due to an interrupted page
1655  * split, or an interrupted multi-level page deletion (i.e. there was a hard
1656  * crash or an error during a page split, or while VACUUM was deleting a
1657  * multi-level chain of pages).
1658  *
1659  * Note that this can only be called in readonly mode, so there is no need to
1660  * be concerned about concurrent page splits or page deletions.
1661  */
1662 static void
1663 bt_downlink_missing_check(BtreeCheckState *state)
1664 {
1665  BTPageOpaque topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
1666  ItemId itemid;
1667  IndexTuple itup;
1668  Page child;
1669  BTPageOpaque copaque;
1670  uint32 level;
1671  BlockNumber childblk;
1672 
1673  Assert(state->heapallindexed && state->readonly);
1674  Assert(!P_IGNORE(topaque));
1675 
1676  /* No next level up with downlinks to fingerprint from the true root */
1677  if (P_ISROOT(topaque))
1678  return;
1679 
1680  /*
1681  * Incomplete (interrupted) page splits can account for the lack of a
1682  * downlink. Some inserting transaction should eventually complete the
1683  * page split in passing, when it notices that the left sibling page is
1684  * P_INCOMPLETE_SPLIT().
1685  *
1686  * In general, VACUUM is not prepared for there to be no downlink to a
1687  * page that it deletes. This is the main reason why the lack of a
1688  * downlink can be reported as corruption here. It's not obvious that an
1689  * invalid missing downlink can result in wrong answers to queries,
1690  * though, since index scans that land on the child may end up
1691  * consistently moving right. The handling of concurrent page splits (and
1692  * page deletions) within _bt_moveright() cannot distinguish
1693  * inconsistencies that last for a moment from inconsistencies that are
1694  * permanent and irrecoverable.
1695  *
1696  * VACUUM isn't even prepared to delete pages that have no downlink due to
1697  * an incomplete page split, but it can detect and reason about that case
1698  * by design, so it shouldn't be taken to indicate corruption. See
1699  * _bt_pagedel() for full details.
1700  */
1701  if (state->rightsplit)
1702  {
1703  ereport(DEBUG1,
1704  (errcode(ERRCODE_NO_DATA),
1705  errmsg("harmless interrupted page split detected in index %s",
1706  RelationGetRelationName(state->rel)),
1707  errdetail_internal("Block=%u level=%u left sibling=%u page lsn=%X/%X.",
1708  state->targetblock, topaque->btpo.level,
1709  topaque->btpo_prev,
1710  (uint32) (state->targetlsn >> 32),
1711  (uint32) state->targetlsn)));
1712  return;
1713  }
1714 
1715  /* Target's downlink is typically present in parent/fingerprinted */
1716  if (!bloom_lacks_element(state->downlinkfilter,
1717  (unsigned char *) &state->targetblock,
1718  sizeof(BlockNumber)))
1719  return;
1720 
1721  /*
1722  * Target is probably the "top parent" of a multi-level page deletion.
1723  * We'll need to descend the subtree to make sure that descendant pages
1724  * are consistent with that, though.
1725  *
1726  * If the target page (which must be non-ignorable) is a leaf page, then
1727  * clearly it can't be the top parent. The lack of a downlink is probably
1728  * a symptom of a broad problem that could just as easily cause
1729  * inconsistencies anywhere else.
1730  */
1731  if (P_ISLEAF(topaque))
1732  ereport(ERROR,
1733  (errcode(ERRCODE_INDEX_CORRUPTED),
1734  errmsg("leaf index block lacks downlink in index \"%s\"",
1735  RelationGetRelationName(state->rel)),
1736  errdetail_internal("Block=%u page lsn=%X/%X.",
1737  state->targetblock,
1738  (uint32) (state->targetlsn >> 32),
1739  (uint32) state->targetlsn)));
1740 
1741  /* Descend from the target page, which is an internal page */
1742  elog(DEBUG1, "checking for interrupted multi-level deletion due to missing downlink in index \"%s\"",
1743  RelationGetRelationName(state->rel));
1744 
1745  level = topaque->btpo.level;
1746  itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
1747  P_FIRSTDATAKEY(topaque));
1748  itup = (IndexTuple) PageGetItem(state->target, itemid);
1749  childblk = BTreeInnerTupleGetDownLink(itup);
1750  for (;;)
1751  {
1752  CHECK_FOR_INTERRUPTS();
1753 
1754  child = palloc_btree_page(state, childblk);
1755  copaque = (BTPageOpaque) PageGetSpecialPointer(child);
1756 
1757  if (P_ISLEAF(copaque))
1758  break;
1759 
1760  /* Do an extra sanity check in passing on internal pages */
1761  if (copaque->btpo.level != level - 1)
1762  ereport(ERROR,
1763  (errcode(ERRCODE_INDEX_CORRUPTED),
1764  errmsg_internal("downlink points to block in index \"%s\" whose level is not one level down",
1765  RelationGetRelationName(state->rel)),
1766  errdetail_internal("Top parent/target block=%u block pointed to=%u expected level=%u level in pointed to block=%u.",
1767  state->targetblock, childblk,
1768  level - 1, copaque->btpo.level)));
1769 
1770  level = copaque->btpo.level;
1771  itemid = PageGetItemIdCareful(state, childblk, child,
1772  P_FIRSTDATAKEY(copaque));
1773  itup = (IndexTuple) PageGetItem(child, itemid);
1774  childblk = BTreeInnerTupleGetDownLink(itup);
1775  /* Be slightly more pro-active in freeing this memory, just in case */
1776  pfree(child);
1777  }
1778 
1779  /*
1780  * Since there cannot be a concurrent VACUUM operation in readonly mode,
1781  * and since a page has no links within other pages (siblings and parent)
1782  * once it is marked fully deleted, it should be impossible to land on a
1783  * fully deleted page. See bt_downlink_check() for further details.
1784  *
1785  * The bt_downlink_check() P_ISDELETED() check is repeated here because
1786  * bt_downlink_check() does not visit pages reachable through negative
1787  * infinity items. Besides, bt_downlink_check() is unwilling to descend
1788  * multiple levels. (The similar bt_downlink_check() P_ISDELETED() check
1789  * within bt_check_level_from_leftmost() won't reach the page either,
1790  * since the leaf's live siblings should have their sibling links updated
1791  * to bypass the deletion target page when it is marked fully dead.)
1792  *
1793  * If this error is raised, it might be due to a previous multi-level page
1794  * deletion that failed to realize that it wasn't yet safe to mark the
1795  * leaf page as fully dead. A "dangling downlink" will still remain when
1796  * this happens. The fact that the dangling downlink's page (the leaf's
1797  * parent/ancestor page) lacked a downlink is incidental.
1798  */
1799  if (P_ISDELETED(copaque))
1800  ereport(ERROR,
1801  (errcode(ERRCODE_INDEX_CORRUPTED),
1802  errmsg_internal("downlink to deleted leaf page found in index \"%s\"",
1803  RelationGetRelationName(state->rel)),
1804  errdetail_internal("Top parent/target block=%u leaf block=%u top parent/target lsn=%X/%X.",
1805  state->targetblock, childblk,
1806  (uint32) (state->targetlsn >> 32),
1807  (uint32) state->targetlsn)));
1808 
1809  /*
1810  * Iff leaf page is half-dead, its high key top parent link should point
1811  * to what VACUUM considered to be the top parent page at the instant it
1812  * was interrupted. Provided the high key link actually points to the
1813  * target page, the missing downlink we detected is consistent with there
1814  * having been an interrupted multi-level page deletion. This means that
1815  * the subtree with the target page at its root (a page deletion chain) is
1816  * in a consistent state, enabling VACUUM to resume deleting the entire
1817  * chain the next time it encounters the half-dead leaf page.
1818  */
1819  if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque))
1820  {
1821  itemid = PageGetItemIdCareful(state, childblk, child, P_HIKEY);
1822  itup = (IndexTuple) PageGetItem(child, itemid);
1823  if (BTreeTupleGetTopParent(itup) == state->targetblock)
1824  return;
1825  }
1826 
1827  ereport(ERROR,
1828  (errcode(ERRCODE_INDEX_CORRUPTED),
1829  errmsg("internal index block lacks downlink in index \"%s\"",
1830  RelationGetRelationName(state->rel)),
1831  errdetail_internal("Block=%u level=%u page lsn=%X/%X.",
1832  state->targetblock, topaque->btpo.level,
1833  (uint32) (state->targetlsn >> 32),
1834  (uint32) state->targetlsn)));
1835 }
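
The downlinkfilter probed by this function is populated during the earlier structural pass over internal pages. A minimal sketch of that fingerprinting side (the condition and names mirror this file's conventions, but the snippet is illustrative):

/*
 * Sketch: fingerprint each downlink block seen on an internal page, so
 * that bt_downlink_missing_check() can later probe for its own block
 */
if (!P_ISLEAF(topaque) && state->heapallindexed && state->readonly)
{
    BlockNumber childblock = BTreeInnerTupleGetDownLink(itup);

    bloom_add_element(state->downlinkfilter,
                      (unsigned char *) &childblock,
                      sizeof(BlockNumber));
}
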
1836 
1837 /*
1838  * Per-tuple callback from table_index_build_scan, used to determine if index has
1839  * all the entries that definitely should have been observed in leaf pages of
1840  * the target index (that is, all IndexTuples that were fingerprinted by our
1841  * Bloom filter). All heapallindexed checks occur here.
1842  *
1843  * The redundancy between an index and the table it indexes provides a good
1844  * opportunity to detect corruption, especially corruption within the table.
1845  * The high level principle behind the verification performed here is that any
1846  * IndexTuple that should be in an index following a fresh CREATE INDEX (based
1847  * on the same index definition) should also have been in the original,
1848  * existing index, which should have used exactly the same representation.
1849  *
1850  * Since the overall structure of the index has already been verified, the most
1851  * likely explanation for error here is a corrupt heap page (could be logical
1852  * or physical corruption). Index corruption may still be detected here,
1853  * though. Only readonly callers will have verified that left links and right
1854  * links are in agreement, and so it's possible that a leaf page transposition
1855  * within index is actually the source of corruption detected here (for
1856  * !readonly callers). The checks performed only for readonly callers might
1857  * more accurately frame the problem as a cross-page invariant issue (this
1858  * could even be due to recovery not replaying all WAL records). The !readonly
1859  * ERROR message raised here includes a HINT about retrying with readonly
1860  * verification, just in case it's a cross-page invariant issue, though that
1861  * isn't particularly likely.
1862  *
1863  * table_index_build_scan() expects to be able to find the root tuple when a
1864  * heap-only tuple (the live tuple at the end of some HOT chain) needs to be
1865  * indexed, in order to replace the actual tuple's TID with the root tuple's
1866  * TID (which is what we're actually passed back here). The index build heap
1867  * scan code will raise an error when a tuple that claims to be the root of the
1868  * heap-only tuple's HOT chain cannot be located. This catches cases where the
1869  * original root item offset/root tuple for a HOT chain indicates (for whatever
1870  * reason) that the entire HOT chain is dead, despite the fact that the latest
1871  * heap-only tuple should be indexed. When this happens, sequential scans may
1872  * always give correct answers, and all indexes may be considered structurally
1873  * consistent (i.e. the nbtree structural checks would not detect corruption).
1874  * It may be the case that only index scans give wrong answers, and yet heap or
1875  * SLRU corruption is the real culprit. (While it's true that LP_DEAD bit
1876  * setting will probably also leave the index in a corrupt state before too
1877  * long, the problem is nonetheless that there is heap corruption.)
1878  *
1879  * Heap-only tuple handling within table_index_build_scan() works in a way that
1880  * helps us to detect index tuples that contain the wrong values (values that
1881  * don't match the latest tuple in the HOT chain). This can happen when there
1882  * is no superseding index tuple due to a faulty assessment of HOT safety,
1883  * perhaps during the original CREATE INDEX. Because the latest tuple's
1884  * contents are used with the root TID, an error will be raised when a tuple
1885  * with the same TID but non-matching attribute values is passed back to us.
1886  * Faulty assessment of HOT-safety was behind at least two distinct CREATE
1887  * INDEX CONCURRENTLY bugs that made it into stable releases, one of which was
1888  * undetected for many years. In short, the same principle that allows a
1889  * REINDEX to repair corruption when there was an (undetected) broken HOT chain
1890  * also allows us to detect the corruption in many cases.
1891  */
1892 static void
1893 bt_tuple_present_callback(Relation index, HeapTuple htup, Datum *values,
1894  bool *isnull, bool tupleIsAlive, void *checkstate)
1895 {
1896  BtreeCheckState *state = (BtreeCheckState *) checkstate;
1897  IndexTuple itup,
1898  norm;
1899 
1900  Assert(state->heapallindexed);
1901 
1902  /* Generate a normalized index tuple for fingerprinting */
1903  itup = index_form_tuple(RelationGetDescr(index), values, isnull);
1904  itup->t_tid = htup->t_self;
1905  norm = bt_normalize_tuple(state, itup);
1906 
1907  /* Probe Bloom filter -- tuple should be present */
1908  if (bloom_lacks_element(state->filter, (unsigned char *) norm,
1909  IndexTupleSize(norm)))
1910  ereport(ERROR,
1911  (errcode(ERRCODE_DATA_CORRUPTED),
1912  errmsg("heap tuple (%u,%u) from table \"%s\" lacks matching index tuple within index \"%s\"",
1913  ItemPointerGetBlockNumber(&(itup->t_tid)),
1914  ItemPointerGetOffsetNumber(&(itup->t_tid)),
1915  RelationGetRelationName(state->heaprel),
1916  RelationGetRelationName(state->rel)),
1917  !state->readonly
1918  ? errhint("Retrying verification using the function bt_index_parent_check() might provide a more specific error.")
1919  : 0));
1920 
1921  state->heaptuplespresent++;
1922  pfree(itup);
1923  /* Cannot leak memory here */
1924  if (norm != itup)
1925  pfree(norm);
1926 }
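
Taken together with the fingerprinting done during the structural pass, this callback completes a fingerprint-then-probe protocol. A condensed sketch of the whole flow using the lib/bloomfilter.h API that this file relies on; total_elems, seed, and norm are placeholders here, not the values actually computed elsewhere in this file:

#include "lib/bloomfilter.h"

/* 1. Size the filter for the estimated number of index tuples */
bloom_filter *filter = bloom_create(total_elems, maintenance_work_mem, seed);

/* 2. Structural pass: fingerprint every (normalized) leaf tuple */
bloom_add_element(filter, (unsigned char *) norm, IndexTupleSize(norm));

/* 3. Heap pass: every visible heap tuple must probe as present */
if (bloom_lacks_element(filter, (unsigned char *) norm, IndexTupleSize(norm)))
    elog(ERROR, "missing index tuple");     /* illustrative message */

bloom_free(filter);

Because Bloom filters produce false positives but never false negatives on membership, a probe that reports an element as lacking is always trustworthy: corruption may occasionally slip through undetected, but it is never falsely reported.
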
1927 
1928 /*
1929  * Normalize an index tuple for fingerprinting.
1930  *
1931  * In general, index tuple formation is assumed to be deterministic by
1932  * heapallindexed verification, and IndexTuples are assumed immutable. While
1933  * the LP_DEAD bit is mutable in leaf pages, that's ItemId metadata, which is
1934  * not fingerprinted. Normalization is required to compensate for corner
1935  * cases where the determinism assumption doesn't quite work.
1936  *
1937  * There is currently one such case: index_form_tuple() does not try to hide
1938  * the source TOAST state of input datums. The executor applies TOAST
1939  * compression for heap tuples based on different criteria to the compression
1940  * applied within btinsert()'s call to index_form_tuple(): it sometimes
1941  * compresses more aggressively, resulting in compressed heap tuple datums but
1942  * uncompressed corresponding index tuple datums. A subsequent heapallindexed
1943  * verification will get a logically equivalent though bitwise unequal tuple
1944  * from index_form_tuple(). False positive heapallindexed corruption reports
1945  * could occur without normalizing away the inconsistency.
1946  *
1947  * Returned tuple is often caller's own original tuple. Otherwise, it is a
1948  * new representation of caller's original index tuple, palloc()'d in caller's
1949  * memory context.
1950  *
1951  * Note: This routine is not concerned with distinctions about the
1952  * representation of tuples beyond those that might break heapallindexed
1953  * verification. In particular, it won't try to normalize opclass-equal
1954  * datums with potentially distinct representations (e.g., btree/numeric_ops
1955  * index datums will not get their display scale normalized-away here).
1956  * Normalization may need to be expanded to handle more cases in the future,
1957  * though. For example, it's possible that non-pivot tuples could in the
1958  * future have alternative logically equivalent representations due to using
1959  * the INDEX_ALT_TID_MASK bit to implement intelligent deduplication.
1960  */
1961 static IndexTuple
1962 bt_normalize_tuple(BtreeCheckState *state, IndexTuple itup)
1963 {
1964  TupleDesc tupleDescriptor = RelationGetDescr(state->rel);
1965  Datum normalized[INDEX_MAX_KEYS];
1966  bool isnull[INDEX_MAX_KEYS];
1967  bool toast_free[INDEX_MAX_KEYS];
1968  bool formnewtup = false;
1969  IndexTuple reformed;
1970  int i;
1971 
1972  /* Easy case: It's immediately clear that tuple has no varlena datums */
1973  if (!IndexTupleHasVarwidths(itup))
1974  return itup;
1975 
1976  for (i = 0; i < tupleDescriptor->natts; i++)
1977  {
1978  Form_pg_attribute att;
1979 
1980  att = TupleDescAttr(tupleDescriptor, i);
1981 
1982  /* Assume untoasted/already normalized datum initially */
1983  toast_free[i] = false;
1984  normalized[i] = index_getattr(itup, att->attnum,
1985  tupleDescriptor,
1986  &isnull[i]);
1987  if (att->attbyval || att->attlen != -1 || isnull[i])
1988  continue;
1989 
1990  /*
1991  * Callers always pass a tuple that could safely be inserted into the
1992  * index without further processing, so an external varlena header
1993  * should never be encountered here
1994  */
1995  if (VARATT_IS_EXTERNAL(DatumGetPointer(normalized[i])))
1996  ereport(ERROR,
1997  (errcode(ERRCODE_INDEX_CORRUPTED),
1998  errmsg("external varlena datum in tuple that references heap row (%u,%u) in index \"%s\"",
1999  ItemPointerGetBlockNumber(&(itup->t_tid)),
2000  ItemPointerGetOffsetNumber(&(itup->t_tid)),
2001  RelationGetRelationName(state->rel))));
2002  else if (VARATT_IS_COMPRESSED(DatumGetPointer(normalized[i])))
2003  {
2004  formnewtup = true;
2005  normalized[i] = PointerGetDatum(PG_DETOAST_DATUM(normalized[i]));
2006  toast_free[i] = true;
2007  }
2008  }
2009 
2010  /* Easier case: Tuple has varlena datums, none of which are compressed */
2011  if (!formnewtup)
2012  return itup;
2013 
2014  /*
2015  * Hard case: Tuple had compressed varlena datums that necessitate
2016  * creating normalized version of the tuple from uncompressed input datums
2017  * (normalized input datums). This is rather naive, but shouldn't be
2018  * necessary too often.
2019  *
2020  * Note that we rely on deterministic index_form_tuple() TOAST compression
2021  * of normalized input.
2022  */
2023  reformed = index_form_tuple(tupleDescriptor, normalized, isnull);
2024  reformed->t_tid = itup->t_tid;
2025 
2026  /* Cannot leak memory here */
2027  for (i = 0; i < tupleDescriptor->natts; i++)
2028  if (toast_free[i])
2029  pfree(DatumGetPointer(normalized[i]));
2030 
2031  return reformed;
2032 }
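
Normalization only pays off if both sides of the heapallindexed protocol apply it. A hedged sketch of the fingerprinting side (the probing side is bt_tuple_present_callback() above):

/* Sketch: fingerprint a leaf tuple during the structural pass */
IndexTuple norm = bt_normalize_tuple(state, itup);

bloom_add_element(state->filter, (unsigned char *) norm,
                  IndexTupleSize(norm));
/* bt_normalize_tuple() may have palloc()'d a new tuple; be tidy */
if (norm != itup)
    pfree(norm);
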
2033 
2034 /*
2035  * Search for itup in index, starting from fast root page. itup must be a
2036  * non-pivot tuple. This is only supported with heapkeyspace indexes, since
2037  * we rely on having fully unique keys to find a match with only a single
2038  * visit to a leaf page, barring an interrupted page split, where we may have
2039  * to move right. (A concurrent page split is impossible because caller must
2040  * be readonly caller.)
2041  *
2042  * This routine can detect very subtle transitive consistency issues across
2043  * more than one level of the tree. Leaf pages all have a high key (even the
2044  * rightmost page has a conceptual positive infinity high key), but not a low
2045  * key. Their downlink in parent is a lower bound, which along with the high
2046  * key is almost enough to detect every possible inconsistency. A downlink
2047  * separator key value won't always be available from parent, though, because
2048  * the first items of internal pages are negative infinity items, truncated
2049  * down to zero attributes during internal page splits. While it's true that
2050  * bt_downlink_check() and the high key check can detect most imaginable key
2051  * space problems, there are remaining problems it won't detect with non-pivot
2052  * tuples in cousin leaf pages. Starting a search from the root for every
2053  * existing leaf tuple detects small inconsistencies in upper levels of the
2054  * tree that cannot be detected any other way. (Besides all this, this is
2055  * probably also useful as a direct test of the code used by index scans
2056  * themselves.)
2057  */
2058 static bool
2059 bt_rootdescend(BtreeCheckState *state, IndexTuple itup)
2060 {
2061  BTScanInsert key;
2062  BTStack stack;
2063  Buffer lbuf;
2064  bool exists;
2065 
2066  key = _bt_mkscankey(state->rel, itup);
2067  Assert(key->heapkeyspace && key->scantid != NULL);
2068 
2069  /*
2070  * Search from root.
2071  *
2072  * Ideally, we would arrange to only move right within _bt_search() when
2073  * an interrupted page split is detected (i.e. when the incomplete split
2074  * bit is found to be set), but for now we accept the possibility that
2075  * that could conceal an inconsistency.
2076  */
2077  Assert(state->readonly && state->rootdescend);
2078  exists = false;
2079  stack = _bt_search(state->rel, key, &lbuf, BT_READ, NULL);
2080 
2081  if (BufferIsValid(lbuf))
2082  {
2083  BTInsertStateData insertstate;
2084  OffsetNumber offnum;
2085  Page page;
2086 
2087  insertstate.itup = itup;
2088  insertstate.itemsz = MAXALIGN(IndexTupleSize(itup));
2089  insertstate.itup_key = key;
2090  insertstate.bounds_valid = false;
2091  insertstate.buf = lbuf;
2092 
2093  /* Get matching tuple on leaf page */
2094  offnum = _bt_binsrch_insert(state->rel, &insertstate);
2095  /* Compare first >= matching item on leaf page, if any */
2096  page = BufferGetPage(lbuf);
2097  if (offnum <= PageGetMaxOffsetNumber(page) &&
2098  _bt_compare(state->rel, key, page, offnum) == 0)
2099  exists = true;
2100  _bt_relbuf(state->rel, lbuf);
2101  }
2102 
2103  _bt_freestack(stack);
2104  pfree(key);
2105 
2106  return exists;
2107 }
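
A hedged sketch of the driving call site in bt_target_page_check(); the real site reports considerably more detail, but the shape and message are modeled on it:

/*
 * Sketch: every non-pivot tuple on a leaf target page must be
 * re-locatable by a fresh descent from the root
 */
if (state->rootdescend && P_ISLEAF(topaque) &&
    !bt_rootdescend(state, itup))
    ereport(ERROR,
            (errcode(ERRCODE_INDEX_CORRUPTED),
             errmsg("could not find tuple using search from root page in index \"%s\"",
                    RelationGetRelationName(state->rel))));
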
2108 
2109 /*
2110  * Is particular offset within page (whose special state is passed by caller)
2111  * the page negative-infinity item?
2112  *
2113  * As noted in comments above _bt_compare(), there is special handling of the
2114  * first data item as a "negative infinity" item. The hard-coding within
2115  * _bt_compare() makes comparing this item for the purposes of verification
2116  * pointless at best, since the IndexTuple only contains a valid TID (a
2117  * reference TID to child page).
2118  */
2119 static inline bool
2120 offset_is_negative_infinity(BTPageOpaque opaque, OffsetNumber offset)
2121 {
2122  /*
2123  * For internal pages only, the first item after high key, if any, is
2124  * negative infinity item. Internal pages always have a negative infinity
2125  * item, whereas leaf pages never have one. This implies that negative
2126  * infinity item is either first or second line item, or there is none
2127  * within page.
2128  *
2129  * Negative infinity items are a special case among pivot tuples. They
2130  * always have zero attributes, while all other pivot tuples always have
2131  * nkeyatts attributes.
2132  *
2133  * Right-most pages don't have a high key, but could be said to
2134  * conceptually have a "positive infinity" high key. Thus, there is a
2135  * symmetry between down link items in parent pages, and high keys in
2136  * children. Together, they represent the part of the key space that
2137  * belongs to each page in the index. For example, all children of the
2138  * root page will have negative infinity as a lower bound from root
2139  * negative infinity downlink, and positive infinity as an upper bound
2140  * (implicitly, from "imaginary" positive infinity high key in root).
2141  */
2142  return !P_ISLEAF(opaque) && offset == P_FIRSTDATAKEY(opaque);
2143 }
2144 
2145 /*
2146  * Does the invariant hold that the key is strictly less than a given upper
2147  * bound offset item?
2148  *
2149  * Verifies line pointer on behalf of caller.
2150  *
2151  * If this function returns false, convention is that caller throws error due
2152  * to corruption.
2153  */
2154 static inline bool
2155 invariant_l_offset(BtreeCheckState *state, BTScanInsert key,
2156  OffsetNumber upperbound)
2157 {
2158  ItemId itemid;
2159  int32 cmp;
2160 
2161  Assert(key->pivotsearch);
2162 
2163  /* Verify line pointer before checking tuple */
2164  itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
2165  upperbound);
2166  /* pg_upgrade'd indexes may legally have equal sibling tuples */
2167  if (!key->heapkeyspace)
2168  return invariant_leq_offset(state, key, upperbound);
2169 
2170  cmp = _bt_compare(state->rel, key, state->target, upperbound);
2171 
2172  /*
2173  * _bt_compare() is capable of determining that a scankey with a
2174  * filled-out attribute is greater than pivot tuples where the comparison
2175  * is resolved at a truncated attribute (value of attribute in pivot is
2176  * minus infinity). However, it is not capable of determining that a
2177  * scankey is _less than_ a tuple on the basis of a comparison resolved at
2178  * _scankey_ minus infinity attribute. Complete an extra step to simulate
2179  * having minus infinity values for omitted scankey attribute(s).
2180  */
2181  if (cmp == 0)
2182  {
2183  BTPageOpaque topaque;
2184  IndexTuple ritup;
2185  int uppnkeyatts;
2186  ItemPointer rheaptid;
2187  bool nonpivot;
2188 
2189  ritup = (IndexTuple) PageGetItem(state->target, itemid);
2190  topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
2191  nonpivot = P_ISLEAF(topaque) && upperbound >= P_FIRSTDATAKEY(topaque);
2192 
2193  /* Get number of keys + heap TID for item to the right */
2194  uppnkeyatts = BTreeTupleGetNKeyAtts(ritup, state->rel);
2195  rheaptid = BTreeTupleGetHeapTIDCareful(state, ritup, nonpivot);
2196 
2197  /* Heap TID is tiebreaker key attribute */
2198  if (key->keysz == uppnkeyatts)
2199  return key->scantid == NULL && rheaptid != NULL;
2200 
2201  return key->keysz < uppnkeyatts;
2202  }
2203 
2204  return cmp < 0;
2205 }
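
To make the extra step concrete, a worked example with illustrative values (not taken from the source):

/*
 * Two-key-attribute index. The scankey was built from a pivot tuple
 * that was truncated to one attribute during an internal page split:
 *
 *     scankey:         ("foo")          keysz = 1, scantid = NULL
 *     upperbound item: ("foo", "bar")   uppnkeyatts = 2
 *
 * _bt_compare() returns 0 because the comparison is exhausted after the
 * scankey's single attribute. The extra step then simulates a minus
 * infinity value for the scankey's omitted attribute: keysz (1) <
 * uppnkeyatts (2), so the invariant "key < item" is judged to hold.
 * Had the attribute counts been equal, the heap TID tiebreaker would
 * apply instead: scantid == NULL (minus infinity) is strictly less
 * than any real heap TID present on the item.
 */
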
2206 
2207 /*
2208  * Does the invariant hold that the key is less than or equal to a given upper
2209  * bound offset item?
2210  *
2211  * Caller should have verified that upperbound's line pointer is consistent
2212  * using PageGetItemIdCareful() call.
2213  *
2214  * If this function returns false, convention is that caller throws error due
2215  * to corruption.
2216  */
2217 static inline bool
2218 invariant_leq_offset(BtreeCheckState *state, BTScanInsert key,
2219  OffsetNumber upperbound)
2220 {
2221  int32 cmp;
2222 
2223  Assert(key->pivotsearch);
2224 
2225  cmp = _bt_compare(state->rel, key, state->target, upperbound);
2226 
2227  return cmp <= 0;
2228 }
2229 
2230 /*
2231  * Does the invariant hold that the key is strictly greater than a given lower
2232  * bound offset item?
2233  *
2234  * Caller should have verified that lowerbound's line pointer is consistent
2235  * using PageGetItemIdCareful() call.
2236  *
2237  * If this function returns false, convention is that caller throws error due
2238  * to corruption.
2239  */
2240 static inline bool
2241 invariant_g_offset(BtreeCheckState *state, BTScanInsert key,
2242  OffsetNumber lowerbound)
2243 {
2244  int32 cmp;
2245 
2246  Assert(key->pivotsearch);
2247 
2248  cmp = _bt_compare(state->rel, key, state->target, lowerbound);
2249 
2250  /* pg_upgrade'd indexes may legally have equal sibling tuples */
2251  if (!key->heapkeyspace)
2252  return cmp >= 0;
2253 
2254  /*
2255  * No need to consider the possibility that scankey has attributes that we
2256  * need to force to be interpreted as negative infinity. _bt_compare() is
2257  * able to determine that scankey is greater than negative infinity. The
2258  * distinction between "==" and "<" isn't interesting here, since
2259  * corruption is indicated either way.
2260  */
2261  return cmp > 0;
2262 }
2263 
2264 /*
2265  * Does the invariant hold that the key is strictly less than a given upper
2266  * bound offset item, with the offset relating to a caller-supplied page that
2267  * is not the current target page?
2268  *
2269  * Caller's non-target page is a child page of the target, checked as part of
2270  * checking a property of the target page (i.e. the key comes from the
2271  * target). Verifies line pointer on behalf of caller.
2272  *
2273  * If this function returns false, convention is that caller throws error due
2274  * to corruption.
2275  */
2276 static inline bool
2277 invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key,
2278  BlockNumber nontargetblock, Page nontarget,
2279  OffsetNumber upperbound)
2280 {
2281  ItemId itemid;
2282  int32 cmp;
2283 
2284  Assert(key->pivotsearch);
2285 
2286  /* Verify line pointer before checking tuple */
2287  itemid = PageGetItemIdCareful(state, nontargetblock, nontarget,
2288  upperbound);
2289  cmp = _bt_compare(state->rel, key, nontarget, upperbound);
2290 
2291  /* pg_upgrade'd indexes may legally have equal sibling tuples */
2292  if (!key->heapkeyspace)
2293  return cmp <= 0;
2294 
2295  /* See invariant_l_offset() for an explanation of this extra step */
2296  if (cmp == 0)
2297  {
2298  IndexTuple child;
2299  int uppnkeyatts;
2300  ItemPointer childheaptid;
2301  BTPageOpaque copaque;
2302  bool nonpivot;
2303 
2304  child = (IndexTuple) PageGetItem(nontarget, itemid);
2305  copaque = (BTPageOpaque) PageGetSpecialPointer(nontarget);
2306  nonpivot = P_ISLEAF(copaque) && upperbound >= P_FIRSTDATAKEY(copaque);
2307 
2308  /* Get number of keys + heap TID for child/non-target item */
2309  uppnkeyatts = BTreeTupleGetNKeyAtts(child, state->rel);
2310  childheaptid = BTreeTupleGetHeapTIDCareful(state, child, nonpivot);
2311 
2312  /* Heap TID is tiebreaker key attribute */
2313  if (key->keysz == uppnkeyatts)
2314  return key->scantid == NULL && childheaptid != NULL;
2315 
2316  return key->keysz < uppnkeyatts;
2317  }
2318 
2319  return cmp < 0;
2320 }
2321 
2322 /*
2323  * Given a block number of a B-Tree page, return page in palloc()'d memory.
2324  * While at it, perform some basic checks of the page.
2325  *
2326  * There is never an attempt to get a consistent view of multiple pages using
2327  * multiple concurrent buffer locks; in general, we only acquire a single pin
2328  * and buffer lock at a time, which is often all that the nbtree code requires.
2329  *
2330  * Operating on a copy of the page is useful because it prevents control
2331  * getting stuck in an uninterruptible state when an underlying operator class
2332  * misbehaves.
2333  */
2334 static Page
2335 palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum)
2336 {
2337  Buffer buffer;
2338  Page page;
2339  BTPageOpaque opaque;
2340  OffsetNumber maxoffset;
2341 
2342  page = palloc(BLCKSZ);
2343 
2344  /*
2345  * We copy the page into local storage to avoid holding pin on the buffer
2346  * longer than we must.
2347  */
2348  buffer = ReadBufferExtended(state->rel, MAIN_FORKNUM, blocknum, RBM_NORMAL,
2349  state->checkstrategy);
2350  LockBuffer(buffer, BT_READ);
2351 
2352  /*
2353  * Perform the same basic sanity checking that nbtree itself performs for
2354  * every page:
2355  */
2356  _bt_checkpage(state->rel, buffer);
2357 
2358  /* Only use copy of page in palloc()'d memory */
2359  memcpy(page, BufferGetPage(buffer), BLCKSZ);
2360  UnlockReleaseBuffer(buffer);
2361 
2362  opaque = (BTPageOpaque) PageGetSpecialPointer(page);
2363 
2364  if (P_ISMETA(opaque) && blocknum != BTREE_METAPAGE)
2365  ereport(ERROR,
2366  (errcode(ERRCODE_INDEX_CORRUPTED),
2367  errmsg("invalid meta page found at block %u in index \"%s\"",
2368  blocknum, RelationGetRelationName(state->rel))));
2369 
2370  /* Check page from block that ought to be meta page */
2371  if (blocknum == BTREE_METAPAGE)
2372  {
2373  BTMetaPageData *metad = BTPageGetMeta(page);
2374 
2375  if (!P_ISMETA(opaque) ||
2376  metad->btm_magic != BTREE_MAGIC)
2377  ereport(ERROR,
2378  (errcode(ERRCODE_INDEX_CORRUPTED),
2379  errmsg("index \"%s\" meta page is corrupt",
2380  RelationGetRelationName(state->rel))));
2381 
2382  if (metad->btm_version < BTREE_MIN_VERSION ||
2383  metad->btm_version > BTREE_VERSION)
2384  ereport(ERROR,
2385  (errcode(ERRCODE_INDEX_CORRUPTED),
2386  errmsg("version mismatch in index \"%s\": file version %d, "
2387  "current version %d, minimum supported version %d",
2388  RelationGetRelationName(state->rel),
2389  metad->btm_version, BTREE_VERSION,
2390  BTREE_MIN_VERSION)));
2391 
2392  /* Finished with metapage checks */
2393  return page;
2394  }
2395 
2396  /*
2397  * Deleted pages have no sane "level" field, so we can only check the
2398  * level of non-deleted pages.
2399  */
2400  if (P_ISLEAF(opaque) && !P_ISDELETED(opaque) && opaque->btpo.level != 0)
2401  ereport(ERROR,
2402  (errcode(ERRCODE_INDEX_CORRUPTED),
2403  errmsg("invalid leaf page level %u for block %u in index \"%s\"",
2404  opaque->btpo.level, blocknum, RelationGetRelationName(state->rel))));
2405 
2406  if (!P_ISLEAF(opaque) && !P_ISDELETED(opaque) &&
2407  opaque->btpo.level == 0)
2408  ereport(ERROR,
2409  (errcode(ERRCODE_INDEX_CORRUPTED),
2410  errmsg("invalid internal page level 0 for block %u in index \"%s\"",
2411  blocknum, RelationGetRelationName(state->rel))));
2412 
2413  /*
2414  * Sanity checks for number of items on page.
2415  *
2416  * As noted at the beginning of _bt_binsrch(), an internal page must have
2417  * children, since there must always be a negative infinity downlink
2418  * (there may also be a highkey). In the case of non-rightmost leaf
2419  * pages, there must be at least a highkey.
2420  *
2421  * This is correct when pages are half-dead, since internal pages are
2422  * never half-dead, and leaf pages must have a high key when half-dead
2423  * (the rightmost page can never be deleted). It's also correct with
2424  * fully deleted pages: _bt_unlink_halfdead_page() doesn't change anything
2425  * about the target page other than setting the page as fully dead, and
2426  * setting its xact field. In particular, it doesn't change the sibling
2427  * links in the deletion target itself, since they're required when index
2428  * scans land on the deletion target, and then need to move right (or need
2429  * to move left, in the case of backward index scans).
2430  */
2431  maxoffset = PageGetMaxOffsetNumber(page);
2432  if (maxoffset > MaxIndexTuplesPerPage)
2433  ereport(ERROR,
2434  (errcode(ERRCODE_INDEX_CORRUPTED),
2435  errmsg("Number of items on block %u of index \"%s\" exceeds MaxIndexTuplesPerPage (%u)",
2436  blocknum, RelationGetRelationName(state->rel),
2437  MaxIndexTuplesPerPage)));
2438 
2439  if (!P_ISLEAF(opaque) && maxoffset < P_FIRSTDATAKEY(opaque))
2440  ereport(ERROR,
2441  (errcode(ERRCODE_INDEX_CORRUPTED),
2442  errmsg("internal block %u in index \"%s\" lacks high key and/or at least one downlink",
2443  blocknum, RelationGetRelationName(state->rel))));
2444 
2445  if (P_ISLEAF(opaque) && !P_RIGHTMOST(opaque) && maxoffset < P_HIKEY)
2446  ereport(ERROR,
2447  (errcode(ERRCODE_INDEX_CORRUPTED),
2448  errmsg("non-rightmost leaf block %u in index \"%s\" lacks high key item",
2449  blocknum, RelationGetRelationName(state->rel))));
2450 
2451  /*
2452  * In general, internal pages are never marked half-dead, except on
2453  * versions of Postgres prior to 9.4, where it can be a valid transient
2454  * state. This state is nonetheless treated as corruption by VACUUM from
2455  * version 9.4 on, so do the same here. See _bt_pagedel() for full
2456  * details.
2457  *
2458  * Internal pages should never have garbage items, either.
2459  */
2460  if (!P_ISLEAF(opaque) && P_ISHALFDEAD(opaque))
2461  ereport(ERROR,
2462  (errcode(ERRCODE_INDEX_CORRUPTED),
2463  errmsg("internal page block %u in index \"%s\" is half-dead",
2464  blocknum, RelationGetRelationName(state->rel)),
2465  errhint("This can be caused by an interrupted VACUUM in version 9.3 or older, before upgrade. Please REINDEX it.")));
2466 
2467  if (!P_ISLEAF(opaque) && P_HAS_GARBAGE(opaque))
2468  ereport(ERROR,
2469  (errcode(ERRCODE_INDEX_CORRUPTED),
2470  errmsg("internal page block %u in index \"%s\" has garbage items",
2471  blocknum, RelationGetRelationName(state->rel))));
2472 
2473  return page;
2474 }
2475 
2476 /*
2477  * _bt_mkscankey() wrapper that automatically prevents insertion scankey from
2478  * being considered greater than the pivot tuple that its values originated
2479  * from (or some other identical pivot tuple) in the common case where there
2480  * are truncated/minus infinity attributes. Without this extra step, there
2481  * are forms of corruption that amcheck could theoretically fail to report.
2482  *
2483  * For example, invariant_g_offset() might miss a cross-page invariant failure
2484  * on an internal level if the scankey built from the first item on the
2485  * target's right sibling page happened to be equal to (not greater than) the
2486  * last item on target page. The !pivotsearch tiebreaker in _bt_compare()
2487  * might otherwise cause amcheck to assume (rather than actually verify) that
2488  * the scankey is greater.
2489  */
2490 static inline BTScanInsert
2491 bt_mkscankey_pivotsearch(Relation rel, IndexTuple itup)
2492 {
2493  BTScanInsert skey;
2494 
2495  skey = _bt_mkscankey(rel, itup);
2496  skey->pivotsearch = true;
2497 
2498  return skey;
2499 }
2500 
2501 /*
2502  * PageGetItemId() wrapper that validates returned line pointer.
2503  *
2504  * Buffer page/page item access macros generally trust that line pointers are
2505  * not corrupt, which might cause problems for verification itself. For
2506  * example, there is no bounds checking in PageGetItem(). Passing it a
2507  * corrupt line pointer can cause it to return a tuple/pointer that is unsafe
2508  * to dereference.
2509  *
2510  * Validating line pointers before tuples avoids undefined behavior and
2511  * assertion failures with corrupt indexes, making the verification process
2512  * more robust and predictable.
2513  */
2514 static ItemId
2515 PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block, Page page,
2516  OffsetNumber offset)
2517 {
2518  ItemId itemid = PageGetItemId(page, offset);
2519 
2520  if (ItemIdGetOffset(itemid) + ItemIdGetLength(itemid) >
2521  BLCKSZ - sizeof(BTPageOpaqueData))
2522  ereport(ERROR,
2523  (errcode(ERRCODE_INDEX_CORRUPTED),
2524  errmsg("line pointer points past end of tuple space in index \"%s\"",
2525  RelationGetRelationName(state->rel)),
2526  errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
2527  block, offset, ItemIdGetOffset(itemid),
2528  ItemIdGetLength(itemid),
2529  ItemIdGetFlags(itemid))));
2530 
2531  /*
2532  * Verify that line pointer isn't LP_REDIRECT or LP_UNUSED, since nbtree
2533  * never uses either. Verify that line pointer has storage, too, since
2534  * even LP_DEAD items should have storage within nbtree.
2535  */
2536  if (ItemIdIsRedirected(itemid) || !ItemIdIsUsed(itemid) ||
2537  ItemIdGetLength(itemid) == 0)
2538  ereport(ERROR,
2539  (errcode(ERRCODE_INDEX_CORRUPTED),
2540  errmsg("invalid line pointer storage in index \"%s\"",
2541  RelationGetRelationName(state->rel)),
2542  errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
2543  block, offset, ItemIdGetOffset(itemid),
2544  ItemIdGetLength(itemid),
2545  ItemIdGetFlags(itemid))));
2546 
2547  return itemid;
2548 }
2549 
2550 /*
2551  * BTreeTupleGetHeapTID() wrapper that lets caller enforce that a heap TID must
2552  * be present in cases where that is mandatory.
2553  *
2554  * This doesn't add much as of BTREE_VERSION 4, since the INDEX_ALT_TID_MASK
2555  * bit is effectively a proxy for whether or not the tuple is a pivot tuple.
2556  * It may become more useful in the future, when non-pivot tuples support their
2557  * own alternative INDEX_ALT_TID_MASK representation.
2558  */
2559 static inline ItemPointer
2560 BTreeTupleGetHeapTIDCareful(BtreeCheckState *state, IndexTuple itup,
2561  bool nonpivot)
2562 {
2563  ItemPointer result = BTreeTupleGetHeapTID(itup);
2564  BlockNumber targetblock = state->targetblock;
2565 
2566  if (result == NULL && nonpivot)
2567  ereport(ERROR,
2568  (errcode(ERRCODE_INDEX_CORRUPTED),
2569  errmsg("block %u or its right sibling block or child block in index \"%s\" contains non-pivot tuple that lacks a heap TID",
2570  targetblock, RelationGetRelationName(state->rel))));
2571 
2572  return result;
2573 }