/*
 * NOTE(review): the three lines of doxygen website navigation chrome that
 * preceded this file ("PostgreSQL Source Code git master", etc.) were not
 * part of the source and have been replaced by this provenance comment.
 */
1 /*-------------------------------------------------------------------------
2  *
3  * nbtsearch.c
4  * Search code for postgres btrees.
5  *
6  *
7  * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * IDENTIFICATION
11  * src/backend/access/nbtree/nbtsearch.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 
16 #include "postgres.h"
17 
18 #include "access/nbtree.h"
19 #include "access/relscan.h"
20 #include "miscadmin.h"
21 #include "pgstat.h"
22 #include "storage/predicate.h"
23 #include "utils/lsyscache.h"
24 #include "utils/rel.h"
25 
26 
29 static int _bt_binsrch_posting(BTScanInsert key, Page page,
30  OffsetNumber offnum);
31 static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir,
32  OffsetNumber offnum);
33 static void _bt_saveitem(BTScanOpaque so, int itemIndex,
34  OffsetNumber offnum, IndexTuple itup);
35 static int _bt_setuppostingitems(BTScanOpaque so, int itemIndex,
36  OffsetNumber offnum, ItemPointer heapTid,
37  IndexTuple itup);
38 static inline void _bt_savepostingitem(BTScanOpaque so, int itemIndex,
39  OffsetNumber offnum,
40  ItemPointer heapTid, int tupleOffset);
41 static bool _bt_steppage(IndexScanDesc scan, ScanDirection dir);
42 static bool _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir);
43 static bool _bt_parallel_readpage(IndexScanDesc scan, BlockNumber blkno,
44  ScanDirection dir);
45 static Buffer _bt_walk_left(Relation rel, Buffer buf, Snapshot snapshot);
46 static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
47 static inline void _bt_initialize_more_data(BTScanOpaque so, ScanDirection dir);
48 
49 
50 /*
51  * _bt_drop_lock_and_maybe_pin()
52  *
53  * Unlock the buffer; and if it is safe to release the pin, do that, too.
54  * This will prevent vacuum from stalling in a blocked state trying to read a
55  * page when a cursor is sitting on it.
56  *
57  * See nbtree/README section on making concurrent TID recycling safe.
58  */
59 static void
61 {
62  _bt_unlockbuf(scan->indexRelation, sp->buf);
63 
64  if (IsMVCCSnapshot(scan->xs_snapshot) &&
66  !scan->xs_want_itup)
67  {
68  ReleaseBuffer(sp->buf);
69  sp->buf = InvalidBuffer;
70  }
71 }
72 
73 /*
74  * _bt_search() -- Search the tree for a particular scankey,
75  * or more precisely for the first leaf page it could be on.
76  *
77  * The passed scankey is an insertion-type scankey (see nbtree/README),
78  * but it can omit the rightmost column(s) of the index.
79  *
80  * Return value is a stack of parent-page pointers (i.e. there is no entry for
81  * the leaf level/page). *bufP is set to the address of the leaf-page buffer,
82  * which is locked and pinned. No locks are held on the parent pages,
83  * however!
84  *
85  * If the snapshot parameter is not NULL, "old snapshot" checking will take
86  * place during the descent through the tree. This is not needed when
87  * positioning for an insert or delete, so NULL is used for those cases.
88  *
89  * The returned buffer is locked according to access parameter. Additionally,
90  * access = BT_WRITE will allow an empty root page to be created and returned.
91  * When access = BT_READ, an empty index will result in *bufP being set to
92  * InvalidBuffer. Also, in BT_WRITE mode, any incomplete splits encountered
93  * during the search will be finished.
94  */
95 BTStack
97  Snapshot snapshot)
98 {
99  BTStack stack_in = NULL;
100  int page_access = BT_READ;
101 
102  /* Get the root page to start with */
103  *bufP = _bt_getroot(rel, access);
104 
105  /* If index is empty and access = BT_READ, no root page is created. */
106  if (!BufferIsValid(*bufP))
107  return (BTStack) NULL;
108 
109  /* Loop iterates once per level descended in the tree */
110  for (;;)
111  {
112  Page page;
113  BTPageOpaque opaque;
114  OffsetNumber offnum;
115  ItemId itemid;
116  IndexTuple itup;
117  BlockNumber child;
118  BTStack new_stack;
119 
120  /*
121  * Race -- the page we just grabbed may have split since we read its
122  * downlink in its parent page (or the metapage). If it has, we may
123  * need to move right to its new sibling. Do that.
124  *
125  * In write-mode, allow _bt_moveright to finish any incomplete splits
126  * along the way. Strictly speaking, we'd only need to finish an
127  * incomplete split on the leaf page we're about to insert to, not on
128  * any of the upper levels (internal pages with incomplete splits are
129  * also taken care of in _bt_getstackbuf). But this is a good
130  * opportunity to finish splits of internal pages too.
131  */
132  *bufP = _bt_moveright(rel, key, *bufP, (access == BT_WRITE), stack_in,
133  page_access, snapshot);
134 
135  /* if this is a leaf page, we're done */
136  page = BufferGetPage(*bufP);
137  opaque = BTPageGetOpaque(page);
138  if (P_ISLEAF(opaque))
139  break;
140 
141  /*
142  * Find the appropriate pivot tuple on this page. Its downlink points
143  * to the child page that we're about to descend to.
144  */
145  offnum = _bt_binsrch(rel, key, *bufP);
146  itemid = PageGetItemId(page, offnum);
147  itup = (IndexTuple) PageGetItem(page, itemid);
148  Assert(BTreeTupleIsPivot(itup) || !key->heapkeyspace);
149  child = BTreeTupleGetDownLink(itup);
150 
151  /*
152  * We need to save the location of the pivot tuple we chose in a new
153  * stack entry for this page/level. If caller ends up splitting a
154  * page one level down, it usually ends up inserting a new pivot
155  * tuple/downlink immediately after the location recorded here.
156  */
157  new_stack = (BTStack) palloc(sizeof(BTStackData));
158  new_stack->bts_blkno = BufferGetBlockNumber(*bufP);
159  new_stack->bts_offset = offnum;
160  new_stack->bts_parent = stack_in;
161 
162  /*
163  * Page level 1 is lowest non-leaf page level prior to leaves. So, if
164  * we're on the level 1 and asked to lock leaf page in write mode,
165  * then lock next page in write mode, because it must be a leaf.
166  */
167  if (opaque->btpo_level == 1 && access == BT_WRITE)
168  page_access = BT_WRITE;
169 
170  /* drop the read lock on the page, then acquire one on its child */
171  *bufP = _bt_relandgetbuf(rel, *bufP, child, page_access);
172 
173  /* okay, all set to move down a level */
174  stack_in = new_stack;
175  }
176 
177  /*
178  * If we're asked to lock leaf in write mode, but didn't manage to, then
179  * relock. This should only happen when the root page is a leaf page (and
180  * the only page in the index other than the metapage).
181  */
182  if (access == BT_WRITE && page_access == BT_READ)
183  {
184  /* trade in our read lock for a write lock */
185  _bt_unlockbuf(rel, *bufP);
186  _bt_lockbuf(rel, *bufP, BT_WRITE);
187 
188  /*
189  * Race -- the leaf page may have split after we dropped the read lock
190  * but before we acquired a write lock. If it has, we may need to
191  * move right to its new sibling. Do that.
192  */
193  *bufP = _bt_moveright(rel, key, *bufP, true, stack_in, BT_WRITE,
194  snapshot);
195  }
196 
197  return stack_in;
198 }
199 
200 /*
201  * _bt_moveright() -- move right in the btree if necessary.
202  *
203  * When we follow a pointer to reach a page, it is possible that
204  * the page has changed in the meanwhile. If this happens, we're
205  * guaranteed that the page has "split right" -- that is, that any
206  * data that appeared on the page originally is either on the page
207  * or strictly to the right of it.
208  *
209  * This routine decides whether or not we need to move right in the
210  * tree by examining the high key entry on the page. If that entry is
211  * strictly less than the scankey, or <= the scankey in the
212  * key.nextkey=true case, then we followed the wrong link and we need
213  * to move right.
214  *
215  * The passed insertion-type scankey can omit the rightmost column(s) of the
216  * index. (see nbtree/README)
217  *
218  * When key.nextkey is false (the usual case), we are looking for the first
219  * item >= key. When key.nextkey is true, we are looking for the first item
220  * strictly greater than key.
221  *
222  * If forupdate is true, we will attempt to finish any incomplete splits
223  * that we encounter. This is required when locking a target page for an
224  * insertion, because we don't allow inserting on a page before the split
225  * is completed. 'stack' is only used if forupdate is true.
226  *
227  * On entry, we have the buffer pinned and a lock of the type specified by
228  * 'access'. If we move right, we release the buffer and lock and acquire
229  * the same on the right sibling. Return value is the buffer we stop at.
230  *
231  * If the snapshot parameter is not NULL, "old snapshot" checking will take
232  * place during the descent through the tree. This is not needed when
233  * positioning for an insert or delete, so NULL is used for those cases.
234  */
235 Buffer
238  Buffer buf,
239  bool forupdate,
240  BTStack stack,
241  int access,
242  Snapshot snapshot)
243 {
244  Page page;
245  BTPageOpaque opaque;
246  int32 cmpval;
247 
248  /*
249  * When nextkey = false (normal case): if the scan key that brought us to
250  * this page is > the high key stored on the page, then the page has split
251  * and we need to move right. (pg_upgrade'd !heapkeyspace indexes could
252  * have some duplicates to the right as well as the left, but that's
253  * something that's only ever dealt with on the leaf level, after
254  * _bt_search has found an initial leaf page.)
255  *
256  * When nextkey = true: move right if the scan key is >= page's high key.
257  * (Note that key.scantid cannot be set in this case.)
258  *
259  * The page could even have split more than once, so scan as far as
260  * needed.
261  *
262  * We also have to move right if we followed a link that brought us to a
263  * dead page.
264  */
265  cmpval = key->nextkey ? 0 : 1;
266 
267  for (;;)
268  {
269  page = BufferGetPage(buf);
270  TestForOldSnapshot(snapshot, rel, page);
271  opaque = BTPageGetOpaque(page);
272 
273  if (P_RIGHTMOST(opaque))
274  break;
275 
276  /*
277  * Finish any incomplete splits we encounter along the way.
278  */
279  if (forupdate && P_INCOMPLETE_SPLIT(opaque))
280  {
282 
283  /* upgrade our lock if necessary */
284  if (access == BT_READ)
285  {
286  _bt_unlockbuf(rel, buf);
287  _bt_lockbuf(rel, buf, BT_WRITE);
288  }
289 
290  if (P_INCOMPLETE_SPLIT(opaque))
291  _bt_finish_split(rel, buf, stack);
292  else
293  _bt_relbuf(rel, buf);
294 
295  /* re-acquire the lock in the right mode, and re-check */
296  buf = _bt_getbuf(rel, blkno, access);
297  continue;
298  }
299 
300  if (P_IGNORE(opaque) || _bt_compare(rel, key, page, P_HIKEY) >= cmpval)
301  {
302  /* step right one page */
303  buf = _bt_relandgetbuf(rel, buf, opaque->btpo_next, access);
304  continue;
305  }
306  else
307  break;
308  }
309 
310  if (P_IGNORE(opaque))
311  elog(ERROR, "fell off the end of index \"%s\"",
313 
314  return buf;
315 }
316 
317 /*
318  * _bt_binsrch() -- Do a binary search for a key on a particular page.
319  *
320  * On a leaf page, _bt_binsrch() returns the OffsetNumber of the first
321  * key >= given scankey, or > scankey if nextkey is true. (NOTE: in
322  * particular, this means it is possible to return a value 1 greater than the
323  * number of keys on the page, if the scankey is > all keys on the page.)
324  *
325  * On an internal (non-leaf) page, _bt_binsrch() returns the OffsetNumber
326  * of the last key < given scankey, or last key <= given scankey if nextkey
327  * is true. (Since _bt_compare treats the first data key of such a page as
328  * minus infinity, there will be at least one key < scankey, so the result
329  * always points at one of the keys on the page.) This key indicates the
330  * right place to descend to be sure we find all leaf keys >= given scankey
331  * (or leaf keys > given scankey when nextkey is true).
332  *
333  * This procedure is not responsible for walking right, it just examines
334  * the given page. _bt_binsrch() has no lock or refcount side effects
335  * on the buffer.
336  */
337 static OffsetNumber
340  Buffer buf)
341 {
342  Page page;
343  BTPageOpaque opaque;
344  OffsetNumber low,
345  high;
346  int32 result,
347  cmpval;
348 
349  page = BufferGetPage(buf);
350  opaque = BTPageGetOpaque(page);
351 
352  /* Requesting nextkey semantics while using scantid seems nonsensical */
353  Assert(!key->nextkey || key->scantid == NULL);
354  /* scantid-set callers must use _bt_binsrch_insert() on leaf pages */
355  Assert(!P_ISLEAF(opaque) || key->scantid == NULL);
356 
357  low = P_FIRSTDATAKEY(opaque);
358  high = PageGetMaxOffsetNumber(page);
359 
360  /*
361  * If there are no keys on the page, return the first available slot. Note
362  * this covers two cases: the page is really empty (no keys), or it
363  * contains only a high key. The latter case is possible after vacuuming.
364  * This can never happen on an internal page, however, since they are
365  * never empty (an internal page must have children).
366  */
367  if (unlikely(high < low))
368  return low;
369 
370  /*
371  * Binary search to find the first key on the page >= scan key, or first
372  * key > scankey when nextkey is true.
373  *
374  * For nextkey=false (cmpval=1), the loop invariant is: all slots before
375  * 'low' are < scan key, all slots at or after 'high' are >= scan key.
376  *
377  * For nextkey=true (cmpval=0), the loop invariant is: all slots before
378  * 'low' are <= scan key, all slots at or after 'high' are > scan key.
379  *
380  * We can fall out when high == low.
381  */
382  high++; /* establish the loop invariant for high */
383 
384  cmpval = key->nextkey ? 0 : 1; /* select comparison value */
385 
386  while (high > low)
387  {
388  OffsetNumber mid = low + ((high - low) / 2);
389 
390  /* We have low <= mid < high, so mid points at a real slot */
391 
392  result = _bt_compare(rel, key, page, mid);
393 
394  if (result >= cmpval)
395  low = mid + 1;
396  else
397  high = mid;
398  }
399 
400  /*
401  * At this point we have high == low, but be careful: they could point
402  * past the last slot on the page.
403  *
404  * On a leaf page, we always return the first key >= scan key (resp. >
405  * scan key), which could be the last slot + 1.
406  */
407  if (P_ISLEAF(opaque))
408  return low;
409 
410  /*
411  * On a non-leaf page, return the last key < scan key (resp. <= scan key).
412  * There must be one if _bt_compare() is playing by the rules.
413  */
414  Assert(low > P_FIRSTDATAKEY(opaque));
415 
416  return OffsetNumberPrev(low);
417 }
418 
419 /*
420  *
421  * _bt_binsrch_insert() -- Cacheable, incremental leaf page binary search.
422  *
423  * Like _bt_binsrch(), but with support for caching the binary search
424  * bounds. Only used during insertion, and only on the leaf page that it
425  * looks like caller will insert tuple on. Exclusive-locked and pinned
426  * leaf page is contained within insertstate.
427  *
428  * Caches the bounds fields in insertstate so that a subsequent call can
429  * reuse the low and strict high bounds of original binary search. Callers
430  * that use these fields directly must be prepared for the case where low
431  * and/or stricthigh are not on the same page (one or both exceed maxoff
432  * for the page). The case where there are no items on the page (high <
433  * low) makes bounds invalid.
434  *
435  * Caller is responsible for invalidating bounds when it modifies the page
436  * before calling here a second time, and for dealing with posting list
437  * tuple matches (callers can use insertstate's postingoff field to
438  * determine which existing heap TID will need to be replaced by a posting
439  * list split).
440  */
443 {
444  BTScanInsert key = insertstate->itup_key;
445  Page page;
446  BTPageOpaque opaque;
447  OffsetNumber low,
448  high,
449  stricthigh;
450  int32 result,
451  cmpval;
452 
453  page = BufferGetPage(insertstate->buf);
454  opaque = BTPageGetOpaque(page);
455 
456  Assert(P_ISLEAF(opaque));
457  Assert(!key->nextkey);
458  Assert(insertstate->postingoff == 0);
459 
460  if (!insertstate->bounds_valid)
461  {
462  /* Start new binary search */
463  low = P_FIRSTDATAKEY(opaque);
464  high = PageGetMaxOffsetNumber(page);
465  }
466  else
467  {
468  /* Restore result of previous binary search against same page */
469  low = insertstate->low;
470  high = insertstate->stricthigh;
471  }
472 
473  /* If there are no keys on the page, return the first available slot */
474  if (unlikely(high < low))
475  {
476  /* Caller can't reuse bounds */
477  insertstate->low = InvalidOffsetNumber;
478  insertstate->stricthigh = InvalidOffsetNumber;
479  insertstate->bounds_valid = false;
480  return low;
481  }
482 
483  /*
484  * Binary search to find the first key on the page >= scan key. (nextkey
485  * is always false when inserting).
486  *
487  * The loop invariant is: all slots before 'low' are < scan key, all slots
488  * at or after 'high' are >= scan key. 'stricthigh' is > scan key, and is
489  * maintained to save additional search effort for caller.
490  *
491  * We can fall out when high == low.
492  */
493  if (!insertstate->bounds_valid)
494  high++; /* establish the loop invariant for high */
495  stricthigh = high; /* high initially strictly higher */
496 
497  cmpval = 1; /* !nextkey comparison value */
498 
499  while (high > low)
500  {
501  OffsetNumber mid = low + ((high - low) / 2);
502 
503  /* We have low <= mid < high, so mid points at a real slot */
504 
505  result = _bt_compare(rel, key, page, mid);
506 
507  if (result >= cmpval)
508  low = mid + 1;
509  else
510  {
511  high = mid;
512  if (result != 0)
513  stricthigh = high;
514  }
515 
516  /*
517  * If tuple at offset located by binary search is a posting list whose
518  * TID range overlaps with caller's scantid, perform posting list
519  * binary search to set postingoff for caller. Caller must split the
520  * posting list when postingoff is set. This should happen
521  * infrequently.
522  */
523  if (unlikely(result == 0 && key->scantid != NULL))
524  {
525  /*
526  * postingoff should never be set more than once per leaf page
527  * binary search. That would mean that there are duplicate table
528  * TIDs in the index, which is never okay. Check for that here.
529  */
530  if (insertstate->postingoff != 0)
531  ereport(ERROR,
532  (errcode(ERRCODE_INDEX_CORRUPTED),
533  errmsg_internal("table tid from new index tuple (%u,%u) cannot find insert offset between offsets %u and %u of block %u in index \"%s\"",
534  ItemPointerGetBlockNumber(key->scantid),
536  low, stricthigh,
537  BufferGetBlockNumber(insertstate->buf),
538  RelationGetRelationName(rel))));
539 
540  insertstate->postingoff = _bt_binsrch_posting(key, page, mid);
541  }
542  }
543 
544  /*
545  * On a leaf page, a binary search always returns the first key >= scan
546  * key (at least in !nextkey case), which could be the last slot + 1. This
547  * is also the lower bound of cached search.
548  *
549  * stricthigh may also be the last slot + 1, which prevents caller from
550  * using bounds directly, but is still useful to us if we're called a
551  * second time with cached bounds (cached low will be < stricthigh when
552  * that happens).
553  */
554  insertstate->low = low;
555  insertstate->stricthigh = stricthigh;
556  insertstate->bounds_valid = true;
557 
558  return low;
559 }
560 
561 /*----------
562  * _bt_binsrch_posting() -- posting list binary search.
563  *
564  * Helper routine for _bt_binsrch_insert().
565  *
566  * Returns offset into posting list where caller's scantid belongs.
567  *----------
568  */
569 static int
571 {
572  IndexTuple itup;
573  ItemId itemid;
574  int low,
575  high,
576  mid,
577  res;
578 
579  /*
580  * If this isn't a posting tuple, then the index must be corrupt (if it is
581  * an ordinary non-pivot tuple then there must be an existing tuple with a
582  * heap TID that equals inserter's new heap TID/scantid). Defensively
583  * check that tuple is a posting list tuple whose posting list range
584  * includes caller's scantid.
585  *
586  * (This is also needed because contrib/amcheck's rootdescend option needs
587  * to be able to relocate a non-pivot tuple using _bt_binsrch_insert().)
588  */
589  itemid = PageGetItemId(page, offnum);
590  itup = (IndexTuple) PageGetItem(page, itemid);
591  if (!BTreeTupleIsPosting(itup))
592  return 0;
593 
594  Assert(key->heapkeyspace && key->allequalimage);
595 
596  /*
597  * In the event that posting list tuple has LP_DEAD bit set, indicate this
598  * to _bt_binsrch_insert() caller by returning -1, a sentinel value. A
599  * second call to _bt_binsrch_insert() can take place when its caller has
600  * removed the dead item.
601  */
602  if (ItemIdIsDead(itemid))
603  return -1;
604 
605  /* "high" is past end of posting list for loop invariant */
606  low = 0;
607  high = BTreeTupleGetNPosting(itup);
608  Assert(high >= 2);
609 
610  while (high > low)
611  {
612  mid = low + ((high - low) / 2);
613  res = ItemPointerCompare(key->scantid,
614  BTreeTupleGetPostingN(itup, mid));
615 
616  if (res > 0)
617  low = mid + 1;
618  else if (res < 0)
619  high = mid;
620  else
621  return mid;
622  }
623 
624  /* Exact match not found */
625  return low;
626 }
627 
628 /*----------
629  * _bt_compare() -- Compare insertion-type scankey to tuple on a page.
630  *
631  * page/offnum: location of btree item to be compared to.
632  *
633  * This routine returns:
634  * <0 if scankey < tuple at offnum;
635  * 0 if scankey == tuple at offnum;
636  * >0 if scankey > tuple at offnum.
637  *
638  * NULLs in the keys are treated as sortable values. Therefore
639  * "equality" does not necessarily mean that the item should be returned
640  * to the caller as a matching key. Similarly, an insertion scankey
641  * with its scantid set is treated as equal to a posting tuple whose TID
642  * range overlaps with their scantid. There generally won't be a
643  * matching TID in the posting tuple, which caller must handle
644  * themselves (e.g., by splitting the posting list tuple).
645  *
646  * CRUCIAL NOTE: on a non-leaf page, the first data key is assumed to be
647  * "minus infinity": this routine will always claim it is less than the
648  * scankey. The actual key value stored is explicitly truncated to 0
649  * attributes (explicitly minus infinity) with version 3+ indexes, but
650  * that isn't relied upon. This allows us to implement the Lehman and
651  * Yao convention that the first down-link pointer is before the first
652  * key. See backend/access/nbtree/README for details.
653  *----------
654  */
655 int32
658  Page page,
659  OffsetNumber offnum)
660 {
661  TupleDesc itupdesc = RelationGetDescr(rel);
662  BTPageOpaque opaque = BTPageGetOpaque(page);
663  IndexTuple itup;
664  ItemPointer heapTid;
665  ScanKey scankey;
666  int ncmpkey;
667  int ntupatts;
668  int32 result;
669 
670  Assert(_bt_check_natts(rel, key->heapkeyspace, page, offnum));
672  Assert(key->heapkeyspace || key->scantid == NULL);
673 
674  /*
675  * Force result ">" if target item is first data item on an internal page
676  * --- see NOTE above.
677  */
678  if (!P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque))
679  return 1;
680 
681  itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
682  ntupatts = BTreeTupleGetNAtts(itup, rel);
683 
684  /*
685  * The scan key is set up with the attribute number associated with each
686  * term in the key. It is important that, if the index is multi-key, the
687  * scan contain the first k key attributes, and that they be in order. If
688  * you think about how multi-key ordering works, you'll understand why
689  * this is.
690  *
691  * We don't test for violation of this condition here, however. The
692  * initial setup for the index scan had better have gotten it right (see
693  * _bt_first).
694  */
695 
696  ncmpkey = Min(ntupatts, key->keysz);
697  Assert(key->heapkeyspace || ncmpkey == key->keysz);
698  Assert(!BTreeTupleIsPosting(itup) || key->allequalimage);
699  scankey = key->scankeys;
700  for (int i = 1; i <= ncmpkey; i++)
701  {
702  Datum datum;
703  bool isNull;
704 
705  datum = index_getattr(itup, scankey->sk_attno, itupdesc, &isNull);
706 
707  if (scankey->sk_flags & SK_ISNULL) /* key is NULL */
708  {
709  if (isNull)
710  result = 0; /* NULL "=" NULL */
711  else if (scankey->sk_flags & SK_BT_NULLS_FIRST)
712  result = -1; /* NULL "<" NOT_NULL */
713  else
714  result = 1; /* NULL ">" NOT_NULL */
715  }
716  else if (isNull) /* key is NOT_NULL and item is NULL */
717  {
718  if (scankey->sk_flags & SK_BT_NULLS_FIRST)
719  result = 1; /* NOT_NULL ">" NULL */
720  else
721  result = -1; /* NOT_NULL "<" NULL */
722  }
723  else
724  {
725  /*
726  * The sk_func needs to be passed the index value as left arg and
727  * the sk_argument as right arg (they might be of different
728  * types). Since it is convenient for callers to think of
729  * _bt_compare as comparing the scankey to the index item, we have
730  * to flip the sign of the comparison result. (Unless it's a DESC
731  * column, in which case we *don't* flip the sign.)
732  */
733  result = DatumGetInt32(FunctionCall2Coll(&scankey->sk_func,
734  scankey->sk_collation,
735  datum,
736  scankey->sk_argument));
737 
738  if (!(scankey->sk_flags & SK_BT_DESC))
739  INVERT_COMPARE_RESULT(result);
740  }
741 
742  /* if the keys are unequal, return the difference */
743  if (result != 0)
744  return result;
745 
746  scankey++;
747  }
748 
749  /*
750  * All non-truncated attributes (other than heap TID) were found to be
751  * equal. Treat truncated attributes as minus infinity when scankey has a
752  * key attribute value that would otherwise be compared directly.
753  *
754  * Note: it doesn't matter if ntupatts includes non-key attributes;
755  * scankey won't, so explicitly excluding non-key attributes isn't
756  * necessary.
757  */
758  if (key->keysz > ntupatts)
759  return 1;
760 
761  /*
762  * Use the heap TID attribute and scantid to try to break the tie. The
763  * rules are the same as any other key attribute -- only the
764  * representation differs.
765  */
766  heapTid = BTreeTupleGetHeapTID(itup);
767  if (key->scantid == NULL)
768  {
769  /*
770  * Most searches have a scankey that is considered greater than a
771  * truncated pivot tuple if and when the scankey has equal values for
772  * attributes up to and including the least significant untruncated
773  * attribute in tuple.
774  *
775  * For example, if an index has the minimum two attributes (single
776  * user key attribute, plus heap TID attribute), and a page's high key
777  * is ('foo', -inf), and scankey is ('foo', <omitted>), the search
778  * will not descend to the page to the left. The search will descend
779  * right instead. The truncated attribute in pivot tuple means that
780  * all non-pivot tuples on the page to the left are strictly < 'foo',
781  * so it isn't necessary to descend left. In other words, search
782  * doesn't have to descend left because it isn't interested in a match
783  * that has a heap TID value of -inf.
784  *
785  * However, some searches (pivotsearch searches) actually require that
786  * we descend left when this happens. -inf is treated as a possible
787  * match for omitted scankey attribute(s). This is needed by page
788  * deletion, which must re-find leaf pages that are targets for
789  * deletion using their high keys.
790  *
791  * Note: the heap TID part of the test ensures that scankey is being
792  * compared to a pivot tuple with one or more truncated key
793  * attributes.
794  *
795  * Note: pg_upgrade'd !heapkeyspace indexes must always descend to the
796  * left here, since they have no heap TID attribute (and cannot have
797  * any -inf key values in any case, since truncation can only remove
798  * non-key attributes). !heapkeyspace searches must always be
799  * prepared to deal with matches on both sides of the pivot once the
800  * leaf level is reached.
801  */
802  if (key->heapkeyspace && !key->pivotsearch &&
803  key->keysz == ntupatts && heapTid == NULL)
804  return 1;
805 
806  /* All provided scankey arguments found to be equal */
807  return 0;
808  }
809 
810  /*
811  * Treat truncated heap TID as minus infinity, since scankey has a key
812  * attribute value (scantid) that would otherwise be compared directly
813  */
815  if (heapTid == NULL)
816  return 1;
817 
818  /*
819  * Scankey must be treated as equal to a posting list tuple if its scantid
820  * value falls within the range of the posting list. In all other cases
821  * there can only be a single heap TID value, which is compared directly
822  * with scantid.
823  */
825  result = ItemPointerCompare(key->scantid, heapTid);
826  if (result <= 0 || !BTreeTupleIsPosting(itup))
827  return result;
828  else
829  {
830  result = ItemPointerCompare(key->scantid,
832  if (result > 0)
833  return 1;
834  }
835 
836  return 0;
837 }
838 
839 /*
840  * _bt_first() -- Find the first item in a scan.
841  *
842  * We need to be clever about the direction of scan, the search
843  * conditions, and the tree ordering. We find the first item (or,
844  * if backwards scan, the last item) in the tree that satisfies the
845  * qualifications in the scan key. On success exit, the page containing
846  * the current index tuple is pinned but not locked, and data about
847  * the matching tuple(s) on the page has been loaded into so->currPos.
848  * scan->xs_ctup.t_self is set to the heap TID of the current tuple,
849  * and if requested, scan->xs_itup points to a copy of the index tuple.
850  *
851  * If there are no matching items in the index, we return false, with no
852  * pins or locks held.
853  *
854  * Note that scan->keyData[], and the so->keyData[] scankey built from it,
855  * are both search-type scankeys (see nbtree/README for more about this).
856  * Within this routine, we build a temporary insertion-type scankey to use
857  * in locating the scan start position.
858  */
859 bool
861 {
862  Relation rel = scan->indexRelation;
863  BTScanOpaque so = (BTScanOpaque) scan->opaque;
864  Buffer buf;
865  BTStack stack;
866  OffsetNumber offnum;
867  StrategyNumber strat;
868  bool nextkey;
869  bool goback;
870  BTScanInsertData inskey;
871  ScanKey startKeys[INDEX_MAX_KEYS];
872  ScanKeyData notnullkeys[INDEX_MAX_KEYS];
873  int keysCount = 0;
874  int i;
875  bool status;
876  StrategyNumber strat_total;
877  BTScanPosItem *currItem;
878  BlockNumber blkno;
879 
881 
883 
884  /*
885  * Examine the scan keys and eliminate any redundant keys; also mark the
886  * keys that must be matched to continue the scan.
887  */
888  _bt_preprocess_keys(scan);
889 
890  /*
891  * Quit now if _bt_preprocess_keys() discovered that the scan keys can
892  * never be satisfied (eg, x == 1 AND x > 2).
893  */
894  if (!so->qual_ok)
895  {
896  /* Notify any other workers that we're done with this scan key. */
897  _bt_parallel_done(scan);
898  return false;
899  }
900 
901  /*
902  * For parallel scans, get the starting page from shared state. If the
903  * scan has not started, proceed to find out first leaf page in the usual
904  * way while keeping other participating processes waiting. If the scan
905  * has already begun, use the page number from the shared structure.
906  */
907  if (scan->parallel_scan != NULL)
908  {
909  status = _bt_parallel_seize(scan, &blkno);
910  if (!status)
911  return false;
912  else if (blkno == P_NONE)
913  {
914  _bt_parallel_done(scan);
915  return false;
916  }
917  else if (blkno != InvalidBlockNumber)
918  {
919  if (!_bt_parallel_readpage(scan, blkno, dir))
920  return false;
921  goto readcomplete;
922  }
923  }
924 
925  /*----------
926  * Examine the scan keys to discover where we need to start the scan.
927  *
928  * We want to identify the keys that can be used as starting boundaries;
929  * these are =, >, or >= keys for a forward scan or =, <, <= keys for
930  * a backwards scan. We can use keys for multiple attributes so long as
931  * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
932  * a > or < boundary or find an attribute with no boundary (which can be
933  * thought of as the same as "> -infinity"), we can't use keys for any
934  * attributes to its right, because it would break our simplistic notion
935  * of what initial positioning strategy to use.
936  *
937  * When the scan keys include cross-type operators, _bt_preprocess_keys
938  * may not be able to eliminate redundant keys; in such cases we will
939  * arbitrarily pick a usable one for each attribute. This is correct
940  * but possibly not optimal behavior. (For example, with keys like
941  * "x >= 4 AND x >= 5" we would elect to scan starting at x=4 when
942  * x=5 would be more efficient.) Since the situation only arises given
943  * a poorly-worded query plus an incomplete opfamily, live with it.
944  *
945  * When both equality and inequality keys appear for a single attribute
946  * (again, only possible when cross-type operators appear), we *must*
947  * select one of the equality keys for the starting point, because
948  * _bt_checkkeys() will stop the scan as soon as an equality qual fails.
949  * For example, if we have keys like "x >= 4 AND x = 10" and we elect to
950  * start at x=4, we will fail and stop before reaching x=10. If multiple
951  * equality quals survive preprocessing, however, it doesn't matter which
952  * one we use --- by definition, they are either redundant or
953  * contradictory.
954  *
955  * Any regular (not SK_SEARCHNULL) key implies a NOT NULL qualifier.
956  * If the index stores nulls at the end of the index we'll be starting
957  * from, and we have no boundary key for the column (which means the key
958  * we deduced NOT NULL from is an inequality key that constrains the other
959  * end of the index), then we cons up an explicit SK_SEARCHNOTNULL key to
960  * use as a boundary key. If we didn't do this, we might find ourselves
961  * traversing a lot of null entries at the start of the scan.
962  *
963  * In this loop, row-comparison keys are treated the same as keys on their
964  * first (leftmost) columns. We'll add on lower-order columns of the row
965  * comparison below, if possible.
966  *
967  * The selected scan keys (at most one per index column) are remembered by
968  * storing their addresses into the local startKeys[] array.
969  *----------
970  */
971  strat_total = BTEqualStrategyNumber;
972  if (so->numberOfKeys > 0)
973  {
974  AttrNumber curattr;
975  ScanKey chosen;
976  ScanKey impliesNN;
977  ScanKey cur;
978 
979  /*
980  * chosen is the so-far-chosen key for the current attribute, if any.
981  * We don't cast the decision in stone until we reach keys for the
982  * next attribute.
983  */
984  curattr = 1;
985  chosen = NULL;
986  /* Also remember any scankey that implies a NOT NULL constraint */
987  impliesNN = NULL;
988 
989  /*
990  * Loop iterates from 0 to numberOfKeys inclusive; we use the last
991  * pass to handle after-last-key processing. Actual exit from the
992  * loop is at one of the "break" statements below.
993  */
994  for (cur = so->keyData, i = 0;; cur++, i++)
995  {
996  if (i >= so->numberOfKeys || cur->sk_attno != curattr)
997  {
998  /*
999  * Done looking at keys for curattr. If we didn't find a
1000  * usable boundary key, see if we can deduce a NOT NULL key.
1001  */
1002  if (chosen == NULL && impliesNN != NULL &&
1003  ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
1004  ScanDirectionIsForward(dir) :
1006  {
1007  /* Yes, so build the key in notnullkeys[keysCount] */
1008  chosen = &notnullkeys[keysCount];
1009  ScanKeyEntryInitialize(chosen,
1011  (impliesNN->sk_flags &
1013  curattr,
1014  ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
1017  InvalidOid,
1018  InvalidOid,
1019  InvalidOid,
1020  (Datum) 0);
1021  }
1022 
1023  /*
1024  * If we still didn't find a usable boundary key, quit; else
1025  * save the boundary key pointer in startKeys.
1026  */
1027  if (chosen == NULL)
1028  break;
1029  startKeys[keysCount++] = chosen;
1030 
1031  /*
1032  * Adjust strat_total, and quit if we have stored a > or <
1033  * key.
1034  */
1035  strat = chosen->sk_strategy;
1036  if (strat != BTEqualStrategyNumber)
1037  {
1038  strat_total = strat;
1039  if (strat == BTGreaterStrategyNumber ||
1040  strat == BTLessStrategyNumber)
1041  break;
1042  }
1043 
1044  /*
1045  * Done if that was the last attribute, or if next key is not
1046  * in sequence (implying no boundary key is available for the
1047  * next attribute).
1048  */
1049  if (i >= so->numberOfKeys ||
1050  cur->sk_attno != curattr + 1)
1051  break;
1052 
1053  /*
1054  * Reset for next attr.
1055  */
1056  curattr = cur->sk_attno;
1057  chosen = NULL;
1058  impliesNN = NULL;
1059  }
1060 
1061  /*
1062  * Can we use this key as a starting boundary for this attr?
1063  *
1064  * If not, does it imply a NOT NULL constraint? (Because
1065  * SK_SEARCHNULL keys are always assigned BTEqualStrategyNumber,
1066  * *any* inequality key works for that; we need not test.)
1067  */
1068  switch (cur->sk_strategy)
1069  {
1070  case BTLessStrategyNumber:
1072  if (chosen == NULL)
1073  {
1074  if (ScanDirectionIsBackward(dir))
1075  chosen = cur;
1076  else
1077  impliesNN = cur;
1078  }
1079  break;
1080  case BTEqualStrategyNumber:
1081  /* override any non-equality choice */
1082  chosen = cur;
1083  break;
1086  if (chosen == NULL)
1087  {
1088  if (ScanDirectionIsForward(dir))
1089  chosen = cur;
1090  else
1091  impliesNN = cur;
1092  }
1093  break;
1094  }
1095  }
1096  }
1097 
1098  /*
1099  * If we found no usable boundary keys, we have to start from one end of
1100  * the tree. Walk down that edge to the first or last key, and scan from
1101  * there.
1102  */
1103  if (keysCount == 0)
1104  {
1105  bool match;
1106 
1107  match = _bt_endpoint(scan, dir);
1108 
1109  if (!match)
1110  {
1111  /* No match, so mark (parallel) scan finished */
1112  _bt_parallel_done(scan);
1113  }
1114 
1115  return match;
1116  }
1117 
1118  /*
1119  * We want to start the scan somewhere within the index. Set up an
1120  * insertion scankey we can use to search for the boundary point we
1121  * identified above. The insertion scankey is built using the keys
1122  * identified by startKeys[]. (Remaining insertion scankey fields are
1123  * initialized after initial-positioning strategy is finalized.)
1124  */
1125  Assert(keysCount <= INDEX_MAX_KEYS);
1126  for (i = 0; i < keysCount; i++)
1127  {
1128  ScanKey cur = startKeys[i];
1129 
1130  Assert(cur->sk_attno == i + 1);
1131 
1132  if (cur->sk_flags & SK_ROW_HEADER)
1133  {
1134  /*
1135  * Row comparison header: look to the first row member instead.
1136  *
1137  * The member scankeys are already in insertion format (ie, they
1138  * have sk_func = 3-way-comparison function), but we have to watch
1139  * out for nulls, which _bt_preprocess_keys didn't check. A null
1140  * in the first row member makes the condition unmatchable, just
1141  * like qual_ok = false.
1142  */
1143  ScanKey subkey = (ScanKey) DatumGetPointer(cur->sk_argument);
1144 
1145  Assert(subkey->sk_flags & SK_ROW_MEMBER);
1146  if (subkey->sk_flags & SK_ISNULL)
1147  {
1148  _bt_parallel_done(scan);
1149  return false;
1150  }
1151  memcpy(inskey.scankeys + i, subkey, sizeof(ScanKeyData));
1152 
1153  /*
1154  * If the row comparison is the last positioning key we accepted,
1155  * try to add additional keys from the lower-order row members.
1156  * (If we accepted independent conditions on additional index
1157  * columns, we use those instead --- doesn't seem worth trying to
1158  * determine which is more restrictive.) Note that this is OK
1159  * even if the row comparison is of ">" or "<" type, because the
1160  * condition applied to all but the last row member is effectively
1161  * ">=" or "<=", and so the extra keys don't break the positioning
1162  * scheme. But, by the same token, if we aren't able to use all
1163  * the row members, then the part of the row comparison that we
1164  * did use has to be treated as just a ">=" or "<=" condition, and
1165  * so we'd better adjust strat_total accordingly.
1166  */
1167  if (i == keysCount - 1)
1168  {
1169  bool used_all_subkeys = false;
1170 
1171  Assert(!(subkey->sk_flags & SK_ROW_END));
1172  for (;;)
1173  {
1174  subkey++;
1175  Assert(subkey->sk_flags & SK_ROW_MEMBER);
1176  if (subkey->sk_attno != keysCount + 1)
1177  break; /* out-of-sequence, can't use it */
1178  if (subkey->sk_strategy != cur->sk_strategy)
1179  break; /* wrong direction, can't use it */
1180  if (subkey->sk_flags & SK_ISNULL)
1181  break; /* can't use null keys */
1182  Assert(keysCount < INDEX_MAX_KEYS);
1183  memcpy(inskey.scankeys + keysCount, subkey,
1184  sizeof(ScanKeyData));
1185  keysCount++;
1186  if (subkey->sk_flags & SK_ROW_END)
1187  {
1188  used_all_subkeys = true;
1189  break;
1190  }
1191  }
1192  if (!used_all_subkeys)
1193  {
1194  switch (strat_total)
1195  {
1196  case BTLessStrategyNumber:
1197  strat_total = BTLessEqualStrategyNumber;
1198  break;
1200  strat_total = BTGreaterEqualStrategyNumber;
1201  break;
1202  }
1203  }
1204  break; /* done with outer loop */
1205  }
1206  }
1207  else
1208  {
1209  /*
1210  * Ordinary comparison key. Transform the search-style scan key
1211  * to an insertion scan key by replacing the sk_func with the
1212  * appropriate btree comparison function.
1213  *
1214  * If scankey operator is not a cross-type comparison, we can use
1215  * the cached comparison function; otherwise gotta look it up in
1216  * the catalogs. (That can't lead to infinite recursion, since no
1217  * indexscan initiated by syscache lookup will use cross-data-type
1218  * operators.)
1219  *
1220  * We support the convention that sk_subtype == InvalidOid means
1221  * the opclass input type; this is a hack to simplify life for
1222  * ScanKeyInit().
1223  */
1224  if (cur->sk_subtype == rel->rd_opcintype[i] ||
1225  cur->sk_subtype == InvalidOid)
1226  {
1227  FmgrInfo *procinfo;
1228 
1229  procinfo = index_getprocinfo(rel, cur->sk_attno, BTORDER_PROC);
1230  ScanKeyEntryInitializeWithInfo(inskey.scankeys + i,
1231  cur->sk_flags,
1232  cur->sk_attno,
1234  cur->sk_subtype,
1235  cur->sk_collation,
1236  procinfo,
1237  cur->sk_argument);
1238  }
1239  else
1240  {
1241  RegProcedure cmp_proc;
1242 
1243  cmp_proc = get_opfamily_proc(rel->rd_opfamily[i],
1244  rel->rd_opcintype[i],
1245  cur->sk_subtype,
1246  BTORDER_PROC);
1247  if (!RegProcedureIsValid(cmp_proc))
1248  elog(ERROR, "missing support function %d(%u,%u) for attribute %d of index \"%s\"",
1249  BTORDER_PROC, rel->rd_opcintype[i], cur->sk_subtype,
1250  cur->sk_attno, RelationGetRelationName(rel));
1251  ScanKeyEntryInitialize(inskey.scankeys + i,
1252  cur->sk_flags,
1253  cur->sk_attno,
1255  cur->sk_subtype,
1256  cur->sk_collation,
1257  cmp_proc,
1258  cur->sk_argument);
1259  }
1260  }
1261  }
1262 
1263  /*----------
1264  * Examine the selected initial-positioning strategy to determine exactly
1265  * where we need to start the scan, and set flag variables to control the
1266  * code below.
1267  *
1268  * If nextkey = false, _bt_search and _bt_binsrch will locate the first
1269  * item >= scan key. If nextkey = true, they will locate the first
1270  * item > scan key.
1271  *
1272  * If goback = true, we will then step back one item, while if
1273  * goback = false, we will start the scan on the located item.
1274  *----------
1275  */
1276  switch (strat_total)
1277  {
1278  case BTLessStrategyNumber:
1279 
1280  /*
1281  * Find first item >= scankey, then back up one to arrive at last
1282  * item < scankey. (Note: this positioning strategy is only used
1283  * for a backward scan, so that is always the correct starting
1284  * position.)
1285  */
1286  nextkey = false;
1287  goback = true;
1288  break;
1289 
1291 
1292  /*
1293  * Find first item > scankey, then back up one to arrive at last
1294  * item <= scankey. (Note: this positioning strategy is only used
1295  * for a backward scan, so that is always the correct starting
1296  * position.)
1297  */
1298  nextkey = true;
1299  goback = true;
1300  break;
1301 
1302  case BTEqualStrategyNumber:
1303 
1304  /*
1305  * If a backward scan was specified, need to start with last equal
1306  * item not first one.
1307  */
1308  if (ScanDirectionIsBackward(dir))
1309  {
1310  /*
1311  * This is the same as the <= strategy. We will check at the
1312  * end whether the found item is actually =.
1313  */
1314  nextkey = true;
1315  goback = true;
1316  }
1317  else
1318  {
1319  /*
1320  * This is the same as the >= strategy. We will check at the
1321  * end whether the found item is actually =.
1322  */
1323  nextkey = false;
1324  goback = false;
1325  }
1326  break;
1327 
1329 
1330  /*
1331  * Find first item >= scankey. (This is only used for forward
1332  * scans.)
1333  */
1334  nextkey = false;
1335  goback = false;
1336  break;
1337 
1339 
1340  /*
1341  * Find first item > scankey. (This is only used for forward
1342  * scans.)
1343  */
1344  nextkey = true;
1345  goback = false;
1346  break;
1347 
1348  default:
1349  /* can't get here, but keep compiler quiet */
1350  elog(ERROR, "unrecognized strat_total: %d", (int) strat_total);
1351  return false;
1352  }
1353 
1354  /* Initialize remaining insertion scan key fields */
1355  _bt_metaversion(rel, &inskey.heapkeyspace, &inskey.allequalimage);
1356  inskey.anynullkeys = false; /* unused */
1357  inskey.nextkey = nextkey;
1358  inskey.pivotsearch = false;
1359  inskey.scantid = NULL;
1360  inskey.keysz = keysCount;
1361 
1362  /*
1363  * Use the manufactured insertion scan key to descend the tree and
1364  * position ourselves on the target leaf page.
1365  */
1366  stack = _bt_search(rel, &inskey, &buf, BT_READ, scan->xs_snapshot);
1367 
1368  /* don't need to keep the stack around... */
1369  _bt_freestack(stack);
1370 
1371  if (!BufferIsValid(buf))
1372  {
1373  /*
1374  * We only get here if the index is completely empty. Lock relation
1375  * because nothing finer to lock exists.
1376  */
1377  PredicateLockRelation(rel, scan->xs_snapshot);
1378 
1379  /*
1380  * mark parallel scan as done, so that all the workers can finish
1381  * their scan
1382  */
1383  _bt_parallel_done(scan);
1385 
1386  return false;
1387  }
1388  else
1390  scan->xs_snapshot);
1391 
1392  _bt_initialize_more_data(so, dir);
1393 
1394  /* position to the precise item on the page */
1395  offnum = _bt_binsrch(rel, &inskey, buf);
1396 
1397  /*
1398  * If nextkey = false, we are positioned at the first item >= scan key, or
1399  * possibly at the end of a page on which all the existing items are less
1400  * than the scan key and we know that everything on later pages is greater
1401  * than or equal to scan key.
1402  *
1403  * If nextkey = true, we are positioned at the first item > scan key, or
1404  * possibly at the end of a page on which all the existing items are less
1405  * than or equal to the scan key and we know that everything on later
1406  * pages is greater than scan key.
1407  *
1408  * The actually desired starting point is either this item or the prior
1409  * one, or in the end-of-page case it's the first item on the next page or
1410  * the last item on this page. Adjust the starting offset if needed. (If
1411  * this results in an offset before the first item or after the last one,
1412  * _bt_readpage will report no items found, and then we'll step to the
1413  * next page as needed.)
1414  */
1415  if (goback)
1416  offnum = OffsetNumberPrev(offnum);
1417 
1418  /* remember which buffer we have pinned, if any */
1420  so->currPos.buf = buf;
1421 
1422  /*
1423  * Now load data from the first page of the scan.
1424  */
1425  if (!_bt_readpage(scan, dir, offnum))
1426  {
1427  /*
1428  * There's no actually-matching data on this page. Try to advance to
1429  * the next page. Return false if there's no matching data at all.
1430  */
1431  _bt_unlockbuf(scan->indexRelation, so->currPos.buf);
1432  if (!_bt_steppage(scan, dir))
1433  return false;
1434  }
1435  else
1436  {
1437  /* Drop the lock, and maybe the pin, on the current page */
1439  }
1440 
1441 readcomplete:
1442  /* OK, itemIndex says what to return */
1443  currItem = &so->currPos.items[so->currPos.itemIndex];
1444  scan->xs_heaptid = currItem->heapTid;
1445  if (scan->xs_want_itup)
1446  scan->xs_itup = (IndexTuple) (so->currTuples + currItem->tupleOffset);
1447 
1448  return true;
1449 }
1450 
1451 /*
1452  * _bt_next() -- Get the next item in a scan.
1453  *
1454  * On entry, so->currPos describes the current page, which may be pinned
1455  * but is not locked, and so->currPos.itemIndex identifies which item was
1456  * previously returned.
1457  *
1458  * On successful exit, scan->xs_ctup.t_self is set to the TID of the
1459  * next heap tuple, and if requested, scan->xs_itup points to a copy of
1460  * the index tuple. so->currPos is updated as needed.
1461  *
1462  * On failure exit (no more tuples), we release pin and set
1463  * so->currPos.buf to InvalidBuffer.
1464  */
1465 bool
1467 {
1468  BTScanOpaque so = (BTScanOpaque) scan->opaque;
1469  BTScanPosItem *currItem;
1470 
1471  /*
1472  * Advance to next tuple on current page; or if there's no more, try to
1473  * step to the next page with data.
1474  */
1475  if (ScanDirectionIsForward(dir))
1476  {
1477  if (++so->currPos.itemIndex > so->currPos.lastItem)
1478  {
1479  if (!_bt_steppage(scan, dir))
1480  return false;
1481  }
1482  }
1483  else
1484  {
1485  if (--so->currPos.itemIndex < so->currPos.firstItem)
1486  {
1487  if (!_bt_steppage(scan, dir))
1488  return false;
1489  }
1490  }
1491 
1492  /* OK, itemIndex says what to return */
1493  currItem = &so->currPos.items[so->currPos.itemIndex];
1494  scan->xs_heaptid = currItem->heapTid;
1495  if (scan->xs_want_itup)
1496  scan->xs_itup = (IndexTuple) (so->currTuples + currItem->tupleOffset);
1497 
1498  return true;
1499 }
1500 
1501 /*
1502  * _bt_readpage() -- Load data from current index page into so->currPos
1503  *
1504  * Caller must have pinned and read-locked so->currPos.buf; the buffer's state
1505  * is not changed here. Also, currPos.moreLeft and moreRight must be valid;
1506  * they are updated as appropriate. All other fields of so->currPos are
1507  * initialized from scratch here.
1508  *
1509  * We scan the current page starting at offnum and moving in the indicated
1510  * direction. All items matching the scan keys are loaded into currPos.items.
1511  * moreLeft or moreRight (as appropriate) is cleared if _bt_checkkeys reports
1512  * that there can be no more matching tuples in the current scan direction.
1513  *
1514  * In the case of a parallel scan, caller must have called _bt_parallel_seize
1515  * prior to calling this function; this function will invoke
1516  * _bt_parallel_release before returning.
1517  *
1518  * Returns true if any matching items found on the page, false if none.
1519  */
1520 static bool
1522 {
1523  BTScanOpaque so = (BTScanOpaque) scan->opaque;
1524  Page page;
1525  BTPageOpaque opaque;
1526  OffsetNumber minoff;
1527  OffsetNumber maxoff;
1528  int itemIndex;
1529  bool continuescan;
1530  int indnatts;
1531 
1532  /*
1533  * We must have the buffer pinned and locked, but the usual macro can't be
1534  * used here; this function is what makes it good for currPos.
1535  */
1537 
1538  page = BufferGetPage(so->currPos.buf);
1539  opaque = BTPageGetOpaque(page);
1540 
1541  /* allow next page be processed by parallel worker */
1542  if (scan->parallel_scan)
1543  {
1544  if (ScanDirectionIsForward(dir))
1545  _bt_parallel_release(scan, opaque->btpo_next);
1546  else
1548  }
1549 
1550  continuescan = true; /* default assumption */
1552  minoff = P_FIRSTDATAKEY(opaque);
1553  maxoff = PageGetMaxOffsetNumber(page);
1554 
1555  /*
1556  * We note the buffer's block number so that we can release the pin later.
1557  * This allows us to re-read the buffer if it is needed again for hinting.
1558  */
1560 
1561  /*
1562  * We save the LSN of the page as we read it, so that we know whether it
1563  * safe to apply LP_DEAD hints to the page later. This allows us to drop
1564  * the pin for MVCC scans, which allows vacuum to avoid blocking.
1565  */
1567 
1568  /*
1569  * we must save the page's right-link while scanning it; this tells us
1570  * where to step right to after we're done with these items. There is no
1571  * corresponding need for the left-link, since splits always go right.
1572  */
1573  so->currPos.nextPage = opaque->btpo_next;
1574 
1575  /* initialize tuple workspace to empty */
1576  so->currPos.nextTupleOffset = 0;
1577 
1578  /*
1579  * Now that the current page has been made consistent, the macro should be
1580  * good.
1581  */
1583 
1584  if (ScanDirectionIsForward(dir))
1585  {
1586  /* load items[] in ascending order */
1587  itemIndex = 0;
1588 
1589  offnum = Max(offnum, minoff);
1590 
1591  while (offnum <= maxoff)
1592  {
1593  ItemId iid = PageGetItemId(page, offnum);
1594  IndexTuple itup;
1595 
1596  /*
1597  * If the scan specifies not to return killed tuples, then we
1598  * treat a killed tuple as not passing the qual
1599  */
1600  if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
1601  {
1602  offnum = OffsetNumberNext(offnum);
1603  continue;
1604  }
1605 
1606  itup = (IndexTuple) PageGetItem(page, iid);
1607 
1608  if (_bt_checkkeys(scan, itup, indnatts, dir, &continuescan))
1609  {
1610  /* tuple passes all scan key conditions */
1611  if (!BTreeTupleIsPosting(itup))
1612  {
1613  /* Remember it */
1614  _bt_saveitem(so, itemIndex, offnum, itup);
1615  itemIndex++;
1616  }
1617  else
1618  {
1619  int tupleOffset;
1620 
1621  /*
1622  * Set up state to return posting list, and remember first
1623  * TID
1624  */
1625  tupleOffset =
1626  _bt_setuppostingitems(so, itemIndex, offnum,
1627  BTreeTupleGetPostingN(itup, 0),
1628  itup);
1629  itemIndex++;
1630  /* Remember additional TIDs */
1631  for (int i = 1; i < BTreeTupleGetNPosting(itup); i++)
1632  {
1633  _bt_savepostingitem(so, itemIndex, offnum,
1634  BTreeTupleGetPostingN(itup, i),
1635  tupleOffset);
1636  itemIndex++;
1637  }
1638  }
1639  }
1640  /* When !continuescan, there can't be any more matches, so stop */
1641  if (!continuescan)
1642  break;
1643 
1644  offnum = OffsetNumberNext(offnum);
1645  }
1646 
1647  /*
1648  * We don't need to visit page to the right when the high key
1649  * indicates that no more matches will be found there.
1650  *
1651  * Checking the high key like this works out more often than you might
1652  * think. Leaf page splits pick a split point between the two most
1653  * dissimilar tuples (this is weighed against the need to evenly share
1654  * free space). Leaf pages with high key attribute values that can
1655  * only appear on non-pivot tuples on the right sibling page are
1656  * common.
1657  */
1658  if (continuescan && !P_RIGHTMOST(opaque))
1659  {
1660  ItemId iid = PageGetItemId(page, P_HIKEY);
1661  IndexTuple itup = (IndexTuple) PageGetItem(page, iid);
1662  int truncatt;
1663 
1664  truncatt = BTreeTupleGetNAtts(itup, scan->indexRelation);
1665  _bt_checkkeys(scan, itup, truncatt, dir, &continuescan);
1666  }
1667 
1668  if (!continuescan)
1669  so->currPos.moreRight = false;
1670 
1671  Assert(itemIndex <= MaxTIDsPerBTreePage);
1672  so->currPos.firstItem = 0;
1673  so->currPos.lastItem = itemIndex - 1;
1674  so->currPos.itemIndex = 0;
1675  }
1676  else
1677  {
1678  /* load items[] in descending order */
1679  itemIndex = MaxTIDsPerBTreePage;
1680 
1681  offnum = Min(offnum, maxoff);
1682 
1683  while (offnum >= minoff)
1684  {
1685  ItemId iid = PageGetItemId(page, offnum);
1686  IndexTuple itup;
1687  bool tuple_alive;
1688  bool passes_quals;
1689 
1690  /*
1691  * If the scan specifies not to return killed tuples, then we
1692  * treat a killed tuple as not passing the qual. Most of the
1693  * time, it's a win to not bother examining the tuple's index
1694  * keys, but just skip to the next tuple (previous, actually,
1695  * since we're scanning backwards). However, if this is the first
1696  * tuple on the page, we do check the index keys, to prevent
1697  * uselessly advancing to the page to the left. This is similar
1698  * to the high key optimization used by forward scans.
1699  */
1700  if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
1701  {
1702  Assert(offnum >= P_FIRSTDATAKEY(opaque));
1703  if (offnum > P_FIRSTDATAKEY(opaque))
1704  {
1705  offnum = OffsetNumberPrev(offnum);
1706  continue;
1707  }
1708 
1709  tuple_alive = false;
1710  }
1711  else
1712  tuple_alive = true;
1713 
1714  itup = (IndexTuple) PageGetItem(page, iid);
1715 
1716  passes_quals = _bt_checkkeys(scan, itup, indnatts, dir,
1717  &continuescan);
1718  if (passes_quals && tuple_alive)
1719  {
1720  /* tuple passes all scan key conditions */
1721  if (!BTreeTupleIsPosting(itup))
1722  {
1723  /* Remember it */
1724  itemIndex--;
1725  _bt_saveitem(so, itemIndex, offnum, itup);
1726  }
1727  else
1728  {
1729  int tupleOffset;
1730 
1731  /*
1732  * Set up state to return posting list, and remember first
1733  * TID.
1734  *
1735  * Note that we deliberately save/return items from
1736  * posting lists in ascending heap TID order for backwards
1737  * scans. This allows _bt_killitems() to make a
1738  * consistent assumption about the order of items
1739  * associated with the same posting list tuple.
1740  */
1741  itemIndex--;
1742  tupleOffset =
1743  _bt_setuppostingitems(so, itemIndex, offnum,
1744  BTreeTupleGetPostingN(itup, 0),
1745  itup);
1746  /* Remember additional TIDs */
1747  for (int i = 1; i < BTreeTupleGetNPosting(itup); i++)
1748  {
1749  itemIndex--;
1750  _bt_savepostingitem(so, itemIndex, offnum,
1751  BTreeTupleGetPostingN(itup, i),
1752  tupleOffset);
1753  }
1754  }
1755  }
1756  if (!continuescan)
1757  {
1758  /* there can't be any more matches, so stop */
1759  so->currPos.moreLeft = false;
1760  break;
1761  }
1762 
1763  offnum = OffsetNumberPrev(offnum);
1764  }
1765 
1766  Assert(itemIndex >= 0);
1767  so->currPos.firstItem = itemIndex;
1770  }
1771 
1772  return (so->currPos.firstItem <= so->currPos.lastItem);
1773 }
1774 
1775 /* Save an index item into so->currPos.items[itemIndex] */
1776 static void
1777 _bt_saveitem(BTScanOpaque so, int itemIndex,
1778  OffsetNumber offnum, IndexTuple itup)
1779 {
1780  BTScanPosItem *currItem = &so->currPos.items[itemIndex];
1781 
1782  Assert(!BTreeTupleIsPivot(itup) && !BTreeTupleIsPosting(itup));
1783 
1784  currItem->heapTid = itup->t_tid;
1785  currItem->indexOffset = offnum;
1786  if (so->currTuples)
1787  {
1788  Size itupsz = IndexTupleSize(itup);
1789 
1790  currItem->tupleOffset = so->currPos.nextTupleOffset;
1791  memcpy(so->currTuples + so->currPos.nextTupleOffset, itup, itupsz);
1792  so->currPos.nextTupleOffset += MAXALIGN(itupsz);
1793  }
1794 }
1795 
1796 /*
1797  * Setup state to save TIDs/items from a single posting list tuple.
1798  *
1799  * Saves an index item into so->currPos.items[itemIndex] for TID that is
1800  * returned to scan first. Second or subsequent TIDs for posting list should
1801  * be saved by calling _bt_savepostingitem().
1802  *
1803  * Returns an offset into tuple storage space that main tuple is stored at if
1804  * needed.
1805  */
1806 static int
1808  ItemPointer heapTid, IndexTuple itup)
1809 {
1810  BTScanPosItem *currItem = &so->currPos.items[itemIndex];
1811 
1812  Assert(BTreeTupleIsPosting(itup));
1813 
1814  currItem->heapTid = *heapTid;
1815  currItem->indexOffset = offnum;
1816  if (so->currTuples)
1817  {
1818  /* Save base IndexTuple (truncate posting list) */
1819  IndexTuple base;
1820  Size itupsz = BTreeTupleGetPostingOffset(itup);
1821 
1822  itupsz = MAXALIGN(itupsz);
1823  currItem->tupleOffset = so->currPos.nextTupleOffset;
1824  base = (IndexTuple) (so->currTuples + so->currPos.nextTupleOffset);
1825  memcpy(base, itup, itupsz);
1826  /* Defensively reduce work area index tuple header size */
1827  base->t_info &= ~INDEX_SIZE_MASK;
1828  base->t_info |= itupsz;
1829  so->currPos.nextTupleOffset += itupsz;
1830 
1831  return currItem->tupleOffset;
1832  }
1833 
1834  return 0;
1835 }
1836 
1837 /*
1838  * Save an index item into so->currPos.items[itemIndex] for current posting
1839  * tuple.
1840  *
1841  * Assumes that _bt_setuppostingitems() has already been called for current
1842  * posting list tuple. Caller passes its return value as tupleOffset.
1843  */
1844 static inline void
1846  ItemPointer heapTid, int tupleOffset)
1847 {
1848  BTScanPosItem *currItem = &so->currPos.items[itemIndex];
1849 
1850  currItem->heapTid = *heapTid;
1851  currItem->indexOffset = offnum;
1852 
1853  /*
1854  * Have index-only scans return the same base IndexTuple for every TID
1855  * that originates from the same posting list
1856  */
1857  if (so->currTuples)
1858  currItem->tupleOffset = tupleOffset;
1859 }
1860 
1861 /*
1862  * _bt_steppage() -- Step to next page containing valid data for scan
1863  *
1864  * On entry, if so->currPos.buf is valid the buffer is pinned but not locked;
1865  * if pinned, we'll drop the pin before moving to next page. The buffer is
1866  * not locked on entry.
1867  *
1868  * For success on a scan using a non-MVCC snapshot we hold a pin, but not a
1869  * read lock, on that page. If we do not hold the pin, we set so->currPos.buf
1870  * to InvalidBuffer. We return true to indicate success.
1871  */
1872 static bool
1874 {
1875  BTScanOpaque so = (BTScanOpaque) scan->opaque;
1877  bool status;
1878 
1880 
1881  /* Before leaving current page, deal with any killed items */
1882  if (so->numKilled > 0)
1883  _bt_killitems(scan);
1884 
1885  /*
1886  * Before we modify currPos, make a copy of the page data if there was a
1887  * mark position that needs it.
1888  */
1889  if (so->markItemIndex >= 0)
1890  {
1891  /* bump pin on current buffer for assignment to mark buffer */
1892  if (BTScanPosIsPinned(so->currPos))
1894  memcpy(&so->markPos, &so->currPos,
1895  offsetof(BTScanPosData, items[1]) +
1896  so->currPos.lastItem * sizeof(BTScanPosItem));
1897  if (so->markTuples)
1898  memcpy(so->markTuples, so->currTuples,
1899  so->currPos.nextTupleOffset);
1900  so->markPos.itemIndex = so->markItemIndex;
1901  so->markItemIndex = -1;
1902  }
1903 
1904  if (ScanDirectionIsForward(dir))
1905  {
1906  /* Walk right to the next page with data */
1907  if (scan->parallel_scan != NULL)
1908  {
1909  /*
1910  * Seize the scan to get the next block number; if the scan has
1911  * ended already, bail out.
1912  */
1913  status = _bt_parallel_seize(scan, &blkno);
1914  if (!status)
1915  {
1916  /* release the previous buffer, if pinned */
1919  return false;
1920  }
1921  }
1922  else
1923  {
1924  /* Not parallel, so use the previously-saved nextPage link. */
1925  blkno = so->currPos.nextPage;
1926  }
1927 
1928  /* Remember we left a page with data */
1929  so->currPos.moreLeft = true;
1930 
1931  /* release the previous buffer, if pinned */
1933  }
1934  else
1935  {
1936  /* Remember we left a page with data */
1937  so->currPos.moreRight = true;
1938 
1939  if (scan->parallel_scan != NULL)
1940  {
1941  /*
1942  * Seize the scan to get the current block number; if the scan has
1943  * ended already, bail out.
1944  */
1945  status = _bt_parallel_seize(scan, &blkno);
1947  if (!status)
1948  {
1950  return false;
1951  }
1952  }
1953  else
1954  {
1955  /* Not parallel, so just use our own notion of the current page */
1956  blkno = so->currPos.currPage;
1957  }
1958  }
1959 
1960  if (!_bt_readnextpage(scan, blkno, dir))
1961  return false;
1962 
1963  /* Drop the lock, and maybe the pin, on the current page */
1965 
1966  return true;
1967 }
1968 
1969 /*
1970  * _bt_readnextpage() -- Read next page containing valid data for scan
1971  *
1972  * On success exit, so->currPos is updated to contain data from the next
1973  * interesting page. Caller is responsible to release lock and pin on
1974  * buffer on success. We return true to indicate success.
1975  *
1976  * If there are no more matching records in the given direction, we drop all
1977  * locks and pins, set so->currPos.buf to InvalidBuffer, and return false.
1978  */
1979 static bool
1981 {
1982  BTScanOpaque so = (BTScanOpaque) scan->opaque;
1983  Relation rel;
1984  Page page;
1985  BTPageOpaque opaque;
1986  bool status;
1987 
1988  rel = scan->indexRelation;
1989 
1990  if (ScanDirectionIsForward(dir))
1991  {
1992  for (;;)
1993  {
1994  /*
1995  * if we're at end of scan, give up and mark parallel scan as
1996  * done, so that all the workers can finish their scan
1997  */
1998  if (blkno == P_NONE || !so->currPos.moreRight)
1999  {
2000  _bt_parallel_done(scan);
2002  return false;
2003  }
2004  /* check for interrupts while we're not holding any buffer lock */
2006  /* step right one page */
2007  so->currPos.buf = _bt_getbuf(rel, blkno, BT_READ);
2008  page = BufferGetPage(so->currPos.buf);
2009  TestForOldSnapshot(scan->xs_snapshot, rel, page);
2010  opaque = BTPageGetOpaque(page);
2011  /* check for deleted page */
2012  if (!P_IGNORE(opaque))
2013  {
2014  PredicateLockPage(rel, blkno, scan->xs_snapshot);
2015  /* see if there are any matches on this page */
2016  /* note that this will clear moreRight if we can stop */
2017  if (_bt_readpage(scan, dir, P_FIRSTDATAKEY(opaque)))
2018  break;
2019  }
2020  else if (scan->parallel_scan != NULL)
2021  {
2022  /* allow next page be processed by parallel worker */
2023  _bt_parallel_release(scan, opaque->btpo_next);
2024  }
2025 
2026  /* nope, keep going */
2027  if (scan->parallel_scan != NULL)
2028  {
2029  _bt_relbuf(rel, so->currPos.buf);
2030  status = _bt_parallel_seize(scan, &blkno);
2031  if (!status)
2032  {
2034  return false;
2035  }
2036  }
2037  else
2038  {
2039  blkno = opaque->btpo_next;
2040  _bt_relbuf(rel, so->currPos.buf);
2041  }
2042  }
2043  }
2044  else
2045  {
2046  /*
2047  * Should only happen in parallel cases, when some other backend
2048  * advanced the scan.
2049  */
2050  if (so->currPos.currPage != blkno)
2051  {
2053  so->currPos.currPage = blkno;
2054  }
2055 
2056  /*
2057  * Walk left to the next page with data. This is much more complex
2058  * than the walk-right case because of the possibility that the page
2059  * to our left splits while we are in flight to it, plus the
2060  * possibility that the page we were on gets deleted after we leave
2061  * it. See nbtree/README for details.
2062  *
2063  * It might be possible to rearrange this code to have less overhead
2064  * in pinning and locking, but that would require capturing the left
2065  * pointer when the page is initially read, and using it here, along
2066  * with big changes to _bt_walk_left() and the code below. It is not
2067  * clear whether this would be a win, since if the page immediately to
2068  * the left splits after we read this page and before we step left, we
2069  * would need to visit more pages than with the current code.
2070  *
2071  * Note that if we change the code so that we drop the pin for a scan
2072  * which uses a non-MVCC snapshot, we will need to modify the code for
2073  * walking left, to allow for the possibility that a referenced page
2074  * has been deleted. As long as the buffer is pinned or the snapshot
2075  * is MVCC the page cannot move past the half-dead state to fully
2076  * deleted.
2077  */
2078  if (BTScanPosIsPinned(so->currPos))
2079  _bt_lockbuf(rel, so->currPos.buf, BT_READ);
2080  else
2081  so->currPos.buf = _bt_getbuf(rel, so->currPos.currPage, BT_READ);
2082 
2083  for (;;)
2084  {
2085  /* Done if we know there are no matching keys to the left */
2086  if (!so->currPos.moreLeft)
2087  {
2088  _bt_relbuf(rel, so->currPos.buf);
2089  _bt_parallel_done(scan);
2091  return false;
2092  }
2093 
2094  /* Step to next physical page */
2095  so->currPos.buf = _bt_walk_left(rel, so->currPos.buf,
2096  scan->xs_snapshot);
2097 
2098  /* if we're physically at end of index, return failure */
2099  if (so->currPos.buf == InvalidBuffer)
2100  {
2101  _bt_parallel_done(scan);
2103  return false;
2104  }
2105 
2106  /*
2107  * Okay, we managed to move left to a non-deleted page. Done if
2108  * it's not half-dead and contains matching tuples. Else loop back
2109  * and do it all again.
2110  */
2111  page = BufferGetPage(so->currPos.buf);
2112  TestForOldSnapshot(scan->xs_snapshot, rel, page);
2113  opaque = BTPageGetOpaque(page);
2114  if (!P_IGNORE(opaque))
2115  {
2117  /* see if there are any matches on this page */
2118  /* note that this will clear moreLeft if we can stop */
2119  if (_bt_readpage(scan, dir, PageGetMaxOffsetNumber(page)))
2120  break;
2121  }
2122  else if (scan->parallel_scan != NULL)
2123  {
2124  /* allow next page be processed by parallel worker */
2126  }
2127 
2128  /*
2129  * For parallel scans, get the last page scanned as it is quite
2130  * possible that by the time we try to seize the scan, some other
2131  * worker has already advanced the scan to a different page. We
2132  * must continue based on the latest page scanned by any worker.
2133  */
2134  if (scan->parallel_scan != NULL)
2135  {
2136  _bt_relbuf(rel, so->currPos.buf);
2137  status = _bt_parallel_seize(scan, &blkno);
2138  if (!status)
2139  {
2141  return false;
2142  }
2143  so->currPos.buf = _bt_getbuf(rel, blkno, BT_READ);
2144  }
2145  }
2146  }
2147 
2148  return true;
2149 }
2150 
2151 /*
2152  * _bt_parallel_readpage() -- Read current page containing valid data for scan
2153  *
2154  * On success, release lock and maybe pin on buffer. We return true to
2155  * indicate success.
2156  */
2157 static bool
2159 {
2160  BTScanOpaque so = (BTScanOpaque) scan->opaque;
2161 
2162  _bt_initialize_more_data(so, dir);
2163 
2164  if (!_bt_readnextpage(scan, blkno, dir))
2165  return false;
2166 
2167  /* Drop the lock, and maybe the pin, on the current page */
2169 
2170  return true;
2171 }
2172 
2173 /*
2174  * _bt_walk_left() -- step left one page, if possible
2175  *
2176  * The given buffer must be pinned and read-locked. This will be dropped
2177  * before stepping left. On return, we have pin and read lock on the
2178  * returned page, instead.
2179  *
2180  * Returns InvalidBuffer if there is no page to the left (no lock is held
2181  * in that case).
2182  *
2183  * When working on a non-leaf level, it is possible for the returned page
2184  * to be half-dead; the caller should check that condition and step left
2185  * again if it's important.
2186  */
2187 static Buffer
2189 {
2190  Page page;
2191  BTPageOpaque opaque;
2192 
2193  page = BufferGetPage(buf);
2194  opaque = BTPageGetOpaque(page);
2195 
2196  for (;;)
2197  {
2198  BlockNumber obknum;
2199  BlockNumber lblkno;
2200  BlockNumber blkno;
2201  int tries;
2202 
2203  /* if we're at end of tree, release buf and return failure */
2204  if (P_LEFTMOST(opaque))
2205  {
2206  _bt_relbuf(rel, buf);
2207  break;
2208  }
2209  /* remember original page we are stepping left from */
2210  obknum = BufferGetBlockNumber(buf);
2211  /* step left */
2212  blkno = lblkno = opaque->btpo_prev;
2213  _bt_relbuf(rel, buf);
2214  /* check for interrupts while we're not holding any buffer lock */
2216  buf = _bt_getbuf(rel, blkno, BT_READ);
2217  page = BufferGetPage(buf);
2218  TestForOldSnapshot(snapshot, rel, page);
2219  opaque = BTPageGetOpaque(page);
2220 
2221  /*
2222  * If this isn't the page we want, walk right till we find what we
2223  * want --- but go no more than four hops (an arbitrary limit). If we
2224  * don't find the correct page by then, the most likely bet is that
2225  * the original page got deleted and isn't in the sibling chain at all
2226  * anymore, not that its left sibling got split more than four times.
2227  *
2228  * Note that it is correct to test P_ISDELETED not P_IGNORE here,
2229  * because half-dead pages are still in the sibling chain. Caller
2230  * must reject half-dead pages if wanted.
2231  */
2232  tries = 0;
2233  for (;;)
2234  {
2235  if (!P_ISDELETED(opaque) && opaque->btpo_next == obknum)
2236  {
2237  /* Found desired page, return it */
2238  return buf;
2239  }
2240  if (P_RIGHTMOST(opaque) || ++tries > 4)
2241  break;
2242  blkno = opaque->btpo_next;
2243  buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
2244  page = BufferGetPage(buf);
2245  TestForOldSnapshot(snapshot, rel, page);
2246  opaque = BTPageGetOpaque(page);
2247  }
2248 
2249  /* Return to the original page to see what's up */
2250  buf = _bt_relandgetbuf(rel, buf, obknum, BT_READ);
2251  page = BufferGetPage(buf);
2252  TestForOldSnapshot(snapshot, rel, page);
2253  opaque = BTPageGetOpaque(page);
2254  if (P_ISDELETED(opaque))
2255  {
2256  /*
2257  * It was deleted. Move right to first nondeleted page (there
2258  * must be one); that is the page that has acquired the deleted
2259  * one's keyspace, so stepping left from it will take us where we
2260  * want to be.
2261  */
2262  for (;;)
2263  {
2264  if (P_RIGHTMOST(opaque))
2265  elog(ERROR, "fell off the end of index \"%s\"",
2267  blkno = opaque->btpo_next;
2268  buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
2269  page = BufferGetPage(buf);
2270  TestForOldSnapshot(snapshot, rel, page);
2271  opaque = BTPageGetOpaque(page);
2272  if (!P_ISDELETED(opaque))
2273  break;
2274  }
2275 
2276  /*
2277  * Now return to top of loop, resetting obknum to point to this
2278  * nondeleted page, and try again.
2279  */
2280  }
2281  else
2282  {
2283  /*
2284  * It wasn't deleted; the explanation had better be that the page
2285  * to the left got split or deleted. Without this check, we'd go
2286  * into an infinite loop if there's anything wrong.
2287  */
2288  if (opaque->btpo_prev == lblkno)
2289  elog(ERROR, "could not find left sibling of block %u in index \"%s\"",
2290  obknum, RelationGetRelationName(rel));
2291  /* Okay to try again with new lblkno value */
2292  }
2293  }
2294 
2295  return InvalidBuffer;
2296 }
2297 
2298 /*
2299  * _bt_get_endpoint() -- Find the first or last page on a given tree level
2300  *
2301  * If the index is empty, we will return InvalidBuffer; any other failure
2302  * condition causes ereport(). We will not return a dead page.
2303  *
2304  * The returned buffer is pinned and read-locked.
2305  */
2306 Buffer
2307 _bt_get_endpoint(Relation rel, uint32 level, bool rightmost,
2308  Snapshot snapshot)
2309 {
2310  Buffer buf;
2311  Page page;
2312  BTPageOpaque opaque;
2313  OffsetNumber offnum;
2314  BlockNumber blkno;
2315  IndexTuple itup;
2316 
2317  /*
2318  * If we are looking for a leaf page, okay to descend from fast root;
2319  * otherwise better descend from true root. (There is no point in being
2320  * smarter about intermediate levels.)
2321  */
2322  if (level == 0)
2323  buf = _bt_getroot(rel, BT_READ);
2324  else
2325  buf = _bt_gettrueroot(rel);
2326 
2327  if (!BufferIsValid(buf))
2328  return InvalidBuffer;
2329 
2330  page = BufferGetPage(buf);
2331  TestForOldSnapshot(snapshot, rel, page);
2332  opaque = BTPageGetOpaque(page);
2333 
2334  for (;;)
2335  {
2336  /*
2337  * If we landed on a deleted page, step right to find a live page
2338  * (there must be one). Also, if we want the rightmost page, step
2339  * right if needed to get to it (this could happen if the page split
2340  * since we obtained a pointer to it).
2341  */
2342  while (P_IGNORE(opaque) ||
2343  (rightmost && !P_RIGHTMOST(opaque)))
2344  {
2345  blkno = opaque->btpo_next;
2346  if (blkno == P_NONE)
2347  elog(ERROR, "fell off the end of index \"%s\"",
2349  buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
2350  page = BufferGetPage(buf);
2351  TestForOldSnapshot(snapshot, rel, page);
2352  opaque = BTPageGetOpaque(page);
2353  }
2354 
2355  /* Done? */
2356  if (opaque->btpo_level == level)
2357  break;
2358  if (opaque->btpo_level < level)
2359  ereport(ERROR,
2360  (errcode(ERRCODE_INDEX_CORRUPTED),
2361  errmsg_internal("btree level %u not found in index \"%s\"",
2362  level, RelationGetRelationName(rel))));
2363 
2364  /* Descend to leftmost or rightmost child page */
2365  if (rightmost)
2366  offnum = PageGetMaxOffsetNumber(page);
2367  else
2368  offnum = P_FIRSTDATAKEY(opaque);
2369 
2370  itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
2371  blkno = BTreeTupleGetDownLink(itup);
2372 
2373  buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
2374  page = BufferGetPage(buf);
2375  opaque = BTPageGetOpaque(page);
2376  }
2377 
2378  return buf;
2379 }
2380 
2381 /*
2382  * _bt_endpoint() -- Find the first or last page in the index, and scan
2383  * from there to the first key satisfying all the quals.
2384  *
2385  * This is used by _bt_first() to set up a scan when we've determined
2386  * that the scan must start at the beginning or end of the index (for
2387  * a forward or backward scan respectively). Exit conditions are the
2388  * same as for _bt_first().
2389  */
2390 static bool
2392 {
2393  Relation rel = scan->indexRelation;
2394  BTScanOpaque so = (BTScanOpaque) scan->opaque;
2395  Buffer buf;
2396  Page page;
2397  BTPageOpaque opaque;
2398  OffsetNumber start;
2399  BTScanPosItem *currItem;
2400 
2401  /*
2402  * Scan down to the leftmost or rightmost leaf page. This is a simplified
2403  * version of _bt_search(). We don't maintain a stack since we know we
2404  * won't need it.
2405  */
2407 
2408  if (!BufferIsValid(buf))
2409  {
2410  /*
2411  * Empty index. Lock the whole relation, as nothing finer to lock
2412  * exists.
2413  */
2414  PredicateLockRelation(rel, scan->xs_snapshot);
2416  return false;
2417  }
2418 
2420  page = BufferGetPage(buf);
2421  opaque = BTPageGetOpaque(page);
2422  Assert(P_ISLEAF(opaque));
2423 
2424  if (ScanDirectionIsForward(dir))
2425  {
2426  /* There could be dead pages to the left, so not this: */
2427  /* Assert(P_LEFTMOST(opaque)); */
2428 
2429  start = P_FIRSTDATAKEY(opaque);
2430  }
2431  else if (ScanDirectionIsBackward(dir))
2432  {
2433  Assert(P_RIGHTMOST(opaque));
2434 
2435  start = PageGetMaxOffsetNumber(page);
2436  }
2437  else
2438  {
2439  elog(ERROR, "invalid scan direction: %d", (int) dir);
2440  start = 0; /* keep compiler quiet */
2441  }
2442 
2443  /* remember which buffer we have pinned */
2444  so->currPos.buf = buf;
2445 
2446  _bt_initialize_more_data(so, dir);
2447 
2448  /*
2449  * Now load data from the first page of the scan.
2450  */
2451  if (!_bt_readpage(scan, dir, start))
2452  {
2453  /*
2454  * There's no actually-matching data on this page. Try to advance to
2455  * the next page. Return false if there's no matching data at all.
2456  */
2457  _bt_unlockbuf(scan->indexRelation, so->currPos.buf);
2458  if (!_bt_steppage(scan, dir))
2459  return false;
2460  }
2461  else
2462  {
2463  /* Drop the lock, and maybe the pin, on the current page */
2465  }
2466 
2467  /* OK, itemIndex says what to return */
2468  currItem = &so->currPos.items[so->currPos.itemIndex];
2469  scan->xs_heaptid = currItem->heapTid;
2470  if (scan->xs_want_itup)
2471  scan->xs_itup = (IndexTuple) (so->currTuples + currItem->tupleOffset);
2472 
2473  return true;
2474 }
2475 
2476 /*
2477  * _bt_initialize_more_data() -- initialize moreLeft/moreRight appropriately
2478  * for scan direction
2479  */
2480 static inline void
2482 {
2483  /* initialize moreLeft/moreRight appropriately for scan direction */
2484  if (ScanDirectionIsForward(dir))
2485  {
2486  so->currPos.moreLeft = false;
2487  so->currPos.moreRight = true;
2488  }
2489  else
2490  {
2491  so->currPos.moreLeft = true;
2492  so->currPos.moreRight = false;
2493  }
2494  so->numKilled = 0; /* just paranoia */
2495  so->markItemIndex = -1; /* ditto */
2496 }
int16 AttrNumber
Definition: attnum.h:21
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
int Buffer
Definition: buf.h:23
#define InvalidBuffer
Definition: buf.h:25
void IncrBufferRefCount(Buffer buffer)
Definition: bufmgr.c:3969
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2763
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3931
XLogRecPtr BufferGetLSNAtomic(Buffer buffer)
Definition: bufmgr.c:3012
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:280
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:303
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:228
Pointer Page
Definition: bufpage.h:78
static Item PageGetItem(Page page, ItemId itemId)
Definition: bufpage.h:351
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:240
static OffsetNumber PageGetMaxOffsetNumber(Page page)
Definition: bufpage.h:369
unsigned int uint32
Definition: c.h:442
#define RegProcedureIsValid(p)
Definition: c.h:713
#define Min(x, y)
Definition: c.h:937
#define INVERT_COMPARE_RESULT(var)
Definition: c.h:1063
#define MAXALIGN(LEN)
Definition: c.h:747
signed int int32
Definition: c.h:430
#define Max(x, y)
Definition: c.h:931
regproc RegProcedure
Definition: c.h:586
#define unlikely(x)
Definition: c.h:295
size_t Size
Definition: c.h:541
struct cursor * cur
Definition: ecpg.c:28
int errmsg_internal(const char *fmt,...)
Definition: elog.c:993
int errcode(int sqlerrcode)
Definition: elog.c:695
#define ERROR
Definition: elog.h:35
#define ereport(elevel,...)
Definition: elog.h:145
Datum FunctionCall2Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2)
Definition: fmgr.c:1134
FmgrInfo * index_getprocinfo(Relation irel, AttrNumber attnum, uint16 procnum)
Definition: indexam.c:803
int i
Definition: isn.c:73
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
int32 ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
Definition: itemptr.c:52
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
IndexTupleData * IndexTuple
Definition: itup.h:53
#define IndexTupleSize(itup)
Definition: itup.h:70
static Datum index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: itup.h:117
#define INDEX_SIZE_MASK
Definition: itup.h:65
Assert(fmt[strlen(fmt) - 1] !='\n')
Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum)
Definition: lsyscache.c:795
void * palloc(Size size)
Definition: mcxt.c:1199
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:121
void _bt_finish_split(Relation rel, Buffer lbuf, BTStack stack)
Definition: nbtinsert.c:2230
Buffer _bt_relandgetbuf(Relation rel, Buffer obuf, BlockNumber blkno, int access)
Definition: nbtpage.c:1015
void _bt_relbuf(Relation rel, Buffer buf)
Definition: nbtpage.c:1035
Buffer _bt_gettrueroot(Relation rel)
Definition: nbtpage.c:577
Buffer _bt_getroot(Relation rel, int access)
Definition: nbtpage.c:343
void _bt_metaversion(Relation rel, bool *heapkeyspace, bool *allequalimage)
Definition: nbtpage.c:736
Buffer _bt_getbuf(Relation rel, BlockNumber blkno, int access)
Definition: nbtpage.c:871
void _bt_unlockbuf(Relation rel, Buffer buf)
Definition: nbtpage.c:1082
void _bt_lockbuf(Relation rel, Buffer buf, int access)
Definition: nbtpage.c:1051
void _bt_parallel_release(IndexScanDesc scan, BlockNumber scan_page)
Definition: nbtree.c:693
void _bt_parallel_done(IndexScanDesc scan)
Definition: nbtree.c:716
bool _bt_parallel_seize(IndexScanDesc scan, BlockNumber *pageno)
Definition: nbtree.c:635
#define BTScanPosIsPinned(scanpos)
Definition: nbtree.h:989
static uint16 BTreeTupleGetNPosting(IndexTuple posting)
Definition: nbtree.h:512
static bool BTreeTupleIsPivot(IndexTuple itup)
Definition: nbtree.h:474
BTStackData * BTStack
Definition: nbtree.h:733
#define P_ISLEAF(opaque)
Definition: nbtree.h:220
#define P_HIKEY
Definition: nbtree.h:368
#define BTORDER_PROC
Definition: nbtree.h:701
#define P_LEFTMOST(opaque)
Definition: nbtree.h:218
#define BTPageGetOpaque(page)
Definition: nbtree.h:73
#define P_ISDELETED(opaque)
Definition: nbtree.h:222
#define MaxTIDsPerBTreePage
Definition: nbtree.h:185
#define BTScanPosIsValid(scanpos)
Definition: nbtree.h:1006
#define P_FIRSTDATAKEY(opaque)
Definition: nbtree.h:370
static uint32 BTreeTupleGetPostingOffset(IndexTuple posting)
Definition: nbtree.h:523
#define P_NONE
Definition: nbtree.h:212
#define P_RIGHTMOST(opaque)
Definition: nbtree.h:219
#define SK_BT_NULLS_FIRST
Definition: nbtree.h:1084
#define P_INCOMPLETE_SPLIT(opaque)
Definition: nbtree.h:227
static ItemPointer BTreeTupleGetPostingN(IndexTuple posting, int n)
Definition: nbtree.h:538
#define BT_READ
Definition: nbtree.h:713
static BlockNumber BTreeTupleGetDownLink(IndexTuple pivot)
Definition: nbtree.h:550
#define SK_BT_DESC
Definition: nbtree.h:1083
#define P_IGNORE(opaque)
Definition: nbtree.h:225
static ItemPointer BTreeTupleGetMaxHeapTID(IndexTuple itup)
Definition: nbtree.h:658
static bool BTreeTupleIsPosting(IndexTuple itup)
Definition: nbtree.h:486
#define BTScanPosInvalidate(scanpos)
Definition: nbtree.h:1012
#define BTScanPosUnpinIfPinned(scanpos)
Definition: nbtree.h:1000
static ItemPointer BTreeTupleGetHeapTID(IndexTuple itup)
Definition: nbtree.h:632
#define BT_WRITE
Definition: nbtree.h:714
#define BTreeTupleGetNAtts(itup, rel)
Definition: nbtree.h:571
BTScanOpaqueData * BTScanOpaque
Definition: nbtree.h:1073
static bool _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir)
Definition: nbtsearch.c:1980
static int _bt_binsrch_posting(BTScanInsert key, Page page, OffsetNumber offnum)
Definition: nbtsearch.c:570
bool _bt_first(IndexScanDesc scan, ScanDirection dir)
Definition: nbtsearch.c:860
static void _bt_saveitem(BTScanOpaque so, int itemIndex, OffsetNumber offnum, IndexTuple itup)
Definition: nbtsearch.c:1777
static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum)
Definition: nbtsearch.c:1521
static bool _bt_parallel_readpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir)
Definition: nbtsearch.c:2158
static int _bt_setuppostingitems(BTScanOpaque so, int itemIndex, OffsetNumber offnum, ItemPointer heapTid, IndexTuple itup)
Definition: nbtsearch.c:1807
static void _bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp)
Definition: nbtsearch.c:60
static OffsetNumber _bt_binsrch(Relation rel, BTScanInsert key, Buffer buf)
Definition: nbtsearch.c:338
static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
Definition: nbtsearch.c:2391
static bool _bt_steppage(IndexScanDesc scan, ScanDirection dir)
Definition: nbtsearch.c:1873
static void _bt_savepostingitem(BTScanOpaque so, int itemIndex, OffsetNumber offnum, ItemPointer heapTid, int tupleOffset)
Definition: nbtsearch.c:1845
OffsetNumber _bt_binsrch_insert(Relation rel, BTInsertState insertstate)
Definition: nbtsearch.c:442
bool _bt_next(IndexScanDesc scan, ScanDirection dir)
Definition: nbtsearch.c:1466
int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum)
Definition: nbtsearch.c:656
Buffer _bt_get_endpoint(Relation rel, uint32 level, bool rightmost, Snapshot snapshot)
Definition: nbtsearch.c:2307
static void _bt_initialize_more_data(BTScanOpaque so, ScanDirection dir)
Definition: nbtsearch.c:2481
BTStack _bt_search(Relation rel, BTScanInsert key, Buffer *bufP, int access, Snapshot snapshot)
Definition: nbtsearch.c:96
static Buffer _bt_walk_left(Relation rel, Buffer buf, Snapshot snapshot)
Definition: nbtsearch.c:2188
Buffer _bt_moveright(Relation rel, BTScanInsert key, Buffer buf, bool forupdate, BTStack stack, int access, Snapshot snapshot)
Definition: nbtsearch.c:236
void _bt_freestack(BTStack stack)
Definition: nbtutils.c:182
void _bt_killitems(IndexScanDesc scan)
Definition: nbtutils.c:1725
bool _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple, int tupnatts, ScanDirection dir, bool *continuescan)
Definition: nbtutils.c:1362
bool _bt_check_natts(Relation rel, bool heapkeyspace, Page page, OffsetNumber offnum)
Definition: nbtutils.c:2471
void _bt_preprocess_keys(IndexScanDesc scan)
Definition: nbtutils.c:749
#define InvalidOffsetNumber
Definition: off.h:26
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define OffsetNumberPrev(offsetNumber)
Definition: off.h:54
#define INDEX_MAX_KEYS
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225
static char * buf
Definition: pg_test_fsync.c:67
#define pgstat_count_index_scan(rel)
Definition: pgstat.h:540
uintptr_t Datum
Definition: postgres.h:412
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:660
static int32 DatumGetInt32(Datum X)
Definition: postgres.h:550
#define InvalidOid
Definition: postgres_ext.h:36
void PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot)
Definition: predicate.c:2595
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition: predicate.c:2572
short access
Definition: preproc-type.c:36
#define RelationGetDescr(relation)
Definition: rel.h:527
#define RelationGetRelationName(relation)
Definition: rel.h:535
#define RelationNeedsWAL(relation)
Definition: rel.h:626
#define IndexRelationGetNumberOfAttributes(relation)
Definition: rel.h:513
#define IndexRelationGetNumberOfKeyAttributes(relation)
Definition: rel.h:520
void ScanKeyEntryInitialize(ScanKey entry, int flags, AttrNumber attributeNumber, StrategyNumber strategy, Oid subtype, Oid collation, RegProcedure procedure, Datum argument)
Definition: scankey.c:32
void ScanKeyEntryInitializeWithInfo(ScanKey entry, int flags, AttrNumber attributeNumber, StrategyNumber strategy, Oid subtype, Oid collation, FmgrInfo *finfo, Datum argument)
Definition: scankey.c:101
#define ScanDirectionIsForward(direction)
Definition: sdir.h:55
#define ScanDirectionIsBackward(direction)
Definition: sdir.h:41
ScanDirection
Definition: sdir.h:23
#define SK_ROW_HEADER
Definition: skey.h:117
#define SK_ROW_MEMBER
Definition: skey.h:118
#define SK_SEARCHNOTNULL
Definition: skey.h:122
#define SK_ROW_END
Definition: skey.h:119
ScanKeyData * ScanKey
Definition: skey.h:75
#define SK_ISNULL
Definition: skey.h:115
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:96
uint16 StrategyNumber
Definition: stratnum.h:22
#define BTGreaterStrategyNumber
Definition: stratnum.h:33
#define InvalidStrategy
Definition: stratnum.h:24
#define BTLessStrategyNumber
Definition: stratnum.h:29
#define BTEqualStrategyNumber
Definition: stratnum.h:31
#define BTLessEqualStrategyNumber
Definition: stratnum.h:30
#define BTGreaterEqualStrategyNumber
Definition: stratnum.h:32
OffsetNumber stricthigh
Definition: nbtree.h:824
bool bounds_valid
Definition: nbtree.h:822
OffsetNumber low
Definition: nbtree.h:823
BTScanInsert itup_key
Definition: nbtree.h:812
BlockNumber btpo_next
Definition: nbtree.h:65
BlockNumber btpo_prev
Definition: nbtree.h:64
uint32 btpo_level
Definition: nbtree.h:66
char * markTuples
Definition: nbtree.h:1057
BTScanPosData currPos
Definition: nbtree.h:1069
char * currTuples
Definition: nbtree.h:1056
BTScanPosData markPos
Definition: nbtree.h:1070
ScanKey keyData
Definition: nbtree.h:1036
bool moreRight
Definition: nbtree.h:965
Buffer buf
Definition: nbtree.h:952
BlockNumber currPage
Definition: nbtree.h:955
int firstItem
Definition: nbtree.h:980
int nextTupleOffset
Definition: nbtree.h:971
BlockNumber nextPage
Definition: nbtree.h:956
bool moreLeft
Definition: nbtree.h:964
int lastItem
Definition: nbtree.h:981
BTScanPosItem items[MaxTIDsPerBTreePage]
Definition: nbtree.h:984
int itemIndex
Definition: nbtree.h:982
XLogRecPtr lsn
Definition: nbtree.h:954
ItemPointerData heapTid
Definition: nbtree.h:945
LocationIndex tupleOffset
Definition: nbtree.h:947
OffsetNumber indexOffset
Definition: nbtree.h:946
BlockNumber bts_blkno
Definition: nbtree.h:728
struct BTStackData * bts_parent
Definition: nbtree.h:730
OffsetNumber bts_offset
Definition: nbtree.h:729
Definition: fmgr.h:57
struct ParallelIndexScanDescData * parallel_scan
Definition: relscan.h:166
bool ignore_killed_tuples
Definition: relscan.h:129
IndexTuple xs_itup
Definition: relscan.h:142
Relation indexRelation
Definition: relscan.h:118
ItemPointerData xs_heaptid
Definition: relscan.h:147
struct SnapshotData * xs_snapshot
Definition: relscan.h:119
ItemPointerData t_tid
Definition: itup.h:37
unsigned short t_info
Definition: itup.h:49
Oid * rd_opcintype
Definition: rel.h:204
Oid * rd_opfamily
Definition: rel.h:203
int sk_flags
Definition: skey.h:66
Datum sk_argument
Definition: skey.h:72
FmgrInfo sk_func
Definition: skey.h:71
Oid sk_collation
Definition: skey.h:70
StrategyNumber sk_strategy
Definition: skey.h:68
AttrNumber sk_attno
Definition: skey.h:67