nbtsearch.c
1 /*-------------------------------------------------------------------------
2  *
3  * nbtsearch.c
4  * Search code for postgres btrees.
5  *
6  *
7  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * IDENTIFICATION
11  * src/backend/access/nbtree/nbtsearch.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 
16 #include "postgres.h"
17 
18 #include "access/nbtree.h"
19 #include "access/relscan.h"
20 #include "access/xact.h"
21 #include "miscadmin.h"
22 #include "pgstat.h"
23 #include "storage/predicate.h"
24 #include "utils/lsyscache.h"
25 #include "utils/rel.h"
26 
27 
28 static void _bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp);
29 static OffsetNumber _bt_binsrch(Relation rel, BTScanInsert key, Buffer buf);
30 static int _bt_binsrch_posting(BTScanInsert key, Page page,
31  OffsetNumber offnum);
32 static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir,
33  OffsetNumber offnum);
34 static void _bt_saveitem(BTScanOpaque so, int itemIndex,
35  OffsetNumber offnum, IndexTuple itup);
36 static int _bt_setuppostingitems(BTScanOpaque so, int itemIndex,
37  OffsetNumber offnum, ItemPointer heapTid,
38  IndexTuple itup);
39 static inline void _bt_savepostingitem(BTScanOpaque so, int itemIndex,
40  OffsetNumber offnum,
41  ItemPointer heapTid, int tupleOffset);
42 static bool _bt_steppage(IndexScanDesc scan, ScanDirection dir);
43 static bool _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir);
44 static bool _bt_parallel_readpage(IndexScanDesc scan, BlockNumber blkno,
45  ScanDirection dir);
46 static Buffer _bt_walk_left(Relation rel, Buffer buf);
47 static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
48 static inline void _bt_initialize_more_data(BTScanOpaque so, ScanDirection dir);
49 
50 
51 /*
52  * _bt_drop_lock_and_maybe_pin()
53  *
54  * Unlock the buffer; and if it is safe to release the pin, do that, too.
55  * This will prevent vacuum from stalling in a blocked state trying to read a
56  * page when a cursor is sitting on it.
57  *
58  * See nbtree/README section on making concurrent TID recycling safe.
59  */
60 static void
61 _bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp)
62 {
63  _bt_unlockbuf(scan->indexRelation, sp->buf);
64 
65  if (IsMVCCSnapshot(scan->xs_snapshot) &&
66  RelationNeedsWAL(scan->indexRelation) &&
67  !scan->xs_want_itup)
68  {
69  ReleaseBuffer(sp->buf);
70  sp->buf = InvalidBuffer;
71  }
72 }
73 
74 /*
75  * _bt_search() -- Search the tree for a particular scankey,
76  * or more precisely for the first leaf page it could be on.
77  *
78  * The passed scankey is an insertion-type scankey (see nbtree/README),
79  * but it can omit the rightmost column(s) of the index.
80  *
81  * Return value is a stack of parent-page pointers (i.e. there is no entry for
82  * the leaf level/page). *bufP is set to the address of the leaf-page buffer,
83  * which is locked and pinned. No locks are held on the parent pages,
84  * however!
85  *
86  * The returned buffer is locked according to access parameter. Additionally,
87  * access = BT_WRITE will allow an empty root page to be created and returned.
88  * When access = BT_READ, an empty index will result in *bufP being set to
89  * InvalidBuffer. Also, in BT_WRITE mode, any incomplete splits encountered
90  * during the search will be finished.
91  *
92  * heaprel must be provided by callers that pass access = BT_WRITE, since we
93  * might need to allocate a new root page for caller -- see _bt_allocbuf.
94  */
95 BTStack
96 _bt_search(Relation rel, Relation heaprel, BTScanInsert key, Buffer *bufP,
97  int access)
98 {
99  BTStack stack_in = NULL;
100  int page_access = BT_READ;
101 
102  /* heaprel must be set whenever _bt_allocbuf is reachable */
103  Assert(access == BT_READ || access == BT_WRITE);
104  Assert(access == BT_READ || heaprel != NULL);
105 
106  /* Get the root page to start with */
107  *bufP = _bt_getroot(rel, heaprel, access);
108 
109  /* If index is empty and access = BT_READ, no root page is created. */
110  if (!BufferIsValid(*bufP))
111  return (BTStack) NULL;
112 
113  /* Loop iterates once per level descended in the tree */
114  for (;;)
115  {
116  Page page;
117  BTPageOpaque opaque;
118  OffsetNumber offnum;
119  ItemId itemid;
120  IndexTuple itup;
121  BlockNumber child;
122  BTStack new_stack;
123 
124  /*
125  * Race -- the page we just grabbed may have split since we read its
126  * downlink in its parent page (or the metapage). If it has, we may
127  * need to move right to its new sibling. Do that.
128  *
129  * In write-mode, allow _bt_moveright to finish any incomplete splits
130  * along the way. Strictly speaking, we'd only need to finish an
131  * incomplete split on the leaf page we're about to insert to, not on
132  * any of the upper levels (internal pages with incomplete splits are
133  * also taken care of in _bt_getstackbuf). But this is a good
134  * opportunity to finish splits of internal pages too.
135  */
136  *bufP = _bt_moveright(rel, heaprel, key, *bufP, (access == BT_WRITE),
137  stack_in, page_access);
138 
139  /* if this is a leaf page, we're done */
140  page = BufferGetPage(*bufP);
141  opaque = BTPageGetOpaque(page);
142  if (P_ISLEAF(opaque))
143  break;
144 
145  /*
146  * Find the appropriate pivot tuple on this page. Its downlink points
147  * to the child page that we're about to descend to.
148  */
149  offnum = _bt_binsrch(rel, key, *bufP);
150  itemid = PageGetItemId(page, offnum);
151  itup = (IndexTuple) PageGetItem(page, itemid);
152  Assert(BTreeTupleIsPivot(itup) || !key->heapkeyspace);
153  child = BTreeTupleGetDownLink(itup);
154 
155  /*
156  * We need to save the location of the pivot tuple we chose in a new
157  * stack entry for this page/level. If caller ends up splitting a
158  * page one level down, it usually ends up inserting a new pivot
159  * tuple/downlink immediately after the location recorded here.
160  */
161  new_stack = (BTStack) palloc(sizeof(BTStackData));
162  new_stack->bts_blkno = BufferGetBlockNumber(*bufP);
163  new_stack->bts_offset = offnum;
164  new_stack->bts_parent = stack_in;
165 
166  /*
167  * Page level 1 is the lowest non-leaf level, just above the leaves. So,
168  * if we're on level 1 and have been asked to lock the leaf page in write
169  * mode, lock the next page down in write mode too, since it must be a leaf.
170  */
171  if (opaque->btpo_level == 1 && access == BT_WRITE)
172  page_access = BT_WRITE;
173 
174  /* drop the read lock on the page, then acquire one on its child */
175  *bufP = _bt_relandgetbuf(rel, *bufP, child, page_access);
176 
177  /* okay, all set to move down a level */
178  stack_in = new_stack;
179  }
180 
181  /*
182  * If we're asked to lock leaf in write mode, but didn't manage to, then
183  * relock. This should only happen when the root page is a leaf page (and
184  * the only page in the index other than the metapage).
185  */
186  if (access == BT_WRITE && page_access == BT_READ)
187  {
188  /* trade in our read lock for a write lock */
189  _bt_unlockbuf(rel, *bufP);
190  _bt_lockbuf(rel, *bufP, BT_WRITE);
191 
192  /*
193  * Race -- the leaf page may have split after we dropped the read lock
194  * but before we acquired a write lock. If it has, we may need to
195  * move right to its new sibling. Do that.
196  */
197  *bufP = _bt_moveright(rel, heaprel, key, *bufP, true, stack_in, BT_WRITE);
198  }
199 
200  return stack_in;
201 }
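The descent loop above reads more clearly in isolation. The following is a minimal standalone sketch of the same idea (toy code, not PostgreSQL internals): at each internal level, choose the downlink whose separator key admits the search key, push the (block, offset) position onto a parent stack the way BTStackData records it, and step down. Locking, _bt_moveright, and real page formats are all omitted.

    #include <stdio.h>
    #include <stdlib.h>

    /* A toy fixed-shape tree: block 0 is the root (level 2), blocks 1-3 are
     * level 1; their children (4-12) stand in for leaf blocks. */
    typedef struct { int nkeys; int keys[4]; int child[5]; } ToyPage;

    static const ToyPage pages[] = {
        {2, {100, 200}, {1, 2, 3}},      /* 0: root          */
        {2, {30, 60},   {4, 5, 6}},      /* 1: covers < 100  */
        {2, {130, 160}, {7, 8, 9}},      /* 2: covers 100..  */
        {2, {230, 260}, {10, 11, 12}},   /* 3: covers 200..  */
    };

    /* One remembered parent position, like BTStackData's blkno/offset/parent */
    typedef struct ToyStack { int blkno, offset; struct ToyStack *parent; } ToyStack;

    /* Slot of the last separator <= key; -1 means "take the first downlink",
     * which echoes how the first downlink acts as a minus-infinity pivot. */
    static int pick_slot(const ToyPage *pg, int key)
    {
        int slot = -1;
        for (int i = 0; i < pg->nkeys; i++)
            if (pg->keys[i] <= key)
                slot = i;
        return slot;
    }

    int main(void)
    {
        int key = 145, blkno = 0;
        ToyStack *stack = NULL;

        for (int level = 2; level > 0; level--)   /* stop just above the leaves */
        {
            const ToyPage *pg = &pages[blkno];
            int slot = pick_slot(pg, key);
            ToyStack *s = malloc(sizeof(ToyStack));

            s->blkno = blkno;
            s->offset = slot;
            s->parent = stack;                    /* link like bts_parent */
            stack = s;
            blkno = pg->child[slot + 1];
        }

        printf("leaf block %d; parent path:", blkno);
        for (ToyStack *s = stack; s != NULL; s = s->parent)
            printf(" (blk %d, off %d)", s->blkno, s->offset);
        printf("\n");            /* leaf block 8; path (blk 2, off 0) (blk 0, off 0) */
        return 0;
    }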
202 
203 /*
204  * _bt_moveright() -- move right in the btree if necessary.
205  *
206  * When we follow a pointer to reach a page, it is possible that
207  * the page has changed in the meanwhile. If this happens, we're
208  * guaranteed that the page has "split right" -- that is, that any
209  * data that appeared on the page originally is either on the page
210  * or strictly to the right of it.
211  *
212  * This routine decides whether or not we need to move right in the
213  * tree by examining the high key entry on the page. If that entry is
214  * strictly less than the scankey, or <= the scankey in the
215  * key.nextkey=true case, then we followed the wrong link and we need
216  * to move right.
217  *
218  * The passed insertion-type scankey can omit the rightmost column(s) of the
219  * index. (see nbtree/README)
220  *
221  * When key.nextkey is false (the usual case), we are looking for the first
222  * item >= key. When key.nextkey is true, we are looking for the first item
223  * strictly greater than key.
224  *
225  * If forupdate is true, we will attempt to finish any incomplete splits
226  * that we encounter. This is required when locking a target page for an
227  * insertion, because we don't allow inserting on a page before the split is
228  * completed. 'heaprel' and 'stack' are only used if forupdate is true.
229  *
230  * On entry, we have the buffer pinned and a lock of the type specified by
231  * 'access'. If we move right, we release the buffer and lock and acquire
232  * the same on the right sibling. Return value is the buffer we stop at.
233  */
234 Buffer
235 _bt_moveright(Relation rel,
236  Relation heaprel,
237  BTScanInsert key,
238  Buffer buf,
239  bool forupdate,
240  BTStack stack,
241  int access)
242 {
243  Page page;
244  BTPageOpaque opaque;
245  int32 cmpval;
246 
247  Assert(!forupdate || heaprel != NULL);
248 
249  /*
250  * When nextkey = false (normal case): if the scan key that brought us to
251  * this page is > the high key stored on the page, then the page has split
252  * and we need to move right. (pg_upgrade'd !heapkeyspace indexes could
253  * have some duplicates to the right as well as the left, but that's
254  * something that's only ever dealt with on the leaf level, after
255  * _bt_search has found an initial leaf page.)
256  *
257  * When nextkey = true: move right if the scan key is >= page's high key.
258  * (Note that key.scantid cannot be set in this case.)
259  *
260  * The page could even have split more than once, so scan as far as
261  * needed.
262  *
263  * We also have to move right if we followed a link that brought us to a
264  * dead page.
265  */
266  cmpval = key->nextkey ? 0 : 1;
267 
268  for (;;)
269  {
270  page = BufferGetPage(buf);
271  opaque = BTPageGetOpaque(page);
272 
273  if (P_RIGHTMOST(opaque))
274  break;
275 
276  /*
277  * Finish any incomplete splits we encounter along the way.
278  */
279  if (forupdate && P_INCOMPLETE_SPLIT(opaque))
280  {
281  BlockNumber blkno = BufferGetBlockNumber(buf);
282 
283  /* upgrade our lock if necessary */
284  if (access == BT_READ)
285  {
286  _bt_unlockbuf(rel, buf);
287  _bt_lockbuf(rel, buf, BT_WRITE);
288  }
289 
290  if (P_INCOMPLETE_SPLIT(opaque))
291  _bt_finish_split(rel, heaprel, buf, stack);
292  else
293  _bt_relbuf(rel, buf);
294 
295  /* re-acquire the lock in the right mode, and re-check */
296  buf = _bt_getbuf(rel, blkno, access);
297  continue;
298  }
299 
300  if (P_IGNORE(opaque) || _bt_compare(rel, key, page, P_HIKEY) >= cmpval)
301  {
302  /* step right one page */
303  buf = _bt_relandgetbuf(rel, buf, opaque->btpo_next, access);
304  continue;
305  }
306  else
307  break;
308  }
309 
310  if (P_IGNORE(opaque))
311  elog(ERROR, "fell off the end of index \"%s\"",
312  RelationGetRelationName(rel));
313 
314  return buf;
315 }
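As a hedged illustration of just the walk-right decision, here is a standalone sketch in which the btree is reduced to a singly linked list of pages carrying only a high key. The cmpval trick is the same one _bt_moveright (and _bt_binsrch) use; locks, pins, incomplete splits, and dead pages are deliberately left out.

    #include <stdio.h>

    /* A page stripped to its high key: every key on the page is <= highkey,
     * and anything greater lives on a right sibling. The rightmost page has
     * no high key at all. */
    typedef struct SplitPage
    {
        int     highkey;
        int     rightmost;
        struct SplitPage *next;
    } SplitPage;

    /* Move right while the search key could have been carried to a right
     * sibling by a concurrent split: with nextkey=false (cmpval=1) we move
     * when key > highkey, with nextkey=true (cmpval=0) when key >= highkey. */
    static SplitPage *move_right(SplitPage *pg, int key, int nextkey)
    {
        int cmpval = nextkey ? 0 : 1;

        while (!pg->rightmost)
        {
            int cmp = (key > pg->highkey) - (key < pg->highkey);

            if (cmp >= cmpval)
                pg = pg->next;
            else
                break;
        }
        return pg;
    }

    int main(void)
    {
        SplitPage c = {0, 1, NULL};        /* rightmost page */
        SplitPage b = {50, 0, &c};         /* keys <= 50     */
        SplitPage a = {20, 0, &b};         /* keys <= 20     */

        printf("%d\n", move_right(&a, 20, 0) == &a);   /* 1: 20 <= high key */
        printf("%d\n", move_right(&a, 25, 0) == &b);   /* 1: 25 > 20, move  */
        printf("%d\n", move_right(&a, 99, 0) == &c);   /* 1: to rightmost   */
        return 0;
    }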
316 
317 /*
318  * _bt_binsrch() -- Do a binary search for a key on a particular page.
319  *
320  * On a leaf page, _bt_binsrch() returns the OffsetNumber of the first
321  * key >= given scankey, or > scankey if nextkey is true. (NOTE: in
322  * particular, this means it is possible to return a value 1 greater than the
323  * number of keys on the page, if the scankey is > all keys on the page.)
324  *
325  * On an internal (non-leaf) page, _bt_binsrch() returns the OffsetNumber
326  * of the last key < given scankey, or last key <= given scankey if nextkey
327  * is true. (Since _bt_compare treats the first data key of such a page as
328  * minus infinity, there will be at least one key < scankey, so the result
329  * always points at one of the keys on the page.) This key indicates the
330  * right place to descend to be sure we find all leaf keys >= given scankey
331  * (or leaf keys > given scankey when nextkey is true).
332  *
333  * This procedure is not responsible for walking right, it just examines
334  * the given page. _bt_binsrch() has no lock or refcount side effects
335  * on the buffer.
336  */
337 static OffsetNumber
338 _bt_binsrch(Relation rel,
339  BTScanInsert key,
340  Buffer buf)
341 {
342  Page page;
343  BTPageOpaque opaque;
344  OffsetNumber low,
345  high;
346  int32 result,
347  cmpval;
348 
349  page = BufferGetPage(buf);
350  opaque = BTPageGetOpaque(page);
351 
352  /* Requesting nextkey semantics while using scantid seems nonsensical */
353  Assert(!key->nextkey || key->scantid == NULL);
354  /* scantid-set callers must use _bt_binsrch_insert() on leaf pages */
355  Assert(!P_ISLEAF(opaque) || key->scantid == NULL);
356 
357  low = P_FIRSTDATAKEY(opaque);
358  high = PageGetMaxOffsetNumber(page);
359 
360  /*
361  * If there are no keys on the page, return the first available slot. Note
362  * this covers two cases: the page is really empty (no keys), or it
363  * contains only a high key. The latter case is possible after vacuuming.
364  * This can never happen on an internal page, however, since they are
365  * never empty (an internal page must have children).
366  */
367  if (unlikely(high < low))
368  return low;
369 
370  /*
371  * Binary search to find the first key on the page >= scan key, or first
372  * key > scankey when nextkey is true.
373  *
374  * For nextkey=false (cmpval=1), the loop invariant is: all slots before
375  * 'low' are < scan key, all slots at or after 'high' are >= scan key.
376  *
377  * For nextkey=true (cmpval=0), the loop invariant is: all slots before
378  * 'low' are <= scan key, all slots at or after 'high' are > scan key.
379  *
380  * We can fall out when high == low.
381  */
382  high++; /* establish the loop invariant for high */
383 
384  cmpval = key->nextkey ? 0 : 1; /* select comparison value */
385 
386  while (high > low)
387  {
388  OffsetNumber mid = low + ((high - low) / 2);
389 
390  /* We have low <= mid < high, so mid points at a real slot */
391 
392  result = _bt_compare(rel, key, page, mid);
393 
394  if (result >= cmpval)
395  low = mid + 1;
396  else
397  high = mid;
398  }
399 
400  /*
401  * At this point we have high == low, but be careful: they could point
402  * past the last slot on the page.
403  *
404  * On a leaf page, we always return the first key >= scan key (resp. >
405  * scan key), which could be the last slot + 1.
406  */
407  if (P_ISLEAF(opaque))
408  return low;
409 
410  /*
411  * On a non-leaf page, return the last key < scan key (resp. <= scan key).
412  * There must be one if _bt_compare() is playing by the rules.
413  */
414  Assert(low > P_FIRSTDATAKEY(opaque));
415 
416  return OffsetNumberPrev(low);
417 }
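The loop invariant is worth seeing without the page machinery. Below is a self-contained sketch of the same binary search over a plain sorted array, with the identical cmpval folding of the nextkey=false ("first slot >= key") and nextkey=true ("first slot > key") cases.

    #include <stdio.h>

    /* First index in arr[0..n) whose value is >= key (nextkey=0) or > key
     * (nextkey=1); may return n, just as _bt_binsrch may return maxoff+1. */
    static int binsrch(const int *arr, int n, int key, int nextkey)
    {
        int low = 0;
        int high = n;                      /* one past the last slot */
        int cmpval = nextkey ? 0 : 1;

        while (high > low)
        {
            int mid = low + (high - low) / 2;
            /* three-way compare of the "scankey" against the slot */
            int result = (key > arr[mid]) - (key < arr[mid]);

            if (result >= cmpval)
                low = mid + 1;             /* all slots before low are too small */
            else
                high = mid;                /* all slots from high on satisfy us  */
        }
        return low;                        /* low == high on exit */
    }

    int main(void)
    {
        int a[] = {10, 20, 20, 30};

        printf("%d\n", binsrch(a, 4, 20, 0));  /* 1: first slot >= 20 */
        printf("%d\n", binsrch(a, 4, 20, 1));  /* 3: first slot > 20  */
        printf("%d\n", binsrch(a, 4, 99, 0));  /* 4: one past the end */
        return 0;
    }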
418 
419 /*
420  *
421  * _bt_binsrch_insert() -- Cacheable, incremental leaf page binary search.
422  *
423  * Like _bt_binsrch(), but with support for caching the binary search
424  * bounds. Only used during insertion, and only on the leaf page that it
425  * looks like caller will insert tuple on. Exclusive-locked and pinned
426  * leaf page is contained within insertstate.
427  *
428  * Caches the bounds fields in insertstate so that a subsequent call can
429  * reuse the low and strict high bounds of original binary search. Callers
430  * that use these fields directly must be prepared for the case where low
431  * and/or stricthigh are not on the same page (one or both exceed maxoff
432  * for the page). The case where there are no items on the page (high <
433  * low) makes bounds invalid.
434  *
435  * Caller is responsible for invalidating bounds when it modifies the page
436  * before calling here a second time, and for dealing with posting list
437  * tuple matches (callers can use insertstate's postingoff field to
438  * determine which existing heap TID will need to be replaced by a posting
439  * list split).
440  */
441 OffsetNumber
442 _bt_binsrch_insert(Relation rel, BTInsertState insertstate)
443 {
444  BTScanInsert key = insertstate->itup_key;
445  Page page;
446  BTPageOpaque opaque;
447  OffsetNumber low,
448  high,
449  stricthigh;
450  int32 result,
451  cmpval;
452 
453  page = BufferGetPage(insertstate->buf);
454  opaque = BTPageGetOpaque(page);
455 
456  Assert(P_ISLEAF(opaque));
457  Assert(!key->nextkey);
458  Assert(insertstate->postingoff == 0);
459 
460  if (!insertstate->bounds_valid)
461  {
462  /* Start new binary search */
463  low = P_FIRSTDATAKEY(opaque);
464  high = PageGetMaxOffsetNumber(page);
465  }
466  else
467  {
468  /* Restore result of previous binary search against same page */
469  low = insertstate->low;
470  high = insertstate->stricthigh;
471  }
472 
473  /* If there are no keys on the page, return the first available slot */
474  if (unlikely(high < low))
475  {
476  /* Caller can't reuse bounds */
477  insertstate->low = InvalidOffsetNumber;
478  insertstate->stricthigh = InvalidOffsetNumber;
479  insertstate->bounds_valid = false;
480  return low;
481  }
482 
483  /*
484  * Binary search to find the first key on the page >= scan key. (nextkey
485  * is always false when inserting).
486  *
487  * The loop invariant is: all slots before 'low' are < scan key, all slots
488  * at or after 'high' are >= scan key. 'stricthigh' is > scan key, and is
489  * maintained to save additional search effort for caller.
490  *
491  * We can fall out when high == low.
492  */
493  if (!insertstate->bounds_valid)
494  high++; /* establish the loop invariant for high */
495  stricthigh = high; /* high initially strictly higher */
496 
497  cmpval = 1; /* !nextkey comparison value */
498 
499  while (high > low)
500  {
501  OffsetNumber mid = low + ((high - low) / 2);
502 
503  /* We have low <= mid < high, so mid points at a real slot */
504 
505  result = _bt_compare(rel, key, page, mid);
506 
507  if (result >= cmpval)
508  low = mid + 1;
509  else
510  {
511  high = mid;
512  if (result != 0)
513  stricthigh = high;
514  }
515 
516  /*
517  * If tuple at offset located by binary search is a posting list whose
518  * TID range overlaps with caller's scantid, perform posting list
519  * binary search to set postingoff for caller. Caller must split the
520  * posting list when postingoff is set. This should happen
521  * infrequently.
522  */
523  if (unlikely(result == 0 && key->scantid != NULL))
524  {
525  /*
526  * postingoff should never be set more than once per leaf page
527  * binary search. That would mean that there are duplicate table
528  * TIDs in the index, which is never okay. Check for that here.
529  */
530  if (insertstate->postingoff != 0)
531  ereport(ERROR,
532  (errcode(ERRCODE_INDEX_CORRUPTED),
533  errmsg_internal("table tid from new index tuple (%u,%u) cannot find insert offset between offsets %u and %u of block %u in index \"%s\"",
534  ItemPointerGetBlockNumber(key->scantid),
535  ItemPointerGetOffsetNumber(key->scantid),
536  low, stricthigh,
537  BufferGetBlockNumber(insertstate->buf),
538  RelationGetRelationName(rel))));
539 
540  insertstate->postingoff = _bt_binsrch_posting(key, page, mid);
541  }
542  }
543 
544  /*
545  * On a leaf page, a binary search always returns the first key >= scan
546  * key (at least in !nextkey case), which could be the last slot + 1. This
547  * is also the lower bound of cached search.
548  *
549  * stricthigh may also be the last slot + 1, which prevents caller from
550  * using bounds directly, but is still useful to us if we're called a
551  * second time with cached bounds (cached low will be < stricthigh when
552  * that happens).
553  */
554  insertstate->low = low;
555  insertstate->stricthigh = stricthigh;
556  insertstate->bounds_valid = true;
557 
558  return low;
559 }
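The bound-caching idea generalizes to any repeated search of an unchanged array. A minimal sketch (the field names only loosely mirror BTInsertStateData; this is not the real struct) of how a second search can start from the remembered [low, stricthigh) window:

    #include <stdio.h>
    #include <stdbool.h>

    typedef struct
    {
        int     low;            /* first slot that might be >= key */
        int     stricthigh;     /* first slot known to be > key    */
        bool    valid;
    } CachedBounds;

    /* First slot >= key; remembers bounds so a later call on the same,
     * unmodified array can search the cached window instead of [0, n). */
    static int search_cached(const int *arr, int n, int key, CachedBounds *b)
    {
        int low = b->valid ? b->low : 0;
        int high = b->valid ? b->stricthigh : n;
        int stricthigh = high;

        while (high > low)
        {
            int mid = low + (high - low) / 2;

            if (arr[mid] < key)
                low = mid + 1;
            else
            {
                high = mid;
                if (arr[mid] != key)
                    stricthigh = high;     /* slot is strictly greater */
            }
        }
        b->low = low;
        b->stricthigh = stricthigh;
        b->valid = true;
        return low;
    }

    int main(void)
    {
        int a[] = {10, 20, 30, 30, 40};
        CachedBounds b = {0, 0, false};

        printf("%d\n", search_cached(a, 5, 20, &b));  /* 1; caches [1, 2)       */
        printf("%d\n", search_cached(a, 5, 20, &b));  /* 1; rescans [1, 2) only */
        return 0;
    }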
560 
561 /*----------
562  * _bt_binsrch_posting() -- posting list binary search.
563  *
564  * Helper routine for _bt_binsrch_insert().
565  *
566  * Returns offset into posting list where caller's scantid belongs.
567  *----------
568  */
569 static int
570 _bt_binsrch_posting(BTScanInsert key, Page page, OffsetNumber offnum)
571 {
572  IndexTuple itup;
573  ItemId itemid;
574  int low,
575  high,
576  mid,
577  res;
578 
579  /*
580  * If this isn't a posting tuple, then the index must be corrupt (if it is
581  * an ordinary non-pivot tuple then there must be an existing tuple with a
582  * heap TID that equals inserter's new heap TID/scantid). Defensively
583  * check that tuple is a posting list tuple whose posting list range
584  * includes caller's scantid.
585  *
586  * (This is also needed because contrib/amcheck's rootdescend option needs
587  * to be able to relocate a non-pivot tuple using _bt_binsrch_insert().)
588  */
589  itemid = PageGetItemId(page, offnum);
590  itup = (IndexTuple) PageGetItem(page, itemid);
591  if (!BTreeTupleIsPosting(itup))
592  return 0;
593 
594  Assert(key->heapkeyspace && key->allequalimage);
595 
596  /*
597  * In the event that posting list tuple has LP_DEAD bit set, indicate this
598  * to _bt_binsrch_insert() caller by returning -1, a sentinel value. A
599  * second call to _bt_binsrch_insert() can take place when its caller has
600  * removed the dead item.
601  */
602  if (ItemIdIsDead(itemid))
603  return -1;
604 
605  /* "high" is past end of posting list for loop invariant */
606  low = 0;
607  high = BTreeTupleGetNPosting(itup);
608  Assert(high >= 2);
609 
610  while (high > low)
611  {
612  mid = low + ((high - low) / 2);
613  res = ItemPointerCompare(key->scantid,
614  BTreeTupleGetPostingN(itup, mid));
615 
616  if (res > 0)
617  low = mid + 1;
618  else if (res < 0)
619  high = mid;
620  else
621  return mid;
622  }
623 
624  /* Exact match not found */
625  return low;
626 }
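A posting list is just a sorted array of heap TIDs, so the search above is an ordinary binary search with a two-part comparator. A standalone sketch, with TIDs reduced to (block, offset) pairs compared the way ItemPointerCompare orders them:

    #include <stdio.h>

    typedef struct { unsigned blk, off; } TidSketch;

    /* block number first, then offset, as ItemPointerCompare does */
    static int tid_cmp(TidSketch a, TidSketch b)
    {
        if (a.blk != b.blk) return a.blk < b.blk ? -1 : 1;
        if (a.off != b.off) return a.off < b.off ? -1 : 1;
        return 0;
    }

    /* Offset where key belongs in the sorted list: the exact slot on a
     * match, otherwise the insertion point (like _bt_binsrch_posting). */
    static int posting_search(const TidSketch *list, int n, TidSketch key)
    {
        int low = 0, high = n;

        while (high > low)
        {
            int mid = low + (high - low) / 2;
            int res = tid_cmp(key, list[mid]);

            if (res > 0)
                low = mid + 1;
            else if (res < 0)
                high = mid;
            else
                return mid;        /* exact match */
        }
        return low;                /* not present: insertion point */
    }

    int main(void)
    {
        TidSketch list[] = {{1, 2}, {1, 5}, {3, 1}, {7, 4}};
        TidSketch key = {3, 1};

        printf("%d\n", posting_search(list, 4, key));   /* 2: exact match  */
        key.off = 2;
        printf("%d\n", posting_search(list, 4, key));   /* 3: would insert */
        return 0;
    }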
627 
628 /*----------
629  * _bt_compare() -- Compare insertion-type scankey to tuple on a page.
630  *
631  * page/offnum: location of btree item to be compared to.
632  *
633  * This routine returns:
634  * <0 if scankey < tuple at offnum;
635  * 0 if scankey == tuple at offnum;
636  * >0 if scankey > tuple at offnum.
637  *
638  * NULLs in the keys are treated as sortable values. Therefore
639  * "equality" does not necessarily mean that the item should be returned
640  * to the caller as a matching key. Similarly, an insertion scankey
641  * with its scantid set is treated as equal to a posting tuple whose TID
642  * range overlaps with its scantid. There generally won't be a
643  * matching TID in the posting tuple, which the caller must handle
644  * itself (e.g., by splitting the posting list tuple).
645  *
646  * CRUCIAL NOTE: on a non-leaf page, the first data key is assumed to be
647  * "minus infinity": this routine will always claim it is less than the
648  * scankey. The actual key value stored is explicitly truncated to 0
649  * attributes (explicitly minus infinity) with version 3+ indexes, but
650  * that isn't relied upon. This allows us to implement the Lehman and
651  * Yao convention that the first down-link pointer is before the first
652  * key. See backend/access/nbtree/README for details.
653  *----------
654  */
655 int32
656 _bt_compare(Relation rel,
657  BTScanInsert key,
658  Page page,
659  OffsetNumber offnum)
660 {
661  TupleDesc itupdesc = RelationGetDescr(rel);
662  BTPageOpaque opaque = BTPageGetOpaque(page);
663  IndexTuple itup;
664  ItemPointer heapTid;
665  ScanKey scankey;
666  int ncmpkey;
667  int ntupatts;
668  int32 result;
669 
670  Assert(_bt_check_natts(rel, key->heapkeyspace, page, offnum));
671  Assert(key->keysz <= IndexRelationGetNumberOfKeyAttributes(rel));
672  Assert(key->heapkeyspace || key->scantid == NULL);
673 
674  /*
675  * Force result ">" if target item is first data item on an internal page
676  * --- see NOTE above.
677  */
678  if (!P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque))
679  return 1;
680 
681  itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
682  ntupatts = BTreeTupleGetNAtts(itup, rel);
683 
684  /*
685  * The scan key is set up with the attribute number associated with each
686  * term in the key. It is important that, if the index is multi-key, the
687  * scan contain the first k key attributes, and that they be in order. If
688  * you think about how multi-key ordering works, you'll understand why
689  * this is.
690  *
691  * We don't test for violation of this condition here, however. The
692  * initial setup for the index scan had better have gotten it right (see
693  * _bt_first).
694  */
695 
696  ncmpkey = Min(ntupatts, key->keysz);
697  Assert(key->heapkeyspace || ncmpkey == key->keysz);
698  Assert(!BTreeTupleIsPosting(itup) || key->allequalimage);
699  scankey = key->scankeys;
700  for (int i = 1; i <= ncmpkey; i++)
701  {
702  Datum datum;
703  bool isNull;
704 
705  datum = index_getattr(itup, scankey->sk_attno, itupdesc, &isNull);
706 
707  if (scankey->sk_flags & SK_ISNULL) /* key is NULL */
708  {
709  if (isNull)
710  result = 0; /* NULL "=" NULL */
711  else if (scankey->sk_flags & SK_BT_NULLS_FIRST)
712  result = -1; /* NULL "<" NOT_NULL */
713  else
714  result = 1; /* NULL ">" NOT_NULL */
715  }
716  else if (isNull) /* key is NOT_NULL and item is NULL */
717  {
718  if (scankey->sk_flags & SK_BT_NULLS_FIRST)
719  result = 1; /* NOT_NULL ">" NULL */
720  else
721  result = -1; /* NOT_NULL "<" NULL */
722  }
723  else
724  {
725  /*
726  * The sk_func needs to be passed the index value as left arg and
727  * the sk_argument as right arg (they might be of different
728  * types). Since it is convenient for callers to think of
729  * _bt_compare as comparing the scankey to the index item, we have
730  * to flip the sign of the comparison result. (Unless it's a DESC
731  * column, in which case we *don't* flip the sign.)
732  */
733  result = DatumGetInt32(FunctionCall2Coll(&scankey->sk_func,
734  scankey->sk_collation,
735  datum,
736  scankey->sk_argument));
737 
738  if (!(scankey->sk_flags & SK_BT_DESC))
739  INVERT_COMPARE_RESULT(result);
740  }
741 
742  /* if the keys are unequal, return the difference */
743  if (result != 0)
744  return result;
745 
746  scankey++;
747  }
748 
749  /*
750  * All non-truncated attributes (other than heap TID) were found to be
751  * equal. Treat truncated attributes as minus infinity when scankey has a
752  * key attribute value that would otherwise be compared directly.
753  *
754  * Note: it doesn't matter if ntupatts includes non-key attributes;
755  * scankey won't, so explicitly excluding non-key attributes isn't
756  * necessary.
757  */
758  if (key->keysz > ntupatts)
759  return 1;
760 
761  /*
762  * Use the heap TID attribute and scantid to try to break the tie. The
763  * rules are the same as any other key attribute -- only the
764  * representation differs.
765  */
766  heapTid = BTreeTupleGetHeapTID(itup);
767  if (key->scantid == NULL)
768  {
769  /*
770  * Most searches have a scankey that is considered greater than a
771  * truncated pivot tuple if and when the scankey has equal values for
772  * attributes up to and including the least significant untruncated
773  * attribute in tuple.
774  *
775  * For example, if an index has the minimum two attributes (single
776  * user key attribute, plus heap TID attribute), and a page's high key
777  * is ('foo', -inf), and scankey is ('foo', <omitted>), the search
778  * will not descend to the page to the left. The search will descend
779  * right instead. The truncated attribute in pivot tuple means that
780  * all non-pivot tuples on the page to the left are strictly < 'foo',
781  * so it isn't necessary to descend left. In other words, search
782  * doesn't have to descend left because it isn't interested in a match
783  * that has a heap TID value of -inf.
784  *
785  * However, some searches (pivotsearch searches) actually require that
786  * we descend left when this happens. -inf is treated as a possible
787  * match for omitted scankey attribute(s). This is needed by page
788  * deletion, which must re-find leaf pages that are targets for
789  * deletion using their high keys.
790  *
791  * Note: the heap TID part of the test ensures that scankey is being
792  * compared to a pivot tuple with one or more truncated key
793  * attributes.
794  *
795  * Note: pg_upgrade'd !heapkeyspace indexes must always descend to the
796  * left here, since they have no heap TID attribute (and cannot have
797  * any -inf key values in any case, since truncation can only remove
798  * non-key attributes). !heapkeyspace searches must always be
799  * prepared to deal with matches on both sides of the pivot once the
800  * leaf level is reached.
801  */
802  if (key->heapkeyspace && !key->pivotsearch &&
803  key->keysz == ntupatts && heapTid == NULL)
804  return 1;
805 
806  /* All provided scankey arguments found to be equal */
807  return 0;
808  }
809 
810  /*
811  * Treat truncated heap TID as minus infinity, since scankey has a key
812  * attribute value (scantid) that would otherwise be compared directly
813  */
814  Assert(key->keysz == IndexRelationGetNumberOfKeyAttributes(rel));
815  if (heapTid == NULL)
816  return 1;
817 
818  /*
819  * Scankey must be treated as equal to a posting list tuple if its scantid
820  * value falls within the range of the posting list. In all other cases
821  * there can only be a single heap TID value, which is compared directly
822  * with scantid.
823  */
824  Assert(ItemPointerIsValid(key->scantid));
825  result = ItemPointerCompare(key->scantid, heapTid);
826  if (result <= 0 || !BTreeTupleIsPosting(itup))
827  return result;
828  else
829  {
830  result = ItemPointerCompare(key->scantid,
831  BTreeTupleGetMaxHeapTID(itup));
832  if (result > 0)
833  return 1;
834  }
835 
836  return 0;
837 }
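The per-attribute step of _bt_compare can be restated compactly. The sketch below collapses the support-function call and its sign-flipping into a direct three-way comparison, keeping only the NULL-ordering and DESC rules; the flag layout is illustrative, not the real SK_* bits.

    #include <stdio.h>

    typedef struct { int value; int isnull; } AttSketch;

    /* <0, 0, >0 for key vs. index attribute, treating NULLs as sortable
     * values (NULLS FIRST sorts them before every non-NULL, NULLS LAST
     * after), and flipping the sign for a DESC column. */
    static int compare_att(AttSketch key, AttSketch att, int nulls_first, int desc)
    {
        int result;

        if (key.isnull && att.isnull)
            result = 0;                            /* NULL "=" NULL */
        else if (key.isnull)
            result = nulls_first ? -1 : 1;
        else if (att.isnull)
            result = nulls_first ? 1 : -1;
        else
        {
            result = (key.value > att.value) - (key.value < att.value);
            if (desc)
                result = -result;                  /* DESC: invert the order */
        }
        return result;
    }

    int main(void)
    {
        AttSketch k = {42, 0}, nul = {0, 1}, small = {7, 0};

        printf("%d\n", compare_att(k, small, 0, 0)); /*  1: 42 > 7       */
        printf("%d\n", compare_att(k, small, 0, 1)); /* -1: DESC inverts */
        printf("%d\n", compare_att(k, nul, 0, 0));   /* -1: NULLS LAST puts
                                                        42 before NULL   */
        return 0;
    }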
838 
839 /*
840  * _bt_first() -- Find the first item in a scan.
841  *
842  * We need to be clever about the direction of scan, the search
843  * conditions, and the tree ordering. We find the first item (or,
844  * if backwards scan, the last item) in the tree that satisfies the
845  * qualifications in the scan key. On success exit, the page containing
846  * the current index tuple is pinned but not locked, and data about
847  * the matching tuple(s) on the page has been loaded into so->currPos.
848  * scan->xs_ctup.t_self is set to the heap TID of the current tuple,
849  * and if requested, scan->xs_itup points to a copy of the index tuple.
850  *
851  * If there are no matching items in the index, we return false, with no
852  * pins or locks held.
853  *
854  * Note that scan->keyData[], and the so->keyData[] scankey built from it,
855  * are both search-type scankeys (see nbtree/README for more about this).
856  * Within this routine, we build a temporary insertion-type scankey to use
857  * in locating the scan start position.
858  */
859 bool
860 _bt_first(IndexScanDesc scan, ScanDirection dir)
861 {
862  Relation rel = scan->indexRelation;
863  BTScanOpaque so = (BTScanOpaque) scan->opaque;
864  Buffer buf;
865  BTStack stack;
866  OffsetNumber offnum;
867  StrategyNumber strat;
868  bool nextkey;
869  bool goback;
870  BTScanInsertData inskey;
871  ScanKey startKeys[INDEX_MAX_KEYS];
872  ScanKeyData notnullkeys[INDEX_MAX_KEYS];
873  int keysCount = 0;
874  int i;
875  bool status;
876  StrategyNumber strat_total;
877  BTScanPosItem *currItem;
878  BlockNumber blkno;
879 
880  Assert(!BTScanPosIsValid(so->currPos));
881 
882  pgstat_count_index_scan(rel);
883 
884  /*
885  * Examine the scan keys and eliminate any redundant keys; also mark the
886  * keys that must be matched to continue the scan.
887  */
888  _bt_preprocess_keys(scan);
889 
890  /*
891  * Quit now if _bt_preprocess_keys() discovered that the scan keys can
892  * never be satisfied (eg, x == 1 AND x > 2).
893  */
894  if (!so->qual_ok)
895  {
896  /* Notify any other workers that we're done with this scan key. */
897  _bt_parallel_done(scan);
898  return false;
899  }
900 
901  /*
902  * For parallel scans, get the starting page from shared state. If the
903  * scan has not started, proceed to find the first leaf page in the usual
904  * way while keeping other participating processes waiting. If the scan
905  * has already begun, use the page number from the shared structure.
906  */
907  if (scan->parallel_scan != NULL)
908  {
909  status = _bt_parallel_seize(scan, &blkno);
910  if (!status)
911  return false;
912  else if (blkno == P_NONE)
913  {
914  _bt_parallel_done(scan);
915  return false;
916  }
917  else if (blkno != InvalidBlockNumber)
918  {
919  if (!_bt_parallel_readpage(scan, blkno, dir))
920  return false;
921  goto readcomplete;
922  }
923  }
924 
925  /*----------
926  * Examine the scan keys to discover where we need to start the scan.
927  *
928  * We want to identify the keys that can be used as starting boundaries;
929  * these are =, >, or >= keys for a forward scan or =, <, <= keys for
930  * a backwards scan. We can use keys for multiple attributes so long as
931  * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
932  * a > or < boundary or find an attribute with no boundary (which can be
933  * thought of as the same as "> -infinity"), we can't use keys for any
934  * attributes to its right, because it would break our simplistic notion
935  * of what initial positioning strategy to use.
936  *
937  * When the scan keys include cross-type operators, _bt_preprocess_keys
938  * may not be able to eliminate redundant keys; in such cases we will
939  * arbitrarily pick a usable one for each attribute. This is correct
940  * but possibly not optimal behavior. (For example, with keys like
941  * "x >= 4 AND x >= 5" we would elect to scan starting at x=4 when
942  * x=5 would be more efficient.) Since the situation only arises given
943  * a poorly-worded query plus an incomplete opfamily, live with it.
944  *
945  * When both equality and inequality keys appear for a single attribute
946  * (again, only possible when cross-type operators appear), we *must*
947  * select one of the equality keys for the starting point, because
948  * _bt_checkkeys() will stop the scan as soon as an equality qual fails.
949  * For example, if we have keys like "x >= 4 AND x = 10" and we elect to
950  * start at x=4, we will fail and stop before reaching x=10. If multiple
951  * equality quals survive preprocessing, however, it doesn't matter which
952  * one we use --- by definition, they are either redundant or
953  * contradictory.
954  *
955  * Any regular (not SK_SEARCHNULL) key implies a NOT NULL qualifier.
956  * If the index stores nulls at the end of the index we'll be starting
957  * from, and we have no boundary key for the column (which means the key
958  * we deduced NOT NULL from is an inequality key that constrains the other
959  * end of the index), then we cons up an explicit SK_SEARCHNOTNULL key to
960  * use as a boundary key. If we didn't do this, we might find ourselves
961  * traversing a lot of null entries at the start of the scan.
962  *
963  * In this loop, row-comparison keys are treated the same as keys on their
964  * first (leftmost) columns. We'll add on lower-order columns of the row
965  * comparison below, if possible.
966  *
967  * The selected scan keys (at most one per index column) are remembered by
968  * storing their addresses into the local startKeys[] array.
969  *----------
970  */
971  strat_total = BTEqualStrategyNumber;
972  if (so->numberOfKeys > 0)
973  {
974  AttrNumber curattr;
975  ScanKey chosen;
976  ScanKey impliesNN;
977  ScanKey cur;
978 
979  /*
980  * chosen is the so-far-chosen key for the current attribute, if any.
981  * We don't cast the decision in stone until we reach keys for the
982  * next attribute.
983  */
984  curattr = 1;
985  chosen = NULL;
986  /* Also remember any scankey that implies a NOT NULL constraint */
987  impliesNN = NULL;
988 
989  /*
990  * Loop iterates from 0 to numberOfKeys inclusive; we use the last
991  * pass to handle after-last-key processing. Actual exit from the
992  * loop is at one of the "break" statements below.
993  */
994  for (cur = so->keyData, i = 0;; cur++, i++)
995  {
996  if (i >= so->numberOfKeys || cur->sk_attno != curattr)
997  {
998  /*
999  * Done looking at keys for curattr. If we didn't find a
1000  * usable boundary key, see if we can deduce a NOT NULL key.
1001  */
1002  if (chosen == NULL && impliesNN != NULL &&
1003  ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
1004  ScanDirectionIsForward(dir) :
1005  ScanDirectionIsBackward(dir)))
1006  {
1007  /* Yes, so build the key in notnullkeys[keysCount] */
1008  chosen = &notnullkeys[keysCount];
1009  ScanKeyEntryInitialize(chosen,
1010  (SK_SEARCHNOTNULL | SK_ISNULL |
1011  (impliesNN->sk_flags &
1012  (SK_BT_DESC | SK_BT_NULLS_FIRST))),
1013  curattr,
1014  ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
1015  BTGreaterStrategyNumber :
1016  BTLessStrategyNumber),
1017  InvalidOid,
1018  InvalidOid,
1019  InvalidOid,
1020  (Datum) 0);
1021  }
1022 
1023  /*
1024  * If we still didn't find a usable boundary key, quit; else
1025  * save the boundary key pointer in startKeys.
1026  */
1027  if (chosen == NULL)
1028  break;
1029  startKeys[keysCount++] = chosen;
1030 
1031  /*
1032  * Adjust strat_total, and quit if we have stored a > or <
1033  * key.
1034  */
1035  strat = chosen->sk_strategy;
1036  if (strat != BTEqualStrategyNumber)
1037  {
1038  strat_total = strat;
1039  if (strat == BTGreaterStrategyNumber ||
1040  strat == BTLessStrategyNumber)
1041  break;
1042  }
1043 
1044  /*
1045  * Done if that was the last attribute, or if next key is not
1046  * in sequence (implying no boundary key is available for the
1047  * next attribute).
1048  */
1049  if (i >= so->numberOfKeys ||
1050  cur->sk_attno != curattr + 1)
1051  break;
1052 
1053  /*
1054  * Reset for next attr.
1055  */
1056  curattr = cur->sk_attno;
1057  chosen = NULL;
1058  impliesNN = NULL;
1059  }
1060 
1061  /*
1062  * Can we use this key as a starting boundary for this attr?
1063  *
1064  * If not, does it imply a NOT NULL constraint? (Because
1065  * SK_SEARCHNULL keys are always assigned BTEqualStrategyNumber,
1066  * *any* inequality key works for that; we need not test.)
1067  */
1068  switch (cur->sk_strategy)
1069  {
1070  case BTLessStrategyNumber:
1071  case BTLessEqualStrategyNumber:
1072  if (chosen == NULL)
1073  {
1074  if (ScanDirectionIsBackward(dir))
1075  chosen = cur;
1076  else
1077  impliesNN = cur;
1078  }
1079  break;
1080  case BTEqualStrategyNumber:
1081  /* override any non-equality choice */
1082  chosen = cur;
1083  break;
1084  case BTGreaterEqualStrategyNumber:
1085  case BTGreaterStrategyNumber:
1086  if (chosen == NULL)
1087  {
1088  if (ScanDirectionIsForward(dir))
1089  chosen = cur;
1090  else
1091  impliesNN = cur;
1092  }
1093  break;
1094  }
1095  }
1096  }
1097 
1098  /*
1099  * If we found no usable boundary keys, we have to start from one end of
1100  * the tree. Walk down that edge to the first or last key, and scan from
1101  * there.
1102  */
1103  if (keysCount == 0)
1104  {
1105  bool match;
1106 
1107  match = _bt_endpoint(scan, dir);
1108 
1109  if (!match)
1110  {
1111  /* No match, so mark (parallel) scan finished */
1112  _bt_parallel_done(scan);
1113  }
1114 
1115  return match;
1116  }
1117 
1118  /*
1119  * We want to start the scan somewhere within the index. Set up an
1120  * insertion scankey we can use to search for the boundary point we
1121  * identified above. The insertion scankey is built using the keys
1122  * identified by startKeys[]. (Remaining insertion scankey fields are
1123  * initialized after initial-positioning strategy is finalized.)
1124  */
1125  Assert(keysCount <= INDEX_MAX_KEYS);
1126  for (i = 0; i < keysCount; i++)
1127  {
1128  ScanKey cur = startKeys[i];
1129 
1130  Assert(cur->sk_attno == i + 1);
1131 
1132  if (cur->sk_flags & SK_ROW_HEADER)
1133  {
1134  /*
1135  * Row comparison header: look to the first row member instead.
1136  *
1137  * The member scankeys are already in insertion format (ie, they
1138  * have sk_func = 3-way-comparison function), but we have to watch
1139  * out for nulls, which _bt_preprocess_keys didn't check. A null
1140  * in the first row member makes the condition unmatchable, just
1141  * like qual_ok = false.
1142  */
1143  ScanKey subkey = (ScanKey) DatumGetPointer(cur->sk_argument);
1144 
1145  Assert(subkey->sk_flags & SK_ROW_MEMBER);
1146  if (subkey->sk_flags & SK_ISNULL)
1147  {
1148  _bt_parallel_done(scan);
1149  return false;
1150  }
1151  memcpy(inskey.scankeys + i, subkey, sizeof(ScanKeyData));
1152 
1153  /*
1154  * If the row comparison is the last positioning key we accepted,
1155  * try to add additional keys from the lower-order row members.
1156  * (If we accepted independent conditions on additional index
1157  * columns, we use those instead --- doesn't seem worth trying to
1158  * determine which is more restrictive.) Note that this is OK
1159  * even if the row comparison is of ">" or "<" type, because the
1160  * condition applied to all but the last row member is effectively
1161  * ">=" or "<=", and so the extra keys don't break the positioning
1162  * scheme. But, by the same token, if we aren't able to use all
1163  * the row members, then the part of the row comparison that we
1164  * did use has to be treated as just a ">=" or "<=" condition, and
1165  * so we'd better adjust strat_total accordingly.
1166  */
1167  if (i == keysCount - 1)
1168  {
1169  bool used_all_subkeys = false;
1170 
1171  Assert(!(subkey->sk_flags & SK_ROW_END));
1172  for (;;)
1173  {
1174  subkey++;
1175  Assert(subkey->sk_flags & SK_ROW_MEMBER);
1176  if (subkey->sk_attno != keysCount + 1)
1177  break; /* out-of-sequence, can't use it */
1178  if (subkey->sk_strategy != cur->sk_strategy)
1179  break; /* wrong direction, can't use it */
1180  if (subkey->sk_flags & SK_ISNULL)
1181  break; /* can't use null keys */
1182  Assert(keysCount < INDEX_MAX_KEYS);
1183  memcpy(inskey.scankeys + keysCount, subkey,
1184  sizeof(ScanKeyData));
1185  keysCount++;
1186  if (subkey->sk_flags & SK_ROW_END)
1187  {
1188  used_all_subkeys = true;
1189  break;
1190  }
1191  }
1192  if (!used_all_subkeys)
1193  {
1194  switch (strat_total)
1195  {
1196  case BTLessStrategyNumber:
1197  strat_total = BTLessEqualStrategyNumber;
1198  break;
1199  case BTGreaterStrategyNumber:
1200  strat_total = BTGreaterEqualStrategyNumber;
1201  break;
1202  }
1203  }
1204  break; /* done with outer loop */
1205  }
1206  }
1207  else
1208  {
1209  /*
1210  * Ordinary comparison key. Transform the search-style scan key
1211  * to an insertion scan key by replacing the sk_func with the
1212  * appropriate btree comparison function.
1213  *
1214  * If scankey operator is not a cross-type comparison, we can use
1215  * the cached comparison function; otherwise gotta look it up in
1216  * the catalogs. (That can't lead to infinite recursion, since no
1217  * indexscan initiated by syscache lookup will use cross-data-type
1218  * operators.)
1219  *
1220  * We support the convention that sk_subtype == InvalidOid means
1221  * the opclass input type; this is a hack to simplify life for
1222  * ScanKeyInit().
1223  */
1224  if (cur->sk_subtype == rel->rd_opcintype[i] ||
1225  cur->sk_subtype == InvalidOid)
1226  {
1227  FmgrInfo *procinfo;
1228 
1229  procinfo = index_getprocinfo(rel, cur->sk_attno, BTORDER_PROC);
1230  ScanKeyEntryInitializeWithInfo(inskey.scankeys + i,
1231  cur->sk_flags,
1232  cur->sk_attno,
1233  InvalidStrategy,
1234  cur->sk_subtype,
1235  cur->sk_collation,
1236  procinfo,
1237  cur->sk_argument);
1238  }
1239  else
1240  {
1241  RegProcedure cmp_proc;
1242 
1243  cmp_proc = get_opfamily_proc(rel->rd_opfamily[i],
1244  rel->rd_opcintype[i],
1245  cur->sk_subtype,
1246  BTORDER_PROC);
1247  if (!RegProcedureIsValid(cmp_proc))
1248  elog(ERROR, "missing support function %d(%u,%u) for attribute %d of index \"%s\"",
1249  BTORDER_PROC, rel->rd_opcintype[i], cur->sk_subtype,
1250  cur->sk_attno, RelationGetRelationName(rel));
1251  ScanKeyEntryInitialize(inskey.scankeys + i,
1252  cur->sk_flags,
1253  cur->sk_attno,
1254  InvalidStrategy,
1255  cur->sk_subtype,
1256  cur->sk_collation,
1257  cmp_proc,
1258  cur->sk_argument);
1259  }
1260  }
1261  }
1262 
1263  /*----------
1264  * Examine the selected initial-positioning strategy to determine exactly
1265  * where we need to start the scan, and set flag variables to control the
1266  * code below.
1267  *
1268  * If nextkey = false, _bt_search and _bt_binsrch will locate the first
1269  * item >= scan key. If nextkey = true, they will locate the first
1270  * item > scan key.
1271  *
1272  * If goback = true, we will then step back one item, while if
1273  * goback = false, we will start the scan on the located item.
1274  *----------
1275  */
1276  switch (strat_total)
1277  {
1278  case BTLessStrategyNumber:
1279 
1280  /*
1281  * Find first item >= scankey, then back up one to arrive at last
1282  * item < scankey. (Note: this positioning strategy is only used
1283  * for a backward scan, so that is always the correct starting
1284  * position.)
1285  */
1286  nextkey = false;
1287  goback = true;
1288  break;
1289 
1290  case BTLessEqualStrategyNumber:
1291 
1292  /*
1293  * Find first item > scankey, then back up one to arrive at last
1294  * item <= scankey. (Note: this positioning strategy is only used
1295  * for a backward scan, so that is always the correct starting
1296  * position.)
1297  */
1298  nextkey = true;
1299  goback = true;
1300  break;
1301 
1302  case BTEqualStrategyNumber:
1303 
1304  /*
1305  * If a backward scan was specified, need to start with last equal
1306  * item not first one.
1307  */
1308  if (ScanDirectionIsBackward(dir))
1309  {
1310  /*
1311  * This is the same as the <= strategy. We will check at the
1312  * end whether the found item is actually =.
1313  */
1314  nextkey = true;
1315  goback = true;
1316  }
1317  else
1318  {
1319  /*
1320  * This is the same as the >= strategy. We will check at the
1321  * end whether the found item is actually =.
1322  */
1323  nextkey = false;
1324  goback = false;
1325  }
1326  break;
1327 
1328  case BTGreaterEqualStrategyNumber:
1329 
1330  /*
1331  * Find first item >= scankey. (This is only used for forward
1332  * scans.)
1333  */
1334  nextkey = false;
1335  goback = false;
1336  break;
1337 
1338  case BTGreaterStrategyNumber:
1339 
1340  /*
1341  * Find first item > scankey. (This is only used for forward
1342  * scans.)
1343  */
1344  nextkey = true;
1345  goback = false;
1346  break;
1347 
1348  default:
1349  /* can't get here, but keep compiler quiet */
1350  elog(ERROR, "unrecognized strat_total: %d", (int) strat_total);
1351  return false;
1352  }
1353 
1354  /* Initialize remaining insertion scan key fields */
1355  _bt_metaversion(rel, &inskey.heapkeyspace, &inskey.allequalimage);
1356  inskey.anynullkeys = false; /* unused */
1357  inskey.nextkey = nextkey;
1358  inskey.pivotsearch = false;
1359  inskey.scantid = NULL;
1360  inskey.keysz = keysCount;
1361 
1362  /*
1363  * Use the manufactured insertion scan key to descend the tree and
1364  * position ourselves on the target leaf page.
1365  */
1366  stack = _bt_search(rel, NULL, &inskey, &buf, BT_READ);
1367 
1368  /* don't need to keep the stack around... */
1369  _bt_freestack(stack);
1370 
1371  if (!BufferIsValid(buf))
1372  {
1373  /*
1374  * We only get here if the index is completely empty. Lock relation
1375  * because nothing finer to lock exists. Without a buffer lock, it's
1376  * possible for another transaction to insert data between
1377  * _bt_search() and PredicateLockRelation(). We have to try again
1378  * after taking the relation-level predicate lock, to close a narrow
1379  * window where we wouldn't scan concurrently inserted tuples, but the
1380  * writer wouldn't see our predicate lock.
1381  */
1382  if (IsolationIsSerializable())
1383  {
1384  PredicateLockRelation(rel, scan->xs_snapshot);
1385  stack = _bt_search(rel, NULL, &inskey, &buf, BT_READ);
1386  _bt_freestack(stack);
1387  }
1388 
1389  if (!BufferIsValid(buf))
1390  {
1391  /*
1392  * Mark parallel scan as done, so that all the workers can finish
1393  * their scan.
1394  */
1395  _bt_parallel_done(scan);
1396  BTScanPosInvalidate(so->currPos);
1397  return false;
1398  }
1399  }
1400 
1401  PredicateLockPage(rel, BufferGetBlockNumber(buf), scan->xs_snapshot);
1402 
1403  _bt_initialize_more_data(so, dir);
1404 
1405  /* position to the precise item on the page */
1406  offnum = _bt_binsrch(rel, &inskey, buf);
1407 
1408  /*
1409  * If nextkey = false, we are positioned at the first item >= scan key, or
1410  * possibly at the end of a page on which all the existing items are less
1411  * than the scan key and we know that everything on later pages is greater
1412  * than or equal to scan key.
1413  *
1414  * If nextkey = true, we are positioned at the first item > scan key, or
1415  * possibly at the end of a page on which all the existing items are less
1416  * than or equal to the scan key and we know that everything on later
1417  * pages is greater than scan key.
1418  *
1419  * The actually desired starting point is either this item or the prior
1420  * one, or in the end-of-page case it's the first item on the next page or
1421  * the last item on this page. Adjust the starting offset if needed. (If
1422  * this results in an offset before the first item or after the last one,
1423  * _bt_readpage will report no items found, and then we'll step to the
1424  * next page as needed.)
1425  */
1426  if (goback)
1427  offnum = OffsetNumberPrev(offnum);
1428 
1429  /* remember which buffer we have pinned, if any */
1430  Assert(!BTScanPosIsValid(so->currPos));
1431  so->currPos.buf = buf;
1432  so->firstPage = true;
1433 
1434  /*
1435  * Now load data from the first page of the scan.
1436  */
1437  if (!_bt_readpage(scan, dir, offnum))
1438  {
1439  /*
1440  * There's no actually-matching data on this page. Try to advance to
1441  * the next page. Return false if there's no matching data at all.
1442  */
1443  _bt_unlockbuf(scan->indexRelation, so->currPos.buf);
1444  if (!_bt_steppage(scan, dir))
1445  return false;
1446  }
1447  else
1448  {
1449  /* Drop the lock, and maybe the pin, on the current page */
1450  _bt_drop_lock_and_maybe_pin(scan, &so->currPos);
1451  }
1452 
1453 readcomplete:
1454  /* OK, itemIndex says what to return */
1455  currItem = &so->currPos.items[so->currPos.itemIndex];
1456  scan->xs_heaptid = currItem->heapTid;
1457  if (scan->xs_want_itup)
1458  scan->xs_itup = (IndexTuple) (so->currTuples + currItem->tupleOffset);
1459 
1460  return true;
1461 }
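The switch on strat_total above boils down to a five-row truth table. A small self-contained restatement (the enum names are ad hoc, not PostgreSQL's strategy numbers) that prints the nextkey/goback flags _bt_first derives:

    #include <stdio.h>

    typedef enum { LT, LE, EQ_FWD, EQ_BWD, GE, GT } StartStrat;

    /* nextkey: search for the first item > key instead of >= key;
     * goback: step back one item after positioning. */
    static void position_flags(StartStrat s, int *nextkey, int *goback)
    {
        switch (s)
        {
            case LT:                        /* back off the first >= key */
                *nextkey = 0; *goback = 1; break;
            case LE:
            case EQ_BWD:                    /* back off the first > key  */
                *nextkey = 1; *goback = 1; break;
            case GE:
            case EQ_FWD:                    /* land on the first >= key  */
                *nextkey = 0; *goback = 0; break;
            case GT:                        /* land on the first > key   */
                *nextkey = 1; *goback = 0; break;
        }
    }

    int main(void)
    {
        const char *names[] = {"<", "<=", "= fwd", "= bwd", ">=", ">"};

        for (int s = LT; s <= GT; s++)
        {
            int nextkey, goback;

            position_flags((StartStrat) s, &nextkey, &goback);
            printf("%-6s nextkey=%d goback=%d\n", names[s], nextkey, goback);
        }
        return 0;
    }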
1462 
1463 /*
1464  * _bt_next() -- Get the next item in a scan.
1465  *
1466  * On entry, so->currPos describes the current page, which may be pinned
1467  * but is not locked, and so->currPos.itemIndex identifies which item was
1468  * previously returned.
1469  *
1470  * On successful exit, scan->xs_ctup.t_self is set to the TID of the
1471  * next heap tuple, and if requested, scan->xs_itup points to a copy of
1472  * the index tuple. so->currPos is updated as needed.
1473  *
1474  * On failure exit (no more tuples), we release pin and set
1475  * so->currPos.buf to InvalidBuffer.
1476  */
1477 bool
1478 _bt_next(IndexScanDesc scan, ScanDirection dir)
1479 {
1480  BTScanOpaque so = (BTScanOpaque) scan->opaque;
1481  BTScanPosItem *currItem;
1482 
1483  /*
1484  * Advance to next tuple on current page; or if there's no more, try to
1485  * step to the next page with data.
1486  */
1487  if (ScanDirectionIsForward(dir))
1488  {
1489  if (++so->currPos.itemIndex > so->currPos.lastItem)
1490  {
1491  if (!_bt_steppage(scan, dir))
1492  return false;
1493  }
1494  }
1495  else
1496  {
1497  if (--so->currPos.itemIndex < so->currPos.firstItem)
1498  {
1499  if (!_bt_steppage(scan, dir))
1500  return false;
1501  }
1502  }
1503 
1504  /* OK, itemIndex says what to return */
1505  currItem = &so->currPos.items[so->currPos.itemIndex];
1506  scan->xs_heaptid = currItem->heapTid;
1507  if (scan->xs_want_itup)
1508  scan->xs_itup = (IndexTuple) (so->currTuples + currItem->tupleOffset);
1509 
1510  return true;
1511 }
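Stripped of buffers and page stepping, _bt_next's fast path is index arithmetic over the items already loaded by _bt_readpage. A toy model of the currPos cursor:

    #include <stdio.h>

    /* firstItem/lastItem bound the matches loaded from one page;
     * itemIndex is the position most recently returned. */
    typedef struct { int firstItem, lastItem, itemIndex; } CursorSketch;

    /* Advance in the given direction (+1 forward, -1 backward); returns 0
     * when the loaded items are exhausted, which is where the real code
     * calls _bt_steppage() to move to a sibling page. */
    static int next_item(CursorSketch *pos, int dir)
    {
        if (dir > 0)
            return ++pos->itemIndex <= pos->lastItem;
        return --pos->itemIndex >= pos->firstItem;
    }

    int main(void)
    {
        int tids[] = {101, 102, 103};             /* stand-ins for heap TIDs */
        CursorSketch pos = {0, 2, 0};             /* item 0 already returned */

        while (next_item(&pos, +1))
            printf("next: %d\n", tids[pos.itemIndex]);   /* 102, then 103 */
        return 0;
    }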
1512 
1513 /*
1514  * _bt_readpage() -- Load data from current index page into so->currPos
1515  *
1516  * Caller must have pinned and read-locked so->currPos.buf; the buffer's state
1517  * is not changed here. Also, currPos.moreLeft and moreRight must be valid;
1518  * they are updated as appropriate. All other fields of so->currPos are
1519  * initialized from scratch here.
1520  *
1521  * We scan the current page starting at offnum and moving in the indicated
1522  * direction. All items matching the scan keys are loaded into currPos.items.
1523  * moreLeft or moreRight (as appropriate) is cleared if _bt_checkkeys reports
1524  * that there can be no more matching tuples in the current scan direction.
1525  *
1526  * In the case of a parallel scan, caller must have called _bt_parallel_seize
1527  * prior to calling this function; this function will invoke
1528  * _bt_parallel_release before returning.
1529  *
1530  * Returns true if any matching items found on the page, false if none.
1531  */
1532 static bool
1533 _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum)
1534 {
1535  BTScanOpaque so = (BTScanOpaque) scan->opaque;
1536  Page page;
1537  BTPageOpaque opaque;
1538  OffsetNumber minoff;
1539  OffsetNumber maxoff;
1540  int itemIndex;
1541  bool continuescan;
1542  int indnatts;
1543  bool requiredMatchedByPrecheck;
1544 
1545  /*
1546  * We must have the buffer pinned and locked, but the usual macro can't be
1547  * used here; this function is what makes it good for currPos.
1548  */
1549  Assert(BufferIsValid(so->currPos.buf));
1550 
1551  page = BufferGetPage(so->currPos.buf);
1552  opaque = BTPageGetOpaque(page);
1553 
1554  /* allow next page to be processed by a parallel worker */
1555  if (scan->parallel_scan)
1556  {
1557  if (ScanDirectionIsForward(dir))
1558  _bt_parallel_release(scan, opaque->btpo_next);
1559  else
1560  _bt_parallel_release(scan, BufferGetBlockNumber(so->currPos.buf));
1561  }
1562 
1563  continuescan = true; /* default assumption */
1564  indnatts = IndexRelationGetNumberOfAttributes(scan->indexRelation);
1565  minoff = P_FIRSTDATAKEY(opaque);
1566  maxoff = PageGetMaxOffsetNumber(page);
1567 
1568  /*
1569  * We note the buffer's block number so that we can release the pin later.
1570  * This allows us to re-read the buffer if it is needed again for hinting.
1571  */
1572  so->currPos.currPage = BufferGetBlockNumber(so->currPos.buf);
1573 
1574  /*
1575  * We save the LSN of the page as we read it, so that we know whether it is
1576  * safe to apply LP_DEAD hints to the page later. This allows us to drop
1577  * the pin for MVCC scans, which allows vacuum to avoid blocking.
1578  */
1579  so->currPos.lsn = BufferGetLSNAtomic(so->currPos.buf);
1580 
1581  /*
1582  * we must save the page's right-link while scanning it; this tells us
1583  * where to step right to after we're done with these items. There is no
1584  * corresponding need for the left-link, since splits always go right.
1585  */
1586  so->currPos.nextPage = opaque->btpo_next;
1587 
1588  /* initialize tuple workspace to empty */
1589  so->currPos.nextTupleOffset = 0;
1590 
1591  /*
1592  * Now that the current page has been made consistent, the macro should be
1593  * good.
1594  */
1595  Assert(BTScanPosIsPinned(so->currPos));
1596 
1597  /*
1598  * Precheck the page using the scan keys required for the current scan
1599  * direction. We check them against the last item on the page (according
1600  * to our scan direction); if they are satisfied there, they must also be
1601  * satisfied by every earlier item, so we can skip checking them against
1602  * each item on the page. Scan keys required for the opposite-direction
1603  * scan are already matched by the _bt_first() call.
1604  *
1605  * With the forward scan, we do this check for the last item on the page
1606  * instead of the high key. It's relatively likely that the most
1607  * significant column in the high key will be different from the
1608  * corresponding value from the last item on the page. So checking with
1609  * the last item on the page would give a more precise answer.
1610  *
1611  * We skip this for the first page in the scan to avoid a possible
1612  * slowdown for point queries.
1613  */
1614  if (!so->firstPage && minoff < maxoff)
1615  {
1616  ItemId iid;
1617  IndexTuple itup;
1618 
1619  iid = PageGetItemId(page, ScanDirectionIsForward(dir) ? maxoff : minoff);
1620  itup = (IndexTuple) PageGetItem(page, iid);
1621 
1622  /*
1623  * Do the precheck. Note that we pass the pointer to
1624  * 'requiredMatchedByPrecheck' as the 'continuescan' argument. That
1625  * will set the flag to true if all required keys are satisfied and
1626  * to false otherwise.
1627  */
1628  (void) _bt_checkkeys(scan, itup, indnatts, dir,
1629  &requiredMatchedByPrecheck, false);
1630  }
1631  else
1632  {
1633  so->firstPage = false;
1634  requiredMatchedByPrecheck = false;
1635  }
1636 
1637  if (ScanDirectionIsForward(dir))
1638  {
1639  /* load items[] in ascending order */
1640  itemIndex = 0;
1641 
1642  offnum = Max(offnum, minoff);
1643 
1644  while (offnum <= maxoff)
1645  {
1646  ItemId iid = PageGetItemId(page, offnum);
1647  IndexTuple itup;
1648  bool passes_quals;
1649 
1650  /*
1651  * If the scan specifies not to return killed tuples, then we
1652  * treat a killed tuple as not passing the qual
1653  */
1654  if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
1655  {
1656  offnum = OffsetNumberNext(offnum);
1657  continue;
1658  }
1659 
1660  itup = (IndexTuple) PageGetItem(page, iid);
1661 
1662  passes_quals = _bt_checkkeys(scan, itup, indnatts, dir,
1663  &continuescan, requiredMatchedByPrecheck);
1664 
1665  /*
1666  * If the result of prechecking required keys was true, then in
1667  * assert-enabled builds we also recheck that the _bt_checkkeys()
1668  * result is the same.
1669  */
1670  Assert(!requiredMatchedByPrecheck ||
1671  passes_quals == _bt_checkkeys(scan, itup, indnatts, dir,
1672  &continuescan, false));
1673  if (passes_quals)
1674  {
1675  /* tuple passes all scan key conditions */
1676  if (!BTreeTupleIsPosting(itup))
1677  {
1678  /* Remember it */
1679  _bt_saveitem(so, itemIndex, offnum, itup);
1680  itemIndex++;
1681  }
1682  else
1683  {
1684  int tupleOffset;
1685 
1686  /*
1687  * Set up state to return posting list, and remember first
1688  * TID
1689  */
1690  tupleOffset =
1691  _bt_setuppostingitems(so, itemIndex, offnum,
1692  BTreeTupleGetPostingN(itup, 0),
1693  itup);
1694  itemIndex++;
1695  /* Remember additional TIDs */
1696  for (int i = 1; i < BTreeTupleGetNPosting(itup); i++)
1697  {
1698  _bt_savepostingitem(so, itemIndex, offnum,
1699  BTreeTupleGetPostingN(itup, i),
1700  tupleOffset);
1701  itemIndex++;
1702  }
1703  }
1704  }
1705  /* When !continuescan, there can't be any more matches, so stop */
1706  if (!continuescan)
1707  break;
1708 
1709  offnum = OffsetNumberNext(offnum);
1710  }
1711 
1712  /*
1713  * We don't need to visit page to the right when the high key
1714  * indicates that no more matches will be found there.
1715  *
1716  * Checking the high key like this works out more often than you might
1717  * think. Leaf page splits pick a split point between the two most
1718  * dissimilar tuples (this is weighed against the need to evenly share
1719  * free space). Leaf pages with high key attribute values that can
1720  * only appear on non-pivot tuples on the right sibling page are
1721  * common.
1722  */
1723  if (continuescan && !P_RIGHTMOST(opaque))
1724  {
1725  ItemId iid = PageGetItemId(page, P_HIKEY);
1726  IndexTuple itup = (IndexTuple) PageGetItem(page, iid);
1727  int truncatt;
1728 
1729  truncatt = BTreeTupleGetNAtts(itup, scan->indexRelation);
1730  _bt_checkkeys(scan, itup, truncatt, dir, &continuescan, false);
1731  }
1732 
1733  if (!continuescan)
1734  so->currPos.moreRight = false;
1735 
1736  Assert(itemIndex <= MaxTIDsPerBTreePage);
1737  so->currPos.firstItem = 0;
1738  so->currPos.lastItem = itemIndex - 1;
1739  so->currPos.itemIndex = 0;
1740  }
1741  else
1742  {
1743  /* load items[] in descending order */
1744  itemIndex = MaxTIDsPerBTreePage;
1745 
1746  offnum = Min(offnum, maxoff);
1747 
1748  while (offnum >= minoff)
1749  {
1750  ItemId iid = PageGetItemId(page, offnum);
1751  IndexTuple itup;
1752  bool tuple_alive;
1753  bool passes_quals;
1754 
1755  /*
1756  * If the scan specifies not to return killed tuples, then we
1757  * treat a killed tuple as not passing the qual. Most of the
1758  * time, it's a win to not bother examining the tuple's index
1759  * keys, but just skip to the next tuple (previous, actually,
1760  * since we're scanning backwards). However, if this is the first
1761  * tuple on the page, we do check the index keys, to prevent
1762  * uselessly advancing to the page to the left. This is similar
1763  * to the high key optimization used by forward scans.
1764  */
1765  if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
1766  {
1767  Assert(offnum >= P_FIRSTDATAKEY(opaque));
1768  if (offnum > P_FIRSTDATAKEY(opaque))
1769  {
1770  offnum = OffsetNumberPrev(offnum);
1771  continue;
1772  }
1773 
1774  tuple_alive = false;
1775  }
1776  else
1777  tuple_alive = true;
1778 
1779  itup = (IndexTuple) PageGetItem(page, iid);
1780 
1781  passes_quals = _bt_checkkeys(scan, itup, indnatts, dir,
1782  &continuescan, requiredMatchedByPrecheck);
1783 
1784  /*
1785  * If the result of prechecking required keys was true, then in
1786  * assert-enabled builds we also recheck that the _bt_checkkeys()
1787  * result is the same.
1788  */
1789  Assert(!requiredMatchedByPrecheck ||
1790  passes_quals == _bt_checkkeys(scan, itup, indnatts, dir,
1791  &continuescan, false));
1792  if (passes_quals && tuple_alive)
1793  {
1794  /* tuple passes all scan key conditions */
1795  if (!BTreeTupleIsPosting(itup))
1796  {
1797  /* Remember it */
1798  itemIndex--;
1799  _bt_saveitem(so, itemIndex, offnum, itup);
1800  }
1801  else
1802  {
1803  int tupleOffset;
1804 
1805  /*
1806  * Set up state to return posting list, and remember first
1807  * TID.
1808  *
1809  * Note that we deliberately save/return items from
1810  * posting lists in ascending heap TID order for backwards
1811  * scans. This allows _bt_killitems() to make a
1812  * consistent assumption about the order of items
1813  * associated with the same posting list tuple.
1814  */
1815  itemIndex--;
1816  tupleOffset =
1817  _bt_setuppostingitems(so, itemIndex, offnum,
1818  BTreeTupleGetPostingN(itup, 0),
1819  itup);
1820  /* Remember additional TIDs */
1821  for (int i = 1; i < BTreeTupleGetNPosting(itup); i++)
1822  {
1823  itemIndex--;
1824  _bt_savepostingitem(so, itemIndex, offnum,
1825  BTreeTupleGetPostingN(itup, i),
1826  tupleOffset);
1827  }
1828  }
1829  }
1830  if (!continuescan)
1831  {
1832  /* there can't be any more matches, so stop */
1833  so->currPos.moreLeft = false;
1834  break;
1835  }
1836 
1837  offnum = OffsetNumberPrev(offnum);
1838  }
1839 
1840  Assert(itemIndex >= 0);
1841  so->currPos.firstItem = itemIndex;
1842  so->currPos.lastItem = MaxTIDsPerBTreePage - 1;
1843  so->currPos.itemIndex = MaxTIDsPerBTreePage - 1;
1844  }
1845 
1846  return (so->currPos.firstItem <= so->currPos.lastItem);
1847 }
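 As a reading aid, here is a minimal editorial sketch (a toy C model, not
 PostgreSQL code) of how a caller such as _bt_next() consumes the workspace
 that _bt_readpage() fills: the cursor itemIndex moves between firstItem and
 lastItem, and only when it runs off either end does the scan step to another
 page via _bt_steppage().
 
 #include <stdbool.h>
 #include <stdio.h>
 
 /* toy stand-in for the firstItem/lastItem/itemIndex trio in BTScanPosData */
 typedef struct ToyScanPos
 {
 	int		firstItem;
 	int		lastItem;
 	int		itemIndex;
 } ToyScanPos;
 
 /* advance the cursor; false means this page is exhausted and the caller
  * would invoke its equivalent of _bt_steppage() */
 static bool
 toy_advance(ToyScanPos *pos, bool forward)
 {
 	if (forward)
 		return ++pos->itemIndex <= pos->lastItem;
 	else
 		return --pos->itemIndex >= pos->firstItem;
 }
 
 int
 main(void)
 {
 	/* forward scan over a "page" holding items[0..2] */
 	ToyScanPos	pos = {0, 2, -1};
 
 	while (toy_advance(&pos, true))
 		printf("return items[%d]\n", pos.itemIndex);
 	return 0;
 }
 
 Note that the real _bt_readpage() leaves itemIndex pointing at the first
 item to return rather than one before it; the toy starts one short so that
 the pre-increment in toy_advance() lines up.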
1848 
1849 /* Save an index item into so->currPos.items[itemIndex] */
1850 static void
1851 _bt_saveitem(BTScanOpaque so, int itemIndex,
1852  OffsetNumber offnum, IndexTuple itup)
1853 {
1854  BTScanPosItem *currItem = &so->currPos.items[itemIndex];
1855 
1856  Assert(!BTreeTupleIsPivot(itup) && !BTreeTupleIsPosting(itup));
1857 
1858  currItem->heapTid = itup->t_tid;
1859  currItem->indexOffset = offnum;
1860  if (so->currTuples)
1861  {
1862  Size itupsz = IndexTupleSize(itup);
1863 
1864  currItem->tupleOffset = so->currPos.nextTupleOffset;
1865  memcpy(so->currTuples + so->currPos.nextTupleOffset, itup, itupsz);
1866  so->currPos.nextTupleOffset += MAXALIGN(itupsz);
1867  }
1868 }
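 The tupleOffset recorded above is how index-only scans later locate the
 copied tuple inside the currTuples workspace; for example, _bt_endpoint()
 below (line 2536) reconstitutes the pointer as:
 
 	scan->xs_itup = (IndexTuple) (so->currTuples + currItem->tupleOffset);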
1869 
1870 /*
1871  * Set up state to save TIDs/items from a single posting list tuple.
1872  *
1873  * Saves an index item into so->currPos.items[itemIndex] for the TID that
1874  * is returned to the scan first. The second and subsequent TIDs from the
1875  * posting list should be saved by calling _bt_savepostingitem().
1876  *
1877  * Returns the offset into tuple storage space at which the main tuple is
1878  * stored, when one is needed.
1879  */
1880 static int
1881 _bt_setuppostingitems(BTScanOpaque so, int itemIndex, OffsetNumber offnum,
1882  ItemPointer heapTid, IndexTuple itup)
1883 {
1884  BTScanPosItem *currItem = &so->currPos.items[itemIndex];
1885 
1886  Assert(BTreeTupleIsPosting(itup));
1887 
1888  currItem->heapTid = *heapTid;
1889  currItem->indexOffset = offnum;
1890  if (so->currTuples)
1891  {
1892  /* Save base IndexTuple (truncate posting list) */
1893  IndexTuple base;
1894  Size itupsz = BTreeTupleGetPostingOffset(itup);
1895 
1896  itupsz = MAXALIGN(itupsz);
1897  currItem->tupleOffset = so->currPos.nextTupleOffset;
1898  base = (IndexTuple) (so->currTuples + so->currPos.nextTupleOffset);
1899  memcpy(base, itup, itupsz);
1900  /* Defensively reduce work area index tuple header size */
1901  base->t_info &= ~INDEX_SIZE_MASK;
1902  base->t_info |= itupsz;
1903  so->currPos.nextTupleOffset += itupsz;
1904 
1905  return currItem->tupleOffset;
1906  }
1907 
1908  return 0;
1909 }
1910 
1911 /*
1912  * Save an index item into so->currPos.items[itemIndex] for the current
1913  * posting tuple.
1914  *
1915  * Assumes that _bt_setuppostingitems() has already been called for the
1916  * current posting list tuple. Caller passes its return value as tupleOffset.
1917  */
1918 static inline void
1919 _bt_savepostingitem(BTScanOpaque so, int itemIndex, OffsetNumber offnum,
1920  ItemPointer heapTid, int tupleOffset)
1921 {
1922  BTScanPosItem *currItem = &so->currPos.items[itemIndex];
1923 
1924  currItem->heapTid = *heapTid;
1925  currItem->indexOffset = offnum;
1926 
1927  /*
1928  * Have index-only scans return the same base IndexTuple for every TID
1929  * that originates from the same posting list
1930  * that originates from the same posting list.
1931  if (so->currTuples)
1932  currItem->tupleOffset = tupleOffset;
1933 }
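 Taken together, the two posting-list helpers are used in a fixed pattern,
 shown here in isolation (this is the forward-scan call sequence from
 _bt_readpage() above): _bt_setuppostingitems() saves the base tuple along
 with the first TID, and every remaining TID is saved by
 _bt_savepostingitem(), all sharing the same tupleOffset:
 
 	tupleOffset = _bt_setuppostingitems(so, itemIndex, offnum,
 										BTreeTupleGetPostingN(itup, 0),
 										itup);
 	itemIndex++;
 	for (int i = 1; i < BTreeTupleGetNPosting(itup); i++)
 	{
 		_bt_savepostingitem(so, itemIndex, offnum,
 							BTreeTupleGetPostingN(itup, i),
 							tupleOffset);
 		itemIndex++;
 	}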
1934 
1935 /*
1936  * _bt_steppage() -- Step to next page containing valid data for scan
1937  *
1938  * On entry, if so->currPos.buf is valid the buffer is pinned but not locked;
1939  * if pinned, we'll drop the pin before moving to next page. The buffer is
1940  * not locked on entry.
1941  *
1942  * For success on a scan using a non-MVCC snapshot we hold a pin, but not a
1943  * read lock, on that page. If we do not hold the pin, we set so->currPos.buf
1944  * to InvalidBuffer. We return true to indicate success.
1945  */
1946 static bool
1947 _bt_steppage(IndexScanDesc scan, ScanDirection dir)
1948 {
1949  BTScanOpaque so = (BTScanOpaque) scan->opaque;
1950  BlockNumber blkno = InvalidBlockNumber;
1951  bool status;
1952 
1953  Assert(BTScanPosIsValid(so->currPos));
1954 
1955  /* Before leaving current page, deal with any killed items */
1956  if (so->numKilled > 0)
1957  _bt_killitems(scan);
1958 
1959  /*
1960  * Before we modify currPos, make a copy of the page data if there was a
1961  * mark position that needs it.
1962  */
1963  if (so->markItemIndex >= 0)
1964  {
1965  /* bump pin on current buffer for assignment to mark buffer */
1966  if (BTScanPosIsPinned(so->currPos))
1967  IncrBufferRefCount(so->currPos.buf);
1968  memcpy(&so->markPos, &so->currPos,
1969  offsetof(BTScanPosData, items[1]) +
1970  so->currPos.lastItem * sizeof(BTScanPosItem));
1971  if (so->markTuples)
1972  memcpy(so->markTuples, so->currTuples,
1973  so->currPos.nextTupleOffset);
1974  so->markPos.itemIndex = so->markItemIndex;
1975  so->markItemIndex = -1;
1976  }
1977 
1978  if (ScanDirectionIsForward(dir))
1979  {
1980  /* Walk right to the next page with data */
1981  if (scan->parallel_scan != NULL)
1982  {
1983  /*
1984  * Seize the scan to get the next block number; if the scan has
1985  * ended already, bail out.
1986  */
1987  status = _bt_parallel_seize(scan, &blkno);
1988  if (!status)
1989  {
1990  /* release the previous buffer, if pinned */
1991  BTScanPosUnpinIfPinned(so->currPos);
1992  BTScanPosInvalidate(so->currPos);
1993  return false;
1994  }
1995  }
1996  else
1997  {
1998  /* Not parallel, so use the previously-saved nextPage link. */
1999  blkno = so->currPos.nextPage;
2000  }
2001 
2002  /* Remember we left a page with data */
2003  so->currPos.moreLeft = true;
2004 
2005  /* release the previous buffer, if pinned */
2006  BTScanPosUnpinIfPinned(so->currPos);
2007  }
2008  else
2009  {
2010  /* Remember we left a page with data */
2011  so->currPos.moreRight = true;
2012 
2013  if (scan->parallel_scan != NULL)
2014  {
2015  /*
2016  * Seize the scan to get the current block number; if the scan has
2017  * ended already, bail out.
2018  */
2019  status = _bt_parallel_seize(scan, &blkno);
2020  BTScanPosUnpinIfPinned(so->currPos);
2021  if (!status)
2022  {
2023  BTScanPosInvalidate(so->currPos);
2024  return false;
2025  }
2026  }
2027  else
2028  {
2029  /* Not parallel, so just use our own notion of the current page */
2030  blkno = so->currPos.currPage;
2031  }
2032  }
2033 
2034  if (!_bt_readnextpage(scan, blkno, dir))
2035  return false;
2036 
2037  /* Drop the lock, and maybe the pin, on the current page */
2038  _bt_drop_lock_and_maybe_pin(scan, &so->currPos);
2039 
2040  return true;
2041 }
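 One detail in the mark-position copy above deserves a gloss (editorial note;
 nbytes is a hypothetical local introduced only for exposition): the memcpy
 length covers the BTScanPosData header plus just the valid prefix of the
 items array, not the whole MaxTIDsPerBTreePage-sized array:
 
 	/* everything up to and including items[lastItem]: offsetof(...,
 	 * items[1]) accounts for the header plus items[0], and the remaining
 	 * lastItem * sizeof(BTScanPosItem) accounts for items[1..lastItem] */
 	Size		nbytes = offsetof(BTScanPosData, items[1]) +
 						 so->currPos.lastItem * sizeof(BTScanPosItem);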
2042 
2043 /*
2044  * _bt_readnextpage() -- Read next page containing valid data for scan
2045  *
2046  * On success exit, so->currPos is updated to contain data from the next
2047  * interesting page. Caller is responsible for releasing the lock and pin
2048  * on the buffer on success. We return true to indicate success.
2049  *
2050  * If there are no more matching records in the given direction, we drop all
2051  * locks and pins, set so->currPos.buf to InvalidBuffer, and return false.
2052  */
2053 static bool
2054 _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir)
2055 {
2056  BTScanOpaque so = (BTScanOpaque) scan->opaque;
2057  Relation rel;
2058  Page page;
2059  BTPageOpaque opaque;
2060  bool status;
2061 
2062  rel = scan->indexRelation;
2063 
2064  if (ScanDirectionIsForward(dir))
2065  {
2066  for (;;)
2067  {
2068  /*
2069  * if we're at end of scan, give up and mark parallel scan as
2070  * done, so that all the workers can finish their scan
2071  */
2072  if (blkno == P_NONE || !so->currPos.moreRight)
2073  {
2074  _bt_parallel_done(scan);
2075  BTScanPosInvalidate(so->currPos);
2076  return false;
2077  }
2078  /* check for interrupts while we're not holding any buffer lock */
2079  CHECK_FOR_INTERRUPTS();
2080  /* step right one page */
2081  so->currPos.buf = _bt_getbuf(rel, blkno, BT_READ);
2082  page = BufferGetPage(so->currPos.buf);
2083  opaque = BTPageGetOpaque(page);
2084  /* check for deleted page */
2085  if (!P_IGNORE(opaque))
2086  {
2087  PredicateLockPage(rel, blkno, scan->xs_snapshot);
2088  /* see if there are any matches on this page */
2089  /* note that this will clear moreRight if we can stop */
2090  if (_bt_readpage(scan, dir, P_FIRSTDATAKEY(opaque)))
2091  break;
2092  }
2093  else if (scan->parallel_scan != NULL)
2094  {
2095  /* allow next page to be processed by parallel worker */
2096  _bt_parallel_release(scan, opaque->btpo_next);
2097  }
2098 
2099  /* nope, keep going */
2100  if (scan->parallel_scan != NULL)
2101  {
2102  _bt_relbuf(rel, so->currPos.buf);
2103  status = _bt_parallel_seize(scan, &blkno);
2104  if (!status)
2105  {
2106  BTScanPosInvalidate(so->currPos);
2107  return false;
2108  }
2109  }
2110  else
2111  {
2112  blkno = opaque->btpo_next;
2113  _bt_relbuf(rel, so->currPos.buf);
2114  }
2115  }
2116  }
2117  else
2118  {
2119  /*
2120  * Should only happen in parallel cases, when some other backend
2121  * advanced the scan.
2122  */
2123  if (so->currPos.currPage != blkno)
2124  {
2125  BTScanPosUnpinIfPinned(so->currPos);
2126  so->currPos.currPage = blkno;
2127  }
2128 
2129  /*
2130  * Walk left to the next page with data. This is much more complex
2131  * than the walk-right case because of the possibility that the page
2132  * to our left splits while we are in flight to it, plus the
2133  * possibility that the page we were on gets deleted after we leave
2134  * it. See nbtree/README for details.
2135  *
2136  * It might be possible to rearrange this code to have less overhead
2137  * in pinning and locking, but that would require capturing the left
2138  * pointer when the page is initially read, and using it here, along
2139  * with big changes to _bt_walk_left() and the code below. It is not
2140  * clear whether this would be a win, since if the page immediately to
2141  * the left splits after we read this page and before we step left, we
2142  * would need to visit more pages than with the current code.
2143  *
2144  * Note that if we change the code so that we drop the pin for a scan
2145  * which uses a non-MVCC snapshot, we will need to modify the code for
2146  * walking left, to allow for the possibility that a referenced page
2147  * has been deleted. As long as the buffer is pinned or the snapshot
2148  * is MVCC the page cannot move past the half-dead state to fully
2149  * deleted.
2150  */
2151  if (BTScanPosIsPinned(so->currPos))
2152  _bt_lockbuf(rel, so->currPos.buf, BT_READ);
2153  else
2154  so->currPos.buf = _bt_getbuf(rel, so->currPos.currPage, BT_READ);
2155 
2156  for (;;)
2157  {
2158  /* Done if we know there are no matching keys to the left */
2159  if (!so->currPos.moreLeft)
2160  {
2161  _bt_relbuf(rel, so->currPos.buf);
2162  _bt_parallel_done(scan);
2163  BTScanPosInvalidate(so->currPos);
2164  return false;
2165  }
2166 
2167  /* Step to next physical page */
2168  so->currPos.buf = _bt_walk_left(rel, so->currPos.buf);
2169 
2170  /* if we're physically at end of index, return failure */
2171  if (so->currPos.buf == InvalidBuffer)
2172  {
2173  _bt_parallel_done(scan);
2174  BTScanPosInvalidate(so->currPos);
2175  return false;
2176  }
2177 
2178  /*
2179  * Okay, we managed to move left to a non-deleted page. Done if
2180  * it's not half-dead and contains matching tuples. Else loop back
2181  * and do it all again.
2182  */
2183  page = BufferGetPage(so->currPos.buf);
2184  opaque = BTPageGetOpaque(page);
2185  if (!P_IGNORE(opaque))
2186  {
2187  PredicateLockPage(rel, BufferGetBlockNumber(so->currPos.buf), scan->xs_snapshot);
2188  /* see if there are any matches on this page */
2189  /* note that this will clear moreLeft if we can stop */
2190  if (_bt_readpage(scan, dir, PageGetMaxOffsetNumber(page)))
2191  break;
2192  }
2193  else if (scan->parallel_scan != NULL)
2194  {
2195  /* allow next page to be processed by parallel worker */
2196  _bt_parallel_release(scan, BufferGetBlockNumber(so->currPos.buf));
2197  }
2198 
2199  /*
2200  * For parallel scans, get the last page scanned as it is quite
2201  * possible that by the time we try to seize the scan, some other
2202  * worker has already advanced the scan to a different page. We
2203  * must continue based on the latest page scanned by any worker.
2204  */
2205  if (scan->parallel_scan != NULL)
2206  {
2207  _bt_relbuf(rel, so->currPos.buf);
2208  status = _bt_parallel_seize(scan, &blkno);
2209  if (!status)
2210  {
2211  BTScanPosInvalidate(so->currPos);
2212  return false;
2213  }
2214  so->currPos.buf = _bt_getbuf(rel, blkno, BT_READ);
2215  }
2216  }
2217  }
2218 
2219  return true;
2220 }
2221 
2222 /*
2223  * _bt_parallel_readpage() -- Read current page containing valid data for scan
2224  *
2225  * On success, we release the lock, and maybe the pin, on the buffer. We
2226  * return true to indicate success.
2227  */
2228 static bool
2229 _bt_parallel_readpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir)
2230 {
2231  BTScanOpaque so = (BTScanOpaque) scan->opaque;
2232 
2233  _bt_initialize_more_data(so, dir);
2234 
2235  if (!_bt_readnextpage(scan, blkno, dir))
2236  return false;
2237 
2238  /* Drop the lock, and maybe the pin, on the current page */
2239  _bt_drop_lock_and_maybe_pin(scan, &so->currPos);
2240 
2241  return true;
2242 }
2243 
2244 /*
2245  * _bt_walk_left() -- step left one page, if possible
2246  *
2247  * The given buffer must be pinned and read-locked. This will be dropped
2248  * before stepping left. On return, we have pin and read lock on the
2249  * returned page, instead.
2250  *
2251  * Returns InvalidBuffer if there is no page to the left (no lock is held
2252  * in that case).
2253  *
2254  * When working on a non-leaf level, it is possible for the returned page
2255  * to be half-dead; the caller should check that condition and step left
2256  * again if it's important.
2257  */
2258 static Buffer
2259 _bt_walk_left(Relation rel, Buffer buf)
2260 {
2261  Page page;
2262  BTPageOpaque opaque;
2263 
2264  page = BufferGetPage(buf);
2265  opaque = BTPageGetOpaque(page);
2266 
2267  for (;;)
2268  {
2269  BlockNumber obknum;
2270  BlockNumber lblkno;
2271  BlockNumber blkno;
2272  int tries;
2273 
2274  /* if we're at end of tree, release buf and return failure */
2275  if (P_LEFTMOST(opaque))
2276  {
2277  _bt_relbuf(rel, buf);
2278  break;
2279  }
2280  /* remember original page we are stepping left from */
2281  obknum = BufferGetBlockNumber(buf);
2282  /* step left */
2283  blkno = lblkno = opaque->btpo_prev;
2284  _bt_relbuf(rel, buf);
2285  /* check for interrupts while we're not holding any buffer lock */
2286  CHECK_FOR_INTERRUPTS();
2287  buf = _bt_getbuf(rel, blkno, BT_READ);
2288  page = BufferGetPage(buf);
2289  opaque = BTPageGetOpaque(page);
2290 
2291  /*
2292  * If this isn't the page we want, walk right till we find what we
2293  * want --- but go no more than four hops (an arbitrary limit). If we
2294  * don't find the correct page by then, the most likely bet is that
2295  * the original page got deleted and isn't in the sibling chain at all
2296  * anymore, not that its left sibling got split more than four times.
2297  *
2298  * Note that it is correct to test P_ISDELETED not P_IGNORE here,
2299  * because half-dead pages are still in the sibling chain. Caller
2300  * must reject half-dead pages if wanted.
2301  */
2302  tries = 0;
2303  for (;;)
2304  {
2305  if (!P_ISDELETED(opaque) && opaque->btpo_next == obknum)
2306  {
2307  /* Found desired page, return it */
2308  return buf;
2309  }
2310  if (P_RIGHTMOST(opaque) || ++tries > 4)
2311  break;
2312  blkno = opaque->btpo_next;
2313  buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
2314  page = BufferGetPage(buf);
2315  opaque = BTPageGetOpaque(page);
2316  }
2317 
2318  /* Return to the original page to see what's up */
2319  buf = _bt_relandgetbuf(rel, buf, obknum, BT_READ);
2320  page = BufferGetPage(buf);
2321  opaque = BTPageGetOpaque(page);
2322  if (P_ISDELETED(opaque))
2323  {
2324  /*
2325  * It was deleted. Move right to first nondeleted page (there
2326  * must be one); that is the page that has acquired the deleted
2327  * one's keyspace, so stepping left from it will take us where we
2328  * want to be.
2329  */
2330  for (;;)
2331  {
2332  if (P_RIGHTMOST(opaque))
2333  elog(ERROR, "fell off the end of index \"%s\"",
2334  RelationGetRelationName(rel));
2335  blkno = opaque->btpo_next;
2336  buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
2337  page = BufferGetPage(buf);
2338  opaque = BTPageGetOpaque(page);
2339  if (!P_ISDELETED(opaque))
2340  break;
2341  }
2342 
2343  /*
2344  * Now return to top of loop, resetting obknum to point to this
2345  * nondeleted page, and try again.
2346  */
2347  }
2348  else
2349  {
2350  /*
2351  * It wasn't deleted; the explanation had better be that the page
2352  * to the left got split or deleted. Without this check, we'd go
2353  * into an infinite loop if there's anything wrong.
2354  */
2355  if (opaque->btpo_prev == lblkno)
2356  elog(ERROR, "could not find left sibling of block %u in index \"%s\"",
2357  obknum, RelationGetRelationName(rel));
2358  /* Okay to try again with new lblkno value */
2359  }
2360  }
2361 
2362  return InvalidBuffer;
2363 }
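 The recovery logic above is easier to see in miniature. Below is an
 editorial toy model (self-contained C with hypothetical ToyPage and
 toy_walk_left names, not PostgreSQL code): follow the possibly-stale left
 link, then walk right at most four hops until some page's right link points
 back at the page we started from.
 
 #include <stdio.h>
 
 typedef struct ToyPage
 {
 	struct ToyPage *left;		/* cf. btpo_prev: may be stale */
 	struct ToyPage *right;		/* cf. btpo_next */
 } ToyPage;
 
 static ToyPage *
 toy_walk_left(ToyPage *start)
 {
 	ToyPage    *cand = start->left; /* may be stale after a split */
 	int			tries = 0;
 
 	while (cand != NULL)
 	{
 		if (cand->right == start)
 			return cand;		/* true left sibling found */
 		if (cand->right == NULL || ++tries > 4)
 			break;				/* rightmost, or give up after four hops */
 		cand = cand->right;
 	}
 	return NULL;
 }
 
 int
 main(void)
 {
 	/* a <-> b <-> c; then "split" b into b, b2 without fixing c's left link */
 	ToyPage		a = {0}, b = {0}, b2 = {0}, c = {0};
 
 	a.right = &b;
 	b.left = &a;
 	b.right = &b2;				/* b2 takes over the right half of b */
 	b2.left = &b;
 	b2.right = &c;
 	c.left = &b;				/* stale: the true left sibling is now b2 */
 
 	printf("left sibling of c: %s\n",
 		   toy_walk_left(&c) == &b2 ? "b2 (recovered)" : "not found");
 	return 0;
 }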
2364 
2365 /*
2366  * _bt_get_endpoint() -- Find the first or last page on a given tree level
2367  *
2368  * If the index is empty, we will return InvalidBuffer; any other failure
2369  * condition causes ereport(). We will not return a dead page.
2370  *
2371  * The returned buffer is pinned and read-locked.
2372  */
2373 Buffer
2374 _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
2375 {
2376  Buffer buf;
2377  Page page;
2378  BTPageOpaque opaque;
2379  OffsetNumber offnum;
2380  BlockNumber blkno;
2381  IndexTuple itup;
2382 
2383  /*
2384  * If we are looking for a leaf page, okay to descend from fast root;
2385  * otherwise better descend from true root. (There is no point in being
2386  * smarter about intermediate levels.)
2387  */
2388  if (level == 0)
2389  buf = _bt_getroot(rel, NULL, BT_READ);
2390  else
2391  buf = _bt_gettrueroot(rel);
2392 
2393  if (!BufferIsValid(buf))
2394  return InvalidBuffer;
2395 
2396  page = BufferGetPage(buf);
2397  opaque = BTPageGetOpaque(page);
2398 
2399  for (;;)
2400  {
2401  /*
2402  * If we landed on a deleted page, step right to find a live page
2403  * (there must be one). Also, if we want the rightmost page, step
2404  * right if needed to get to it (this could happen if the page split
2405  * since we obtained a pointer to it).
2406  */
2407  while (P_IGNORE(opaque) ||
2408  (rightmost && !P_RIGHTMOST(opaque)))
2409  {
2410  blkno = opaque->btpo_next;
2411  if (blkno == P_NONE)
2412  elog(ERROR, "fell off the end of index \"%s\"",
2413  RelationGetRelationName(rel));
2414  buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
2415  page = BufferGetPage(buf);
2416  opaque = BTPageGetOpaque(page);
2417  }
2418 
2419  /* Done? */
2420  if (opaque->btpo_level == level)
2421  break;
2422  if (opaque->btpo_level < level)
2423  ereport(ERROR,
2424  (errcode(ERRCODE_INDEX_CORRUPTED),
2425  errmsg_internal("btree level %u not found in index \"%s\"",
2426  level, RelationGetRelationName(rel))));
2427 
2428  /* Descend to leftmost or rightmost child page */
2429  if (rightmost)
2430  offnum = PageGetMaxOffsetNumber(page);
2431  else
2432  offnum = P_FIRSTDATAKEY(opaque);
2433 
2434  itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
2435  blkno = BTreeTupleGetDownLink(itup);
2436 
2437  buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
2438  page = BufferGetPage(buf);
2439  opaque = BTPageGetOpaque(page);
2440  }
2441 
2442  return buf;
2443 }
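 For a usage reference, the in-file caller is _bt_endpoint() just below
 (line 2470): level 0 requests a leaf page, and rightmost is derived from
 the scan direction:
 
 	buf = _bt_get_endpoint(rel, 0, ScanDirectionIsBackward(dir));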
2444 
2445 /*
2446  * _bt_endpoint() -- Find the first or last page in the index, and scan
2447  * from there to the first key satisfying all the quals.
2448  *
2449  * This is used by _bt_first() to set up a scan when we've determined
2450  * that the scan must start at the beginning or end of the index (for
2451  * a forward or backward scan respectively). Exit conditions are the
2452  * same as for _bt_first().
2453  */
2454 static bool
2455 _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
2456 {
2457  Relation rel = scan->indexRelation;
2458  BTScanOpaque so = (BTScanOpaque) scan->opaque;
2459  Buffer buf;
2460  Page page;
2461  BTPageOpaque opaque;
2462  OffsetNumber start;
2463  BTScanPosItem *currItem;
2464 
2465  /*
2466  * Scan down to the leftmost or rightmost leaf page. This is a simplified
2467  * version of _bt_search(). We don't maintain a stack since we know we
2468  * won't need it.
2469  */
2470  buf = _bt_get_endpoint(rel, 0, ScanDirectionIsBackward(dir));
2471 
2472  if (!BufferIsValid(buf))
2473  {
2474  /*
2475  * Empty index. Lock the whole relation, as nothing finer to lock
2476  * exists.
2477  */
2478  PredicateLockRelation(rel, scan->xs_snapshot);
2479  BTScanPosInvalidate(so->currPos);
2480  return false;
2481  }
2482 
2483  PredicateLockPage(rel, BufferGetBlockNumber(buf), scan->xs_snapshot);
2484  page = BufferGetPage(buf);
2485  opaque = BTPageGetOpaque(page);
2486  Assert(P_ISLEAF(opaque));
2487 
2488  if (ScanDirectionIsForward(dir))
2489  {
2490  /* There could be dead pages to the left, so not this: */
2491  /* Assert(P_LEFTMOST(opaque)); */
2492 
2493  start = P_FIRSTDATAKEY(opaque);
2494  }
2495  else if (ScanDirectionIsBackward(dir))
2496  {
2497  Assert(P_RIGHTMOST(opaque));
2498 
2499  start = PageGetMaxOffsetNumber(page);
2500  }
2501  else
2502  {
2503  elog(ERROR, "invalid scan direction: %d", (int) dir);
2504  start = 0; /* keep compiler quiet */
2505  }
2506 
2507  /* remember which buffer we have pinned */
2508  so->currPos.buf = buf;
2509  so->firstPage = true;
2510 
2511  _bt_initialize_more_data(so, dir);
2512 
2513  /*
2514  * Now load data from the first page of the scan.
2515  */
2516  if (!_bt_readpage(scan, dir, start))
2517  {
2518  /*
2519  * There's no actually-matching data on this page. Try to advance to
2520  * the next page. Return false if there's no matching data at all.
2521  */
2522  _bt_unlockbuf(scan->indexRelation, so->currPos.buf);
2523  if (!_bt_steppage(scan, dir))
2524  return false;
2525  }
2526  else
2527  {
2528  /* Drop the lock, and maybe the pin, on the current page */
2529  _bt_drop_lock_and_maybe_pin(scan, &so->currPos);
2530  }
2531 
2532  /* OK, itemIndex says what to return */
2533  currItem = &so->currPos.items[so->currPos.itemIndex];
2534  scan->xs_heaptid = currItem->heapTid;
2535  if (scan->xs_want_itup)
2536  scan->xs_itup = (IndexTuple) (so->currTuples + currItem->tupleOffset);
2537 
2538  return true;
2539 }
2540 
2541 /*
2542  * _bt_initialize_more_data() -- initialize moreLeft/moreRight appropriately
2543  * for scan direction
2544  */
2545 static inline void
2546 _bt_initialize_more_data(BTScanOpaque so, ScanDirection dir)
2547 {
2548  /* initialize moreLeft/moreRight appropriately for scan direction */
2549  if (ScanDirectionIsForward(dir))
2550  {
2551  so->currPos.moreLeft = false;
2552  so->currPos.moreRight = true;
2553  }
2554  else
2555  {
2556  so->currPos.moreLeft = true;
2557  so->currPos.moreRight = false;
2558  }
2559  so->numKilled = 0; /* just paranoia */
2560  so->markItemIndex = -1; /* ditto */
2561 }
int16 AttrNumber
Definition: attnum.h:21
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
int Buffer
Definition: buf.h:23
#define InvalidBuffer
Definition: buf.h:25
void IncrBufferRefCount(Buffer buffer)
Definition: bufmgr.c:4605
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:3386
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4573
XLogRecPtr BufferGetLSNAtomic(Buffer buffer)
Definition: bufmgr.c:3647
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:350
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:301
Pointer Page
Definition: bufpage.h:78
static Item PageGetItem(Page page, ItemId itemId)
Definition: bufpage.h:351
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:240
static OffsetNumber PageGetMaxOffsetNumber(Page page)
Definition: bufpage.h:369
unsigned int uint32
Definition: c.h:495
#define RegProcedureIsValid(p)
Definition: c.h:766
#define Min(x, y)
Definition: c.h:993
#define INVERT_COMPARE_RESULT(var)
Definition: c.h:1119
#define MAXALIGN(LEN)
Definition: c.h:800
signed int int32
Definition: c.h:483
#define Max(x, y)
Definition: c.h:987
regproc RegProcedure
Definition: c.h:639
#define unlikely(x)
Definition: c.h:300
size_t Size
Definition: c.h:594
struct cursor * cur
Definition: ecpg.c:28
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1156
int errcode(int sqlerrcode)
Definition: elog.c:858
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
Datum FunctionCall2Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2)
Definition: fmgr.c:1132
FmgrInfo * index_getprocinfo(Relation irel, AttrNumber attnum, uint16 procnum)
Definition: indexam.c:826
int i
Definition: isn.c:73
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
int32 ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
Definition: itemptr.c:51
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
IndexTupleData * IndexTuple
Definition: itup.h:53
#define IndexTupleSize(itup)
Definition: itup.h:70
static Datum index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: itup.h:117
#define INDEX_SIZE_MASK
Definition: itup.h:65
Assert(fmt[strlen(fmt) - 1] !='\n')
Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum)
Definition: lsyscache.c:795
void * palloc(Size size)
Definition: mcxt.c:1226
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:121
void _bt_finish_split(Relation rel, Relation heaprel, Buffer lbuf, BTStack stack)
Definition: nbtinsert.c:2241
Buffer _bt_relandgetbuf(Relation rel, Buffer obuf, BlockNumber blkno, int access)
Definition: nbtpage.c:1003
void _bt_relbuf(Relation rel, Buffer buf)
Definition: nbtpage.c:1023
Buffer _bt_gettrueroot(Relation rel)
Definition: nbtpage.c:580
void _bt_metaversion(Relation rel, bool *heapkeyspace, bool *allequalimage)
Definition: nbtpage.c:739
Buffer _bt_getbuf(Relation rel, BlockNumber blkno, int access)
Definition: nbtpage.c:845
void _bt_unlockbuf(Relation rel, Buffer buf)
Definition: nbtpage.c:1070
void _bt_lockbuf(Relation rel, Buffer buf, int access)
Definition: nbtpage.c:1039
Buffer _bt_getroot(Relation rel, Relation heaprel, int access)
Definition: nbtpage.c:344
void _bt_parallel_release(IndexScanDesc scan, BlockNumber scan_page)
Definition: nbtree.c:699
void _bt_parallel_done(IndexScanDesc scan)
Definition: nbtree.c:722
bool _bt_parallel_seize(IndexScanDesc scan, BlockNumber *pageno)
Definition: nbtree.c:641
#define BTScanPosIsPinned(scanpos)
Definition: nbtree.h:995
static uint16 BTreeTupleGetNPosting(IndexTuple posting)
Definition: nbtree.h:518
static bool BTreeTupleIsPivot(IndexTuple itup)
Definition: nbtree.h:480
BTStackData * BTStack
Definition: nbtree.h:739
#define P_ISLEAF(opaque)
Definition: nbtree.h:220
#define P_HIKEY
Definition: nbtree.h:367
#define BTORDER_PROC
Definition: nbtree.h:707
#define P_LEFTMOST(opaque)
Definition: nbtree.h:218
#define BTPageGetOpaque(page)
Definition: nbtree.h:73
#define P_ISDELETED(opaque)
Definition: nbtree.h:222
#define MaxTIDsPerBTreePage
Definition: nbtree.h:185
#define BTScanPosIsValid(scanpos)
Definition: nbtree.h:1012
#define P_FIRSTDATAKEY(opaque)
Definition: nbtree.h:369
static uint32 BTreeTupleGetPostingOffset(IndexTuple posting)
Definition: nbtree.h:529
#define P_NONE
Definition: nbtree.h:212
#define P_RIGHTMOST(opaque)
Definition: nbtree.h:219
#define SK_BT_NULLS_FIRST
Definition: nbtree.h:1095
#define P_INCOMPLETE_SPLIT(opaque)
Definition: nbtree.h:227
static ItemPointer BTreeTupleGetPostingN(IndexTuple posting, int n)
Definition: nbtree.h:544
#define BT_READ
Definition: nbtree.h:719
static BlockNumber BTreeTupleGetDownLink(IndexTuple pivot)
Definition: nbtree.h:556
#define SK_BT_DESC
Definition: nbtree.h:1094
#define P_IGNORE(opaque)
Definition: nbtree.h:225
static ItemPointer BTreeTupleGetMaxHeapTID(IndexTuple itup)
Definition: nbtree.h:664
static bool BTreeTupleIsPosting(IndexTuple itup)
Definition: nbtree.h:492
#define BTScanPosInvalidate(scanpos)
Definition: nbtree.h:1018
#define BTScanPosUnpinIfPinned(scanpos)
Definition: nbtree.h:1006
static ItemPointer BTreeTupleGetHeapTID(IndexTuple itup)
Definition: nbtree.h:638
#define BT_WRITE
Definition: nbtree.h:720
#define BTreeTupleGetNAtts(itup, rel)
Definition: nbtree.h:577
BTScanOpaqueData * BTScanOpaque
Definition: nbtree.h:1084
Buffer _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
Definition: nbtsearch.c:2374
static bool _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir)
Definition: nbtsearch.c:2054
Buffer _bt_moveright(Relation rel, Relation heaprel, BTScanInsert key, Buffer buf, bool forupdate, BTStack stack, int access)
Definition: nbtsearch.c:235
static int _bt_binsrch_posting(BTScanInsert key, Page page, OffsetNumber offnum)
Definition: nbtsearch.c:570
bool _bt_first(IndexScanDesc scan, ScanDirection dir)
Definition: nbtsearch.c:860
static void _bt_saveitem(BTScanOpaque so, int itemIndex, OffsetNumber offnum, IndexTuple itup)
Definition: nbtsearch.c:1851
static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum)
Definition: nbtsearch.c:1533
static bool _bt_parallel_readpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir)
Definition: nbtsearch.c:2229
static int _bt_setuppostingitems(BTScanOpaque so, int itemIndex, OffsetNumber offnum, ItemPointer heapTid, IndexTuple itup)
Definition: nbtsearch.c:1881
static Buffer _bt_walk_left(Relation rel, Buffer buf)
Definition: nbtsearch.c:2259
static void _bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp)
Definition: nbtsearch.c:61
static OffsetNumber _bt_binsrch(Relation rel, BTScanInsert key, Buffer buf)
Definition: nbtsearch.c:338
static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
Definition: nbtsearch.c:2455
static bool _bt_steppage(IndexScanDesc scan, ScanDirection dir)
Definition: nbtsearch.c:1947
static void _bt_savepostingitem(BTScanOpaque so, int itemIndex, OffsetNumber offnum, ItemPointer heapTid, int tupleOffset)
Definition: nbtsearch.c:1919
BTStack _bt_search(Relation rel, Relation heaprel, BTScanInsert key, Buffer *bufP, int access)
Definition: nbtsearch.c:96
OffsetNumber _bt_binsrch_insert(Relation rel, BTInsertState insertstate)
Definition: nbtsearch.c:442
bool _bt_next(IndexScanDesc scan, ScanDirection dir)
Definition: nbtsearch.c:1478
int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum)
Definition: nbtsearch.c:656
static void _bt_initialize_more_data(BTScanOpaque so, ScanDirection dir)
Definition: nbtsearch.c:2546
bool _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple, int tupnatts, ScanDirection dir, bool *continuescan, bool requiredMatchedByPrecheck)
Definition: nbtutils.c:1379
void _bt_freestack(BTStack stack)
Definition: nbtutils.c:182
void _bt_killitems(IndexScanDesc scan)
Definition: nbtutils.c:1773
bool _bt_check_natts(Relation rel, bool heapkeyspace, Page page, OffsetNumber offnum)
Definition: nbtutils.c:2519
void _bt_preprocess_keys(IndexScanDesc scan)
Definition: nbtutils.c:764
#define InvalidOffsetNumber
Definition: off.h:26
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define OffsetNumberPrev(offsetNumber)
Definition: off.h:54
#define INDEX_MAX_KEYS
static char * buf
Definition: pg_test_fsync.c:73
#define pgstat_count_index_scan(rel)
Definition: pgstat.h:622
uintptr_t Datum
Definition: postgres.h:64
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:312
static int32 DatumGetInt32(Datum X)
Definition: postgres.h:202
#define InvalidOid
Definition: postgres_ext.h:36
void PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot)
Definition: predicate.c:2550
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition: predicate.c:2527
short access
Definition: preproc-type.c:36
#define RelationGetDescr(relation)
Definition: rel.h:530
#define RelationGetRelationName(relation)
Definition: rel.h:538
#define RelationNeedsWAL(relation)
Definition: rel.h:629
#define IndexRelationGetNumberOfAttributes(relation)
Definition: rel.h:516
#define IndexRelationGetNumberOfKeyAttributes(relation)
Definition: rel.h:523
void ScanKeyEntryInitialize(ScanKey entry, int flags, AttrNumber attributeNumber, StrategyNumber strategy, Oid subtype, Oid collation, RegProcedure procedure, Datum argument)
Definition: scankey.c:32
void ScanKeyEntryInitializeWithInfo(ScanKey entry, int flags, AttrNumber attributeNumber, StrategyNumber strategy, Oid subtype, Oid collation, FmgrInfo *finfo, Datum argument)
Definition: scankey.c:101
#define ScanDirectionIsForward(direction)
Definition: sdir.h:64
#define ScanDirectionIsBackward(direction)
Definition: sdir.h:50
ScanDirection
Definition: sdir.h:25
#define SK_ROW_HEADER
Definition: skey.h:117
#define SK_ROW_MEMBER
Definition: skey.h:118
#define SK_SEARCHNOTNULL
Definition: skey.h:122
#define SK_ROW_END
Definition: skey.h:119
ScanKeyData * ScanKey
Definition: skey.h:75
#define SK_ISNULL
Definition: skey.h:115
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:62
uint16 StrategyNumber
Definition: stratnum.h:22
#define BTGreaterStrategyNumber
Definition: stratnum.h:33
#define InvalidStrategy
Definition: stratnum.h:24
#define BTLessStrategyNumber
Definition: stratnum.h:29
#define BTEqualStrategyNumber
Definition: stratnum.h:31
#define BTLessEqualStrategyNumber
Definition: stratnum.h:30
#define BTGreaterEqualStrategyNumber
Definition: stratnum.h:32
OffsetNumber stricthigh
Definition: nbtree.h:830
bool bounds_valid
Definition: nbtree.h:828
OffsetNumber low
Definition: nbtree.h:829
BTScanInsert itup_key
Definition: nbtree.h:818
BlockNumber btpo_next
Definition: nbtree.h:65
BlockNumber btpo_prev
Definition: nbtree.h:64
uint32 btpo_level
Definition: nbtree.h:66
char * markTuples
Definition: nbtree.h:1068
BTScanPosData currPos
Definition: nbtree.h:1080
char * currTuples
Definition: nbtree.h:1067
BTScanPosData markPos
Definition: nbtree.h:1081
ScanKey keyData
Definition: nbtree.h:1042
bool moreRight
Definition: nbtree.h:971
Buffer buf
Definition: nbtree.h:958
BlockNumber currPage
Definition: nbtree.h:961
int firstItem
Definition: nbtree.h:986
int nextTupleOffset
Definition: nbtree.h:977
BlockNumber nextPage
Definition: nbtree.h:962
bool moreLeft
Definition: nbtree.h:970
int lastItem
Definition: nbtree.h:987
BTScanPosItem items[MaxTIDsPerBTreePage]
Definition: nbtree.h:990
int itemIndex
Definition: nbtree.h:988
XLogRecPtr lsn
Definition: nbtree.h:960
ItemPointerData heapTid
Definition: nbtree.h:951
LocationIndex tupleOffset
Definition: nbtree.h:953
OffsetNumber indexOffset
Definition: nbtree.h:952
BlockNumber bts_blkno
Definition: nbtree.h:734
struct BTStackData * bts_parent
Definition: nbtree.h:736
OffsetNumber bts_offset
Definition: nbtree.h:735
Definition: fmgr.h:57
struct ParallelIndexScanDescData * parallel_scan
Definition: relscan.h:166
bool ignore_killed_tuples
Definition: relscan.h:129
IndexTuple xs_itup
Definition: relscan.h:142
Relation indexRelation
Definition: relscan.h:118
ItemPointerData xs_heaptid
Definition: relscan.h:147
struct SnapshotData * xs_snapshot
Definition: relscan.h:119
ItemPointerData t_tid
Definition: itup.h:37
unsigned short t_info
Definition: itup.h:49
Oid * rd_opcintype
Definition: rel.h:207
Oid * rd_opfamily
Definition: rel.h:206
int sk_flags
Definition: skey.h:66
Datum sk_argument
Definition: skey.h:72
FmgrInfo sk_func
Definition: skey.h:71
Oid sk_collation
Definition: skey.h:70
StrategyNumber sk_strategy
Definition: skey.h:68
AttrNumber sk_attno
Definition: skey.h:67
#define IsolationIsSerializable()
Definition: xact.h:52