nbtutils.c
1/*-------------------------------------------------------------------------
2 *
3 * nbtutils.c
4 * Utility code for Postgres btree implementation.
5 *
6 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/access/nbtree/nbtutils.c
12 *
13 *-------------------------------------------------------------------------
14 */
15
16#include "postgres.h"
17
18#include <time.h>
19
20#include "access/nbtree.h"
21#include "access/reloptions.h"
22#include "commands/progress.h"
23#include "miscadmin.h"
24#include "utils/datum.h"
25#include "utils/lsyscache.h"
26
27#define LOOK_AHEAD_REQUIRED_RECHECKS 3
28#define LOOK_AHEAD_DEFAULT_DISTANCE 5
29
30static inline int32 _bt_compare_array_skey(FmgrInfo *orderproc,
31 Datum tupdatum, bool tupnull,
32 Datum arrdatum, ScanKey cur);
35static bool _bt_tuple_before_array_skeys(IndexScanDesc scan, ScanDirection dir,
36 IndexTuple tuple, TupleDesc tupdesc, int tupnatts,
37 bool readpagetup, int sktrig, bool *scanBehind);
38static bool _bt_advance_array_keys(IndexScanDesc scan, BTReadPageState *pstate,
39 IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
40 int sktrig, bool sktrig_required);
41#ifdef USE_ASSERT_CHECKING
42static bool _bt_verify_arrays_bt_first(IndexScanDesc scan, ScanDirection dir);
43static bool _bt_verify_keys_with_arraykeys(IndexScanDesc scan);
44#endif
45static bool _bt_check_compare(IndexScanDesc scan, ScanDirection dir,
46 IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
47 bool advancenonrequired, bool prechecked, bool firstmatch,
48 bool *continuescan, int *ikey);
49static bool _bt_check_rowcompare(ScanKey skey,
50 IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
51 ScanDirection dir, bool *continuescan);
52static void _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
53 int tupnatts, TupleDesc tupdesc);
54static int _bt_keep_natts(Relation rel, IndexTuple lastleft,
55 IndexTuple firstright, BTScanInsert itup_key);
56
57
58/*
59 * _bt_mkscankey
60 * Build an insertion scan key that contains comparison data from itup
61 * as well as comparator routines appropriate to the key datatypes.
62 *
63 * The result is intended for use with _bt_compare() and _bt_truncate().
64 * Callers that don't need to fill out the insertion scankey arguments
65 * (e.g. they use an ad-hoc comparison routine, or only need a scankey
66 * for _bt_truncate()) can pass a NULL index tuple. The scankey will
67 * be initialized as if an "all truncated" pivot tuple was passed
68 * instead.
69 *
70 * Note that we may occasionally have to share lock the metapage to
71 * determine whether or not the keys in the index are expected to be
72 * unique (i.e. if this is a "heapkeyspace" index). We assume a
73 * heapkeyspace index when caller passes a NULL tuple, allowing index
74 * build callers to avoid accessing the non-existent metapage. We
75 * also assume that the index is _not_ allequalimage when a NULL tuple
76 * is passed; CREATE INDEX callers call _bt_allequalimage() to set the
77 * field themselves.
78 */
79BTScanInsert
80_bt_mkscankey(Relation rel, IndexTuple itup)
81{
82 BTScanInsert key;
83 ScanKey skey;
84 TupleDesc itupdesc;
85 int indnkeyatts;
86 int16 *indoption;
87 int tupnatts;
88 int i;
89
90 itupdesc = RelationGetDescr(rel);
91 indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
92 indoption = rel->rd_indoption;
93 tupnatts = itup ? BTreeTupleGetNAtts(itup, rel) : 0;
94
96
97 /*
98 * We'll execute search using scan key constructed on key columns.
99 * Truncated attributes and non-key attributes are omitted from the final
100 * scan key.
101 */
102 key = palloc(offsetof(BTScanInsertData, scankeys) +
103 sizeof(ScanKeyData) * indnkeyatts);
104 if (itup)
105 _bt_metaversion(rel, &key->heapkeyspace, &key->allequalimage);
106 else
107 {
108 /* Utility statement callers can set these fields themselves */
109 key->heapkeyspace = true;
110 key->allequalimage = false;
111 }
112 key->anynullkeys = false; /* initial assumption */
113 key->nextkey = false; /* usual case, required by btinsert */
114 key->backward = false; /* usual case, required by btinsert */
115 key->keysz = Min(indnkeyatts, tupnatts);
116 key->scantid = key->heapkeyspace && itup ?
117 BTreeTupleGetHeapTID(itup) : NULL;
118 skey = key->scankeys;
119 for (i = 0; i < indnkeyatts; i++)
120 {
121 FmgrInfo *procinfo;
122 Datum arg;
123 bool null;
124 int flags;
125
126 /*
127 * We can use the cached (default) support procs since no cross-type
128 * comparison can be needed.
129 */
130 procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
131
132 /*
133 * Key arguments built from truncated attributes (or when caller
134 * provides no tuple) are defensively represented as NULL values. They
135 * should never be used.
136 */
137 if (i < tupnatts)
138 arg = index_getattr(itup, i + 1, itupdesc, &null);
139 else
140 {
141 arg = (Datum) 0;
142 null = true;
143 }
144 flags = (null ? SK_ISNULL : 0) | (indoption[i] << SK_BT_INDOPTION_SHIFT);
145 ScanKeyEntryInitializeWithInfo(&skey[i],
146 flags,
147 (AttrNumber) (i + 1),
148 InvalidStrategy,
149 InvalidOid,
150 rel->rd_indcollation[i],
151 procinfo,
152 arg);
153 /* Record if any key attribute is NULL (or truncated) */
154 if (null)
155 key->anynullkeys = true;
156 }
157
158 /*
159 * In NULLS NOT DISTINCT mode, we pretend that there are no null keys, so
160 * that a full uniqueness check is done.
161 */
162 if (rel->rd_index->indnullsnotdistinct)
163 key->anynullkeys = false;
164
165 return key;
166}
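/*
 * Illustrative sketch of a typical caller (hypothetical code, for exposition
 * only): an insertion builds the key from the new index tuple once, then
 * reuses it both to descend the tree and to position itself on the leaf page:
 *
 *     BTScanInsert itup_key = _bt_mkscankey(rel, itup);
 *     stack = _bt_search(...);            -- descend using itup_key
 *     ... binary search within the leaf page via _bt_compare() ...
 *     _bt_freestack(stack);
 *     pfree(itup_key);
 */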
167
168/*
169 * free a retracement stack made by _bt_search.
170 */
171void
172_bt_freestack(BTStack stack)
173{
174 BTStack ostack;
175
176 while (stack != NULL)
177 {
178 ostack = stack;
179 stack = stack->bts_parent;
180 pfree(ostack);
181 }
182}
183
184/*
185 * _bt_compare_array_skey() -- apply array comparison function
186 *
187 * Compares caller's tuple attribute value to a scan key/array element.
188 * Helper function used during binary searches of SK_SEARCHARRAY arrays.
189 *
190 * This routine returns:
191 * <0 if tupdatum < arrdatum;
192 * 0 if tupdatum == arrdatum;
193 * >0 if tupdatum > arrdatum.
194 *
195 * This is essentially the same interface as _bt_compare: both functions
196 * compare the value that they're searching for to a binary search pivot.
197 * However, unlike _bt_compare, this function's "tuple argument" comes first,
198 * while its "array/scankey argument" comes second.
199 */
200static inline int32
201_bt_compare_array_skey(FmgrInfo *orderproc,
202 Datum tupdatum, bool tupnull,
203 Datum arrdatum, ScanKey cur)
204{
205 int32 result = 0;
206
207 Assert(cur->sk_strategy == BTEqualStrategyNumber);
208
209 if (tupnull) /* NULL tupdatum */
210 {
211 if (cur->sk_flags & SK_ISNULL)
212 result = 0; /* NULL "=" NULL */
213 else if (cur->sk_flags & SK_BT_NULLS_FIRST)
214 result = -1; /* NULL "<" NOT_NULL */
215 else
216 result = 1; /* NULL ">" NOT_NULL */
217 }
218 else if (cur->sk_flags & SK_ISNULL) /* NOT_NULL tupdatum, NULL arrdatum */
219 {
220 if (cur->sk_flags & SK_BT_NULLS_FIRST)
221 result = 1; /* NOT_NULL ">" NULL */
222 else
223 result = -1; /* NOT_NULL "<" NULL */
224 }
225 else
226 {
227 /*
228 * Like _bt_compare, we need to be careful of cross-type comparisons,
229 * so the left value has to be the value that came from an index tuple
230 */
231 result = DatumGetInt32(FunctionCall2Coll(orderproc, cur->sk_collation,
232 tupdatum, arrdatum));
233
234 /*
235 * We flip the sign by following the obvious rule: flip whenever the
236 * column is a DESC column.
237 *
238 * _bt_compare does it the wrong way around (flip when *ASC*) in order
239 * to compensate for passing its orderproc arguments backwards. We
240 * don't need to play these games because we find it natural to pass
241 * tupdatum as the left value (and arrdatum as the right value).
242 */
243 if (cur->sk_flags & SK_BT_DESC)
244 INVERT_COMPARE_RESULT(result);
245 }
246
247 return result;
248}
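/*
 * Worked example of the rules above: on a DESC int4 column, comparing a tuple
 * value of 7 against an array value of 5 yields a positive raw ORDER proc
 * result (7 > 5), which INVERT_COMPARE_RESULT flips to negative -- in DESC
 * key space 7 sorts before 5.  For NULLs, a NULL tuple value reports "<" any
 * non-NULL array value under NULLS FIRST, and ">" under NULLS LAST.
 */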
249
250/*
251 * _bt_binsrch_array_skey() -- Binary search for next matching array key
252 *
253 * Returns an index to the first array element >= caller's tupdatum argument.
254 * This convention is more natural for forwards scan callers, but that can't
255 * really matter to backwards scan callers. Both callers require handling for
256 * the case where the match we return is < tupdatum, and symmetric handling
257 * for the case where our best match is > tupdatum.
258 *
259 * Also sets *set_elem_result to the result _bt_compare_array_skey returned
260 * when we used it to compare the matching array element to tupdatum/tupnull.
261 *
262 * cur_elem_trig indicates if array advancement was triggered by this array's
263 * scan key, and that the array is for a required scan key. We can apply this
264 * information to find the next matching array element in the current scan
265 * direction using far fewer comparisons (fewer on average, compared to naive
266 * binary search). This scheme takes advantage of an important property of
267 * required arrays: required arrays always advance in lockstep with the index
268 * scan's progress through the index's key space.
269 */
270int
271_bt_binsrch_array_skey(FmgrInfo *orderproc,
272 bool cur_elem_trig, ScanDirection dir,
273 Datum tupdatum, bool tupnull,
274 BTArrayKeyInfo *array, ScanKey cur,
275 int32 *set_elem_result)
276{
277 int low_elem = 0,
278 mid_elem = -1,
279 high_elem = array->num_elems - 1,
280 result = 0;
281 Datum arrdatum;
282
283 Assert(cur->sk_flags & SK_SEARCHARRAY);
284 Assert(cur->sk_strategy == BTEqualStrategyNumber);
285
286 if (cur_elem_trig)
287 {
289 Assert(cur->sk_flags & SK_BT_REQFWD);
290
291 /*
292 * When the scan key that triggered array advancement is a required
293 * array scan key, it is now certain that the current array element
294 * (plus all prior elements relative to the current scan direction)
295 * cannot possibly be at or ahead of the corresponding tuple value.
296 * (_bt_checkkeys must have called _bt_tuple_before_array_skeys, which
297 * makes sure this is true as a condition of advancing the arrays.)
298 *
299 * This makes it safe to exclude array elements up to and including
300 * the former-current array element from our search.
301 *
302 * Separately, when array advancement was triggered by a required scan
303 * key, the array element immediately after the former-current element
304 * is often either an exact tupdatum match, or a "close by" near-match
305 * (a near-match tupdatum is one whose key space falls _between_ the
306 * former-current and new-current array elements). We'll detect both
307 * cases via an optimistic comparison of the new search lower bound
308 * (or new search upper bound in the case of backwards scans).
309 */
310 if (ScanDirectionIsForward(dir))
311 {
312 low_elem = array->cur_elem + 1; /* old cur_elem exhausted */
313
314 /* Compare prospective new cur_elem (also the new lower bound) */
315 if (high_elem >= low_elem)
316 {
317 arrdatum = array->elem_values[low_elem];
318 result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
319 arrdatum, cur);
320
321 if (result <= 0)
322 {
323 /* Optimistic comparison optimization worked out */
324 *set_elem_result = result;
325 return low_elem;
326 }
327 mid_elem = low_elem;
328 low_elem++; /* this cur_elem exhausted, too */
329 }
330
331 if (high_elem < low_elem)
332 {
333 /* Caller needs to perform "beyond end" array advancement */
334 *set_elem_result = 1;
335 return high_elem;
336 }
337 }
338 else
339 {
340 high_elem = array->cur_elem - 1; /* old cur_elem exhausted */
341
342 /* Compare prospective new cur_elem (also the new upper bound) */
343 if (high_elem >= low_elem)
344 {
345 arrdatum = array->elem_values[high_elem];
346 result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
347 arrdatum, cur);
348
349 if (result >= 0)
350 {
351 /* Optimistic comparison optimization worked out */
352 *set_elem_result = result;
353 return high_elem;
354 }
355 mid_elem = high_elem;
356 high_elem--; /* this cur_elem exhausted, too */
357 }
358
359 if (high_elem < low_elem)
360 {
361 /* Caller needs to perform "beyond end" array advancement */
362 *set_elem_result = -1;
363 return low_elem;
364 }
365 }
366 }
367
368 while (high_elem > low_elem)
369 {
370 mid_elem = low_elem + ((high_elem - low_elem) / 2);
371 arrdatum = array->elem_values[mid_elem];
372
373 result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
374 arrdatum, cur);
375
376 if (result == 0)
377 {
378 /*
379 * It's safe to quit as soon as we see an equal array element.
380 * This often saves an extra comparison or two...
381 */
382 low_elem = mid_elem;
383 break;
384 }
385
386 if (result > 0)
387 low_elem = mid_elem + 1;
388 else
389 high_elem = mid_elem;
390 }
391
392 /*
393 * ...but our caller also cares about how its searched-for tuple datum
394 * compares to the low_elem datum. Must always set *set_elem_result with
395 * the result of that comparison specifically.
396 */
397 if (low_elem != mid_elem)
398 result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
399 array->elem_values[low_elem], cur);
400
401 *set_elem_result = result;
402
403 return low_elem;
404}
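/*
 * Example of the cur_elem_trig fast path: during a forwards scan with array
 * elements (10, 20, 30) and cur_elem currently pointing at 10, a tuple value
 * of 20 is resolved by the single optimistic comparison against the next
 * element (20 itself), with no binary search.  A tuple value of 25 needs just
 * one further comparison to settle on 30, returning *set_elem_result < 0 so
 * the caller knows its tuple is still before the new array element.
 */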
405
406/*
407 * _bt_start_array_keys() -- Initialize array keys at start of a scan
408 *
409 * Set up the cur_elem counters and fill in the first sk_argument value for
410 * each array scankey.
411 */
412void
413_bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
414{
415 BTScanOpaque so = (BTScanOpaque) scan->opaque;
416 int i;
417
418 Assert(so->numArrayKeys);
419 Assert(so->qual_ok);
420
421 for (i = 0; i < so->numArrayKeys; i++)
422 {
423 BTArrayKeyInfo *curArrayKey = &so->arrayKeys[i];
424 ScanKey skey = &so->keyData[curArrayKey->scan_key];
425
426 Assert(curArrayKey->num_elems > 0);
428
429 if (ScanDirectionIsBackward(dir))
430 curArrayKey->cur_elem = curArrayKey->num_elems - 1;
431 else
432 curArrayKey->cur_elem = 0;
433 skey->sk_argument = curArrayKey->elem_values[curArrayKey->cur_elem];
434 }
435 so->scanBehind = so->oppositeDirCheck = false; /* reset */
436}
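/*
 * For example, with "WHERE a IN (1, 2, 3)" a forwards scan starts out with
 * cur_elem = 0 and sk_argument = 1, while a backwards scan starts out with
 * cur_elem = 2 and sk_argument = 3 (the first element for its direction).
 */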
437
438/*
439 * _bt_advance_array_keys_increment() -- Advance to next set of array elements
440 *
441 * Advances the array keys by a single increment in the current scan
442 * direction. When there are multiple array keys this can roll over from the
443 * lowest order array to higher order arrays.
444 *
445 * Returns true if there is another set of values to consider, false if not.
446 * On true result, the scankeys are initialized with the next set of values.
447 * On false result, the scankeys stay the same, and the array keys are not
448 * advanced (every array remains at its final element for scan direction).
449 */
450static bool
451_bt_advance_array_keys_increment(IndexScanDesc scan, ScanDirection dir)
452{
453 BTScanOpaque so = (BTScanOpaque) scan->opaque;
454
455 /*
456 * We must advance the last array key most quickly, since it will
457 * correspond to the lowest-order index column among the available
458 * qualifications
459 */
460 for (int i = so->numArrayKeys - 1; i >= 0; i--)
461 {
462 BTArrayKeyInfo *curArrayKey = &so->arrayKeys[i];
463 ScanKey skey = &so->keyData[curArrayKey->scan_key];
464 int cur_elem = curArrayKey->cur_elem;
465 int num_elems = curArrayKey->num_elems;
466 bool rolled = false;
467
468 if (ScanDirectionIsForward(dir) && ++cur_elem >= num_elems)
469 {
470 cur_elem = 0;
471 rolled = true;
472 }
473 else if (ScanDirectionIsBackward(dir) && --cur_elem < 0)
474 {
475 cur_elem = num_elems - 1;
476 rolled = true;
477 }
478
479 curArrayKey->cur_elem = cur_elem;
480 skey->sk_argument = curArrayKey->elem_values[cur_elem];
481 if (!rolled)
482 return true;
483
484 /* Need to advance next array key, if any */
485 }
486
487 /*
488 * The array keys are now exhausted.
489 *
490 * Restore the array keys to the state they were in immediately before we
491 * were called. This ensures that the arrays only ever ratchet in the
492 * current scan direction.
493 *
494 * Without this, scans could overlook matching tuples when the scan
495 * direction gets reversed just before btgettuple runs out of items to
496 * return, but just after _bt_readpage prepares all the items from the
497 * scan's final page in so->currPos. When we're on the final page it is
498 * typical for so->currPos to get invalidated once btgettuple finally
499 * returns false, which'll effectively invalidate the scan's array keys.
500 * That hasn't happened yet, though -- and in general it may never happen.
501 */
502 _bt_start_array_keys(scan, -dir);
503
504 return false;
505}
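/*
 * Example of the rollover behavior: with "a IN (1, 2)" and "b IN (10, 20)"
 * during a forwards scan, successive increments visit (1, 10) -> (1, 20) ->
 * (2, 10) -> (2, 20).  One more increment rolls every array over, so the
 * elements are restored to (2, 20) (the final set for a forwards scan) and
 * we return false to indicate that the array keys are exhausted.
 */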
506
507/*
508 * _bt_rewind_nonrequired_arrays() -- Rewind non-required arrays
509 *
510 * Called when _bt_advance_array_keys decides to start a new primitive index
511 * scan on the basis of the current scan position being before the position
512 * that _bt_first is capable of repositioning the scan to by applying an
513 * inequality operator required in the opposite-to-scan direction only.
514 *
515 * Although equality strategy scan keys (for both arrays and non-arrays alike)
516 * are either marked required in both directions or in neither direction,
517 * there is a sense in which non-required arrays behave like required arrays.
518 * With a qual such as "WHERE a IN (100, 200) AND b >= 3 AND c IN (5, 6, 7)",
519 * the scan key on "c" is non-required, but nevertheless enables positioning
520 * the scan at the first tuple >= "(100, 3, 5)" on the leaf level during the
521 * first descent of the tree by _bt_first. Later on, there could also be a
522 * second descent, that places the scan right before tuples >= "(200, 3, 5)".
523 * _bt_first must never be allowed to build an insertion scan key whose "c"
524 * entry is set to a value other than 5, the "c" array's first element/value.
525 * (Actually, it's the first in the current scan direction. This example uses
526 * a forward scan.)
527 *
528 * Calling here resets the array scan key elements for the scan's non-required
529 * arrays. This is strictly necessary for correctness in a subset of cases
530 * involving "required in opposite direction"-triggered primitive index scans.
531 * Not all callers are at risk of _bt_first using a non-required array like
532 * this, but advancement always resets the arrays when another primitive scan
533 * is scheduled, just to keep things simple. Array advancement even makes
534 * sure to reset non-required arrays during scans that have no inequalities.
535 * (Advancement still won't call here when there are no inequalities, though
536 * that's just because it's all handled indirectly instead.)
537 *
538 * Note: _bt_verify_arrays_bt_first is called by an assertion to enforce that
539 * everybody got this right.
540 */
541static void
542_bt_rewind_nonrequired_arrays(IndexScanDesc scan, ScanDirection dir)
543{
544 BTScanOpaque so = (BTScanOpaque) scan->opaque;
545 int arrayidx = 0;
546
547 for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
548 {
549 ScanKey cur = so->keyData + ikey;
550 BTArrayKeyInfo *array = NULL;
551 int first_elem_dir;
552
553 if (!(cur->sk_flags & SK_SEARCHARRAY) ||
554 cur->sk_strategy != BTEqualStrategyNumber)
555 continue;
556
557 array = &so->arrayKeys[arrayidx++];
558 Assert(array->scan_key == ikey);
559
560 if ((cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)))
561 continue;
562
563 if (ScanDirectionIsForward(dir))
564 first_elem_dir = 0;
565 else
566 first_elem_dir = array->num_elems - 1;
567
568 if (array->cur_elem != first_elem_dir)
569 {
570 array->cur_elem = first_elem_dir;
571 cur->sk_argument = array->elem_values[first_elem_dir];
572 }
573 }
574}
575
576/*
577 * _bt_tuple_before_array_skeys() -- too early to advance required arrays?
578 *
579 * We always compare the tuple using the current array keys (which we assume
580 * are already set in so->keyData[]). readpagetup indicates if tuple is the
581 * scan's current _bt_readpage-wise tuple.
582 *
583 * readpagetup callers must only call here when _bt_check_compare already set
584 * continuescan=false. We help these callers deal with _bt_check_compare's
585 * inability to distinguish between the < and > cases (it uses equality
586 * operator scan keys, whereas we use 3-way ORDER procs). These callers pass
587 * a _bt_check_compare-set sktrig value that indicates which scan key
588 * triggered the call (!readpagetup callers just pass us sktrig=0 instead).
589 * This information allows us to avoid wastefully checking earlier scan keys
590 * that were already deemed to have been satisfied inside _bt_check_compare.
591 *
592 * Returns false when caller's tuple is >= the current required equality scan
593 * keys (or <=, in the case of backwards scans). This happens to readpagetup
594 * callers when the scan has reached the point of needing its array keys
595 * advanced; caller will need to advance required and non-required arrays at
596 * scan key offsets >= sktrig, plus scan keys < sktrig iff sktrig rolls over.
597 * (When we return false to readpagetup callers, tuple can only be == current
598 * required equality scan keys when caller's sktrig indicates that the arrays
599 * need to be advanced due to an unsatisfied required inequality key trigger.)
600 *
601 * Returns true when caller passes a tuple that is < the current set of
602 * equality keys for the most significant non-equal required scan key/column
603 * (or > the keys, during backwards scans). This happens to readpagetup
604 * callers when tuple is still before the start of matches for the scan's
605 * required equality strategy scan keys. (sktrig can't have indicated that an
606 * inequality strategy scan key wasn't satisfied in _bt_check_compare when we
607 * return true. In fact, we automatically return false when passed such an
608 * inequality sktrig by readpagetup callers -- _bt_check_compare's initial
609 * continuescan=false doesn't really need to be confirmed here by us.)
610 *
611 * !readpagetup callers optionally pass us *scanBehind, which tracks whether
612 * any missing truncated attributes might have affected array advancement
613 * (compared to what would happen if it was shown the first non-pivot tuple on
614 * the page to the right of caller's finaltup/high key tuple instead). It's
615 * only possible that we'll set *scanBehind to true when caller passes us a
616 * pivot tuple (with truncated -inf attributes) that we return false for.
617 */
618static bool
619_bt_tuple_before_array_skeys(IndexScanDesc scan, ScanDirection dir,
620 IndexTuple tuple, TupleDesc tupdesc, int tupnatts,
621 bool readpagetup, int sktrig, bool *scanBehind)
622{
623 BTScanOpaque so = (BTScanOpaque) scan->opaque;
624
625 Assert(so->numArrayKeys);
626 Assert(so->numberOfKeys);
627 Assert(sktrig == 0 || readpagetup);
628 Assert(!readpagetup || scanBehind == NULL);
629
630 if (scanBehind)
631 *scanBehind = false;
632
633 for (int ikey = sktrig; ikey < so->numberOfKeys; ikey++)
634 {
635 ScanKey cur = so->keyData + ikey;
636 Datum tupdatum;
637 bool tupnull;
638 int32 result;
639
640 /* readpagetup calls require one ORDER proc comparison (at most) */
641 Assert(!readpagetup || ikey == sktrig);
642
643 /*
644 * Once we reach a non-required scan key, we're completely done.
645 *
646 * Note: we deliberately don't consider the scan direction here.
647 * _bt_advance_array_keys caller requires that we track *scanBehind
648 * without concern for scan direction.
649 */
650 if ((cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) == 0)
651 {
652 Assert(!readpagetup);
653 Assert(ikey > sktrig || ikey == 0);
654 return false;
655 }
656
657 if (cur->sk_attno > tupnatts)
658 {
659 Assert(!readpagetup);
660
661 /*
662 * When we reach a high key's truncated attribute, assume that the
663 * tuple attribute's value is >= the scan's equality constraint
664 * scan keys (but set *scanBehind to let interested callers know
665 * that a truncated attribute might have affected our answer).
666 */
667 if (scanBehind)
668 *scanBehind = true;
669
670 return false;
671 }
672
673 /*
674 * Deal with inequality strategy scan keys that _bt_check_compare set
675 * continuescan=false for
676 */
677 if (cur->sk_strategy != BTEqualStrategyNumber)
678 {
679 /*
680 * When _bt_check_compare indicated that a required inequality
681 * scan key wasn't satisfied, there's no need to verify anything;
682 * caller always calls _bt_advance_array_keys with this sktrig.
683 */
684 if (readpagetup)
685 return false;
686
687 /*
688 * Otherwise we can't give up, since we must check all required
689 * scan keys (required in either direction) in order to correctly
690 * track *scanBehind for caller
691 */
692 continue;
693 }
694
695 tupdatum = index_getattr(tuple, cur->sk_attno, tupdesc, &tupnull);
696
697 result = _bt_compare_array_skey(&so->orderProcs[ikey],
698 tupdatum, tupnull,
699 cur->sk_argument, cur);
700
701 /*
702 * Does this comparison indicate that caller must _not_ advance the
703 * scan's arrays just yet?
704 */
705 if ((ScanDirectionIsForward(dir) && result < 0) ||
706 (ScanDirectionIsBackward(dir) && result > 0))
707 return true;
708
709 /*
710 * Does this comparison indicate that caller should now advance the
711 * scan's arrays? (Must be if we get here during a readpagetup call.)
712 */
713 if (readpagetup || result != 0)
714 {
715 Assert(result != 0);
716 return false;
717 }
718
719 /*
720 * Inconclusive -- need to check later scan keys, too.
721 *
722 * This must be a finaltup precheck, or a call made from an assertion.
723 */
724 Assert(result == 0);
725 }
726
727 Assert(!readpagetup);
728
729 return false;
730}
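/*
 * Example: during a forwards scan with required keys "a = 5" and
 * "b IN (10, 20)" (current element 10), a tuple (4, 50) returns true -- the
 * scan hasn't caught up with the current array keys yet.  A tuple (5, 10)
 * returns false with nothing to advance, and a tuple (5, 30) returns false
 * because the tuple is now past the current array keys, so caller needs to
 * advance them (via _bt_advance_array_keys).
 */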
731
732/*
733 * _bt_start_prim_scan() -- start scheduled primitive index scan?
734 *
735 * Returns true if _bt_checkkeys scheduled another primitive index scan, just
736 * as the last one ended. Otherwise returns false, indicating that the array
737 * keys are now fully exhausted.
738 *
739 * Only call here during scans with one or more equality type array scan keys,
740 * after _bt_first or _bt_next return false.
741 */
742bool
743_bt_start_prim_scan(IndexScanDesc scan, ScanDirection dir)
744{
745 BTScanOpaque so = (BTScanOpaque) scan->opaque;
746
747 Assert(so->numArrayKeys);
748
749 so->scanBehind = so->oppositeDirCheck = false; /* reset */
750
751 /*
752 * Array keys are advanced within _bt_checkkeys when the scan reaches the
753 * leaf level (more precisely, they're advanced when the scan reaches the
754 * end of each distinct set of array elements). This process avoids
755 * repeat access to leaf pages (across multiple primitive index scans) by
756 * advancing the scan's array keys when it allows the primitive index scan
757 * to find nearby matching tuples (or when it eliminates ranges of array
758 * key space that can't possibly be satisfied by any index tuple).
759 *
760 * _bt_checkkeys sets a simple flag variable to schedule another primitive
761 * index scan. The flag tells us what to do.
762 *
763 * We cannot rely on _bt_first always reaching _bt_checkkeys. There are
764 * various cases where that won't happen. For example, if the index is
765 * completely empty, then _bt_first won't call _bt_readpage/_bt_checkkeys.
766 * We also don't expect a call to _bt_checkkeys during searches for a
767 * non-existent value that happens to be lower/higher than any existing
768 * value in the index.
769 *
770 * We don't require special handling for these cases -- we don't need to
771 * be explicitly instructed to _not_ perform another primitive index scan.
772 * It's up to code under the control of _bt_first to always set the flag
773 * when another primitive index scan will be required.
774 *
775 * This works correctly, even with the tricky cases listed above, which
776 * all involve access to leaf pages "near the boundaries of the key space"
777 * (whether it's from a leftmost/rightmost page, or an imaginary empty
778 * leaf root page). If _bt_checkkeys cannot be reached by a primitive
779 * index scan for one set of array keys, then it also won't be reached for
780 * any later set ("later" in terms of the direction that we scan the index
781 * and advance the arrays). The array keys won't have advanced in these
782 * cases, but that's the correct behavior (even _bt_advance_array_keys
783 * won't always advance the arrays at the point they become "exhausted").
784 */
785 if (so->needPrimScan)
786 {
787 Assert(_bt_verify_arrays_bt_first(scan, dir));
788
789 /*
790 * Flag was set -- must call _bt_first again, which will reset the
791 * scan's needPrimScan flag
792 */
793 return true;
794 }
795
796 /* The top-level index scan ran out of tuples in this scan direction */
797 if (scan->parallel_scan != NULL)
798 _bt_parallel_done(scan);
799
800 return false;
801}
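/*
 * For example, with "WHERE a IN (1, 1000000)" on a large index, the first
 * primitive index scan descends to the start of the "a = 1" tuples.  Once
 * _bt_checkkeys reaches the end of that group it advances the array to
 * 1000000 and sets so->needPrimScan.  When _bt_first/_bt_next then report
 * that no more tuples are available, the caller (e.g. btgettuple) calls here,
 * sees the flag, and gets back true -- so _bt_first descends the tree again,
 * straight to "a = 1000000", instead of walking every intervening leaf page.
 */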
802
803/*
804 * _bt_advance_array_keys() -- Advance array elements using a tuple
805 *
806 * The scan always gets a new qual as a consequence of calling here (except
807 * when we determine that the top-level scan has run out of matching tuples).
808 * All later _bt_check_compare calls also use the same new qual that was first
809 * used here (at least until the next call here advances the keys once again).
810 * It's convenient to structure _bt_check_compare rechecks of caller's tuple
811 * (using the new qual) as one of the steps of advancing the scan's array keys,
812 * so this function works as a wrapper around _bt_check_compare.
813 *
814 * Like _bt_check_compare, we'll set pstate.continuescan on behalf of the
815 * caller, and return a boolean indicating if caller's tuple satisfies the
816 * scan's new qual. But unlike _bt_check_compare, we set so->needPrimScan
817 * when we set continuescan=false, indicating if a new primitive index scan
818 * has been scheduled (otherwise, the top-level scan has run out of tuples in
819 * the current scan direction).
820 *
821 * Caller must use _bt_tuple_before_array_skeys to determine if the current
822 * place in the scan is >= the current array keys _before_ calling here.
823 * We're responsible for ensuring that caller's tuple is <= the newly advanced
824 * required array keys once we return. We try to find an exact match, but
825 * failing that we'll advance the array keys to whatever set of array elements
826 * comes next in the key space for the current scan direction. Required array
827 * keys "ratchet forwards" (or backwards). They can only advance as the scan
828 * itself advances through the index/key space.
829 *
830 * (The rules are the same for backwards scans, except that the operators are
831 * flipped: just replace the precondition's >= operator with a <=, and the
832 * postcondition's <= operator with a >=. In other words, just swap the
833 * precondition with the postcondition.)
834 *
835 * We also deal with "advancing" non-required arrays here. Callers whose
836 * sktrig scan key is non-required specify sktrig_required=false. These calls
837 * are the only exception to the general rule about always advancing the
838 * required array keys (the scan may not even have a required array). These
839 * callers should just pass a NULL pstate (since there is never any question
840 * of stopping the scan). No call to _bt_tuple_before_array_skeys is required
841 * ahead of these calls (it's already clear that any required scan keys must
842 * be satisfied by caller's tuple).
843 *
844 * Note that we deal with non-array required equality strategy scan keys as
845 * degenerate single element arrays here. Obviously, they can never really
846 * advance in the way that real arrays can, but they must still affect how we
847 * advance real array scan keys (exactly like true array equality scan keys).
848 * We have to keep around a 3-way ORDER proc for these (using the "=" operator
849 * won't do), since in general whether the tuple is < or > _any_ unsatisfied
850 * required equality key influences how the scan's real arrays must advance.
851 *
852 * Note also that we may sometimes need to advance the array keys when the
853 * existing required array keys (and other required equality keys) are already
854 * an exact match for every corresponding value from caller's tuple. We must
855 * do this for inequalities that _bt_check_compare set continuescan=false for.
856 * They'll advance the array keys here, just like any other scan key that
857 * _bt_check_compare stops on. (This can even happen _after_ we advance the
858 * array keys, in which case we'll advance the array keys a second time. That
859 * way _bt_checkkeys caller always has its required arrays advance to the
860 * maximum possible extent that its tuple will allow.)
861 */
862static bool
863_bt_advance_array_keys(IndexScanDesc scan, BTReadPageState *pstate,
864 IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
865 int sktrig, bool sktrig_required)
866{
867 BTScanOpaque so = (BTScanOpaque) scan->opaque;
868 Relation rel = scan->indexRelation;
869 ScanDirection dir = so->currPos.dir;
870 int arrayidx = 0;
871 bool beyond_end_advance = false,
872 has_required_opposite_direction_only = false,
873 oppodir_inequality_sktrig = false,
874 all_required_satisfied = true,
875 all_satisfied = true;
876
877 /*
878 * Unset so->scanBehind (and so->oppositeDirCheck) in case they're still
879 * set from back when we dealt with the previous page's high key/finaltup
880 */
881 so->scanBehind = so->oppositeDirCheck = false;
882
883 if (sktrig_required)
884 {
885 /*
886 * Precondition array state assertion
887 */
888 Assert(!_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc,
889 tupnatts, false, 0, NULL));
890
891 /*
892 * Required scan key wasn't satisfied, so required arrays will have to
893 * advance. Invalidate page-level state that tracks whether the
894 * scan's required-in-opposite-direction-only keys are known to be
895 * satisfied by page's remaining tuples.
896 */
897 pstate->firstmatch = false;
898
899 /* Shouldn't have to invalidate 'prechecked', though */
900 Assert(!pstate->prechecked);
901
902 /*
903 * Once we return we'll have a new set of required array keys, so
904 * reset state used by "look ahead" optimization
905 */
906 pstate->rechecks = 0;
907 pstate->targetdistance = 0;
908 }
909
910 Assert(_bt_verify_keys_with_arraykeys(scan));
911
912 for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
913 {
914 ScanKey cur = so->keyData + ikey;
915 BTArrayKeyInfo *array = NULL;
916 Datum tupdatum;
917 bool required = false,
918 required_opposite_direction_only = false,
919 tupnull;
920 int32 result;
921 int set_elem = 0;
922
923 if (cur->sk_strategy == BTEqualStrategyNumber)
924 {
925 /* Manage array state */
926 if (cur->sk_flags & SK_SEARCHARRAY)
927 {
928 array = &so->arrayKeys[arrayidx++];
929 Assert(array->scan_key == ikey);
930 }
931 }
932 else
933 {
934 /*
935 * Are any inequalities required in the opposite direction only
936 * present here?
937 */
938 if (((ScanDirectionIsForward(dir) &&
939 (cur->sk_flags & (SK_BT_REQBKWD))) ||
940 (ScanDirectionIsBackward(dir) &&
941 (cur->sk_flags & (SK_BT_REQFWD)))))
942 has_required_opposite_direction_only =
943 required_opposite_direction_only = true;
944 }
945
946 /* Optimization: skip over known-satisfied scan keys */
947 if (ikey < sktrig)
948 continue;
949
950 if (cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))
951 {
952 Assert(sktrig_required);
953
954 required = true;
955
956 if (cur->sk_attno > tupnatts)
957 {
958 /* Set this just like _bt_tuple_before_array_skeys */
959 Assert(sktrig < ikey);
960 so->scanBehind = true;
961 }
962 }
963
964 /*
965 * Handle a required non-array scan key that the initial call to
966 * _bt_check_compare indicated triggered array advancement, if any.
967 *
968 * The non-array scan key's strategy will be <, <=, or = during a
969 * forwards scan (or any one of =, >=, or > during a backwards scan).
970 * It follows that the corresponding tuple attribute's value must now
971 * be either > or >= the scan key value (for backwards scans it must
972 * be either < or <= that value).
973 *
974 * If this is a required equality strategy scan key, this is just an
975 * optimization; _bt_tuple_before_array_skeys already confirmed that
976 * this scan key places us ahead of caller's tuple. There's no need
977 * to repeat that work now. (The same underlying principle also gets
978 * applied by the cur_elem_trig optimization used to speed up searches
979 * for the next array element.)
980 *
981 * If this is a required inequality strategy scan key, we _must_ rely
982 * on _bt_check_compare like this; we aren't capable of directly
983 * evaluating required inequality strategy scan keys here, on our own.
984 */
985 if (ikey == sktrig && !array)
986 {
987 Assert(sktrig_required && required && all_required_satisfied);
988
989 /* Use "beyond end" advancement. See below for an explanation. */
990 beyond_end_advance = true;
991 all_satisfied = all_required_satisfied = false;
992
993 /*
994 * Set a flag that remembers that this was an inequality required
995 * in the opposite scan direction only, that nevertheless
996 * triggered the call here.
997 *
998 * This only happens when an inequality operator (which must be
999 * strict) encounters a group of NULLs that indicate the end of
1000 * non-NULL values for tuples in the current scan direction.
1001 */
1002 if (unlikely(required_opposite_direction_only))
1003 oppodir_inequality_sktrig = true;
1004
1005 continue;
1006 }
1007
1008 /*
1009 * Nothing more for us to do with an inequality strategy scan key that
1010 * wasn't the one that _bt_check_compare stopped on, though.
1011 *
1012 * Note: if our later call to _bt_check_compare (to recheck caller's
1013 * tuple) sets continuescan=false due to finding this same inequality
1014 * unsatisfied (possible when it's required in the scan direction),
1015 * we'll deal with it via a recursive "second pass" call.
1016 */
1017 else if (cur->sk_strategy != BTEqualStrategyNumber)
1018 continue;
1019
1020 /*
1021 * Nothing for us to do with an equality strategy scan key that isn't
1022 * marked required, either -- unless it's a non-required array
1023 */
1024 else if (!required && !array)
1025 continue;
1026
1027 /*
1028 * Here we perform steps for all array scan keys after a required
1029 * array scan key whose binary search triggered "beyond end of array
1030 * element" array advancement due to encountering a tuple attribute
1031 * value > the closest matching array key (or < for backwards scans).
1032 */
1033 if (beyond_end_advance)
1034 {
1035 int final_elem_dir;
1036
1037 if (ScanDirectionIsBackward(dir) || !array)
1038 final_elem_dir = 0;
1039 else
1040 final_elem_dir = array->num_elems - 1;
1041
1042 if (array && array->cur_elem != final_elem_dir)
1043 {
1044 array->cur_elem = final_elem_dir;
1045 cur->sk_argument = array->elem_values[final_elem_dir];
1046 }
1047
1048 continue;
1049 }
1050
1051 /*
1052 * Here we perform steps for all array scan keys after a required
1053 * array scan key whose tuple attribute was < the closest matching
1054 * array key when we dealt with it (or > for backwards scans).
1055 *
1056 * This earlier required array key already puts us ahead of caller's
1057 * tuple in the key space (for the current scan direction). We must
1058 * make sure that subsequent lower-order array keys do not put us too
1059 * far ahead (ahead of tuples that have yet to be seen by our caller).
1060 * For example, when a tuple "(a, b) = (42, 5)" advances the array
1061 * keys on "a" from 40 to 45, we must also set "b" to whatever the
1062 * first array element for "b" is. It would be wrong to allow "b" to
1063 * be set based on the tuple value.
1064 *
1065 * Perform the same steps with truncated high key attributes. You can
1066 * think of this as a "binary search" for the element closest to the
1067 * value -inf. Again, the arrays must never get ahead of the scan.
1068 */
1069 if (!all_required_satisfied || cur->sk_attno > tupnatts)
1070 {
1071 int first_elem_dir;
1072
1073 if (ScanDirectionIsForward(dir) || !array)
1074 first_elem_dir = 0;
1075 else
1076 first_elem_dir = array->num_elems - 1;
1077
1078 if (array && array->cur_elem != first_elem_dir)
1079 {
1080 array->cur_elem = first_elem_dir;
1081 cur->sk_argument = array->elem_values[first_elem_dir];
1082 }
1083
1084 continue;
1085 }
1086
1087 /*
1088 * Search in scankey's array for the corresponding tuple attribute
1089 * value from caller's tuple
1090 */
1091 tupdatum = index_getattr(tuple, cur->sk_attno, tupdesc, &tupnull);
1092
1093 if (array)
1094 {
1095 bool cur_elem_trig = (sktrig_required && ikey == sktrig);
1096
1097 /*
1098 * Binary search for closest match that's available from the array
1099 */
1100 set_elem = _bt_binsrch_array_skey(&so->orderProcs[ikey],
1101 cur_elem_trig, dir,
1102 tupdatum, tupnull, array, cur,
1103 &result);
1104
1105 Assert(set_elem >= 0 && set_elem < array->num_elems);
1106 }
1107 else
1108 {
1109 Assert(sktrig_required && required);
1110
1111 /*
1112 * This is a required non-array equality strategy scan key, which
1113 * we'll treat as a degenerate single element array.
1114 *
1115 * This scan key's imaginary "array" can't really advance, but it
1116 * can still roll over like any other array. (Actually, this is
1117 * no different to real single value arrays, which never advance
1118 * without rolling over -- they can never truly advance, either.)
1119 */
1120 result = _bt_compare_array_skey(&so->orderProcs[ikey],
1121 tupdatum, tupnull,
1122 cur->sk_argument, cur);
1123 }
1124
1125 /*
1126 * Consider "beyond end of array element" array advancement.
1127 *
1128 * When the tuple attribute value is > the closest matching array key
1129 * (or < in the backwards scan case), we need to ratchet this array
1130 * forward (backward) by one increment, so that caller's tuple ends up
1131 * being < final array value instead (or > final array value instead).
1132 * This process has to work for all of the arrays, not just this one:
1133 * it must "carry" to higher-order arrays when the set_elem that we
1134 * just found happens to be the final one for the scan's direction.
1135 * Incrementing (decrementing) set_elem itself isn't good enough.
1136 *
1137 * Our approach is to provisionally use set_elem as if it was an exact
1138 * match now, then set each later/less significant array to whatever
1139 * its final element is. Once outside the loop we'll then "increment
1140 * this array's set_elem" by calling _bt_advance_array_keys_increment.
1141 * That way the process rolls over to higher order arrays as needed.
1142 *
1143 * Under this scheme any required arrays only ever ratchet forwards
1144 * (or backwards), and always do so to the maximum possible extent
1145 * that we can know will be safe without seeing the scan's next tuple.
1146 * We don't need any special handling for required scan keys that lack
1147 * a real array to advance, nor for redundant scan keys that couldn't
1148 * be eliminated by _bt_preprocess_keys. It won't matter if some of
1149 * our "true" array scan keys (or even all of them) are non-required.
1150 */
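 /*
 * Worked example: with "a IN (1, 3, 5)" and "b IN (10, 20)" during a
 * forwards scan, a tuple (3, 30) matches the "a" array exactly, but 30 is
 * beyond the final "b" element.  We provisionally leave "b" at its closest
 * match (20) and remember beyond_end_advance.  After the loop,
 * _bt_advance_array_keys_increment rolls "b" over to 10 and carries into
 * "a", leaving the new array keys at (5, 10) -- the next set of array keys
 * whose key space is strictly after the tuple (3, 30).
 */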
1151 if (required &&
1152 ((ScanDirectionIsForward(dir) && result > 0) ||
1153 (ScanDirectionIsBackward(dir) && result < 0)))
1154 beyond_end_advance = true;
1155
1156 Assert(all_required_satisfied && all_satisfied);
1157 if (result != 0)
1158 {
1159 /*
1160 * Track whether caller's tuple satisfies our new post-advancement
1161 * qual, for required scan keys, as well as for the entire set of
1162 * interesting scan keys (all required scan keys plus non-required
1163 * array scan keys are considered interesting.)
1164 */
1165 all_satisfied = false;
1166 if (required)
1167 all_required_satisfied = false;
1168 else
1169 {
1170 /*
1171 * There's no need to advance the arrays using the best
1172 * available match for a non-required array. Give up now.
1173 * (Though note that sktrig_required calls still have to do
1174 * all the usual post-advancement steps, including the recheck
1175 * call to _bt_check_compare.)
1176 */
1177 break;
1178 }
1179 }
1180
1181 /* Advance array keys, even when set_elem isn't an exact match */
1182 if (array && array->cur_elem != set_elem)
1183 {
1184 array->cur_elem = set_elem;
1185 cur->sk_argument = array->elem_values[set_elem];
1186 }
1187 }
1188
1189 /*
1190 * Advance the array keys incrementally whenever "beyond end of array
1191 * element" array advancement happens, so that advancement will carry to
1192 * higher-order arrays (might exhaust all the scan's arrays instead, which
1193 * ends the top-level scan).
1194 */
1195 if (beyond_end_advance && !_bt_advance_array_keys_increment(scan, dir))
1196 goto end_toplevel_scan;
1197
1198 Assert(_bt_verify_keys_with_arraykeys(scan));
1199
1200 /*
1201 * Does tuple now satisfy our new qual? Recheck with _bt_check_compare.
1202 *
1203 * Calls triggered by an unsatisfied required scan key, whose tuple now
1204 * satisfies all required scan keys, but not all nonrequired array keys,
1205 * will still require a recheck call to _bt_check_compare. They'll still
1206 * need its "second pass" handling of required inequality scan keys.
1207 * (Might have missed a still-unsatisfied required inequality scan key
1208 * that caller didn't detect as the sktrig scan key during its initial
1209 * _bt_check_compare call that used the old/original qual.)
1210 *
1211 * Calls triggered by an unsatisfied nonrequired array scan key never need
1212 * "second pass" handling of required inequalities (nor any other handling
1213 * of any required scan key). All that matters is whether caller's tuple
1214 * satisfies the new qual, so it's safe to just skip the _bt_check_compare
1215 * recheck when we've already determined that it can only return 'false'.
1216 */
1217 if ((sktrig_required && all_required_satisfied) ||
1218 (!sktrig_required && all_satisfied))
1219 {
1220 int nsktrig = sktrig + 1;
1221 bool continuescan;
1222
1223 Assert(all_required_satisfied);
1224
1225 /* Recheck _bt_check_compare on behalf of caller */
1226 if (_bt_check_compare(scan, dir, tuple, tupnatts, tupdesc,
1227 false, false, false,
1228 &continuescan, &nsktrig) &&
1229 !so->scanBehind)
1230 {
1231 /* This tuple satisfies the new qual */
1232 Assert(all_satisfied && continuescan);
1233
1234 if (pstate)
1235 pstate->continuescan = true;
1236
1237 return true;
1238 }
1239
1240 /*
1241 * Consider "second pass" handling of required inequalities.
1242 *
1243 * It's possible that our _bt_check_compare call indicated that the
1244 * scan should end due to some unsatisfied inequality that wasn't
1245 * initially recognized as such by us. Handle this by calling
1246 * ourselves recursively, this time indicating that the trigger is the
1247 * inequality that we missed first time around (and using a set of
1248 * required array/equality keys that are now exact matches for tuple).
1249 *
1250 * We make a strong, general guarantee that every _bt_checkkeys call
1251 * here will advance the array keys to the maximum possible extent
1252 * that we can know to be safe based on caller's tuple alone. If we
1253 * didn't perform this step, then that guarantee wouldn't quite hold.
1254 */
1255 if (unlikely(!continuescan))
1256 {
1257 bool satisfied PG_USED_FOR_ASSERTS_ONLY;
1258
1259 Assert(sktrig_required);
1261
1262 /*
1263 * The tuple must use "beyond end" advancement during the
1264 * recursive call, so we cannot possibly end up back here when
1265 * recursing. We'll consume a small, fixed amount of stack space.
1266 */
1267 Assert(!beyond_end_advance);
1268
1269 /* Advance the array keys a second time using same tuple */
1270 satisfied = _bt_advance_array_keys(scan, pstate, tuple, tupnatts,
1271 tupdesc, nsktrig, true);
1272
1273 /* This tuple doesn't satisfy the inequality */
1274 Assert(!satisfied);
1275 return false;
1276 }
1277
1278 /*
1279 * Some non-required scan key (from new qual) still not satisfied.
1280 *
1281 * All scan keys required in the current scan direction must still be
1282 * satisfied, though, so we can trust all_required_satisfied below.
1283 */
1284 }
1285
1286 /*
1287 * When we were called just to deal with "advancing" non-required arrays,
1288 * this is as far as we can go (cannot stop the scan for these callers)
1289 */
1290 if (!sktrig_required)
1291 {
1292 /* Caller's tuple doesn't match any qual */
1293 return false;
1294 }
1295
1296 /*
1297 * Postcondition array state assertion (for still-unsatisfied tuples).
1298 *
1299 * By here we have established that the scan's required arrays (scan must
1300 * have at least one required array) advanced, without becoming exhausted.
1301 *
1302 * Caller's tuple is now < the newly advanced array keys (or > when this
1303 * is a backwards scan), except in the case where we only got this far due
1304 * to an unsatisfied non-required scan key. Verify that with an assert.
1305 *
1306 * Note: we don't just quit at this point when all required scan keys were
1307 * found to be satisfied because we need to consider edge-cases involving
1308 * scan keys required in the opposite direction only; those aren't tracked
1309 * by all_required_satisfied. (Actually, oppodir_inequality_sktrig trigger
1310 * scan keys are tracked by all_required_satisfied, since it's convenient
1311 * for _bt_check_compare to behave as if they are required in the current
1312 * scan direction to deal with NULLs. We'll account for that separately.)
1313 */
1314 Assert(_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc, tupnatts,
1315 false, 0, NULL) ==
1316 !all_required_satisfied);
1317
1318 /*
1319 * We generally permit primitive index scans to continue onto the next
1320 * sibling page when the page's finaltup satisfies all required scan keys
1321 * at the point where we're between pages.
1322 *
1323 * If caller's tuple is also the page's finaltup, and we see that required
1324 * scan keys still aren't satisfied, start a new primitive index scan.
1325 */
1326 if (!all_required_satisfied && pstate->finaltup == tuple)
1327 goto new_prim_scan;
1328
1329 /*
1330 * Proactively check finaltup (don't wait until finaltup is reached by the
1331 * scan) when it might well turn out to not be satisfied later on.
1332 *
1333 * Note: if so->scanBehind hasn't already been set for finaltup by us,
1334 * it'll be set during this call to _bt_tuple_before_array_skeys. Either
1335 * way, it'll be set correctly (for the whole page) after this point.
1336 */
1337 if (!all_required_satisfied && pstate->finaltup &&
1338 _bt_tuple_before_array_skeys(scan, dir, pstate->finaltup, tupdesc,
1339 BTreeTupleGetNAtts(pstate->finaltup, rel),
1340 false, 0, &so->scanBehind))
1341 goto new_prim_scan;
1342
1343 /*
1344 * When we encounter a truncated finaltup high key attribute, we're
1345 * optimistic about the chances of its corresponding required scan key
1346 * being satisfied when we go on to check it against tuples from this
1347 * page's right sibling leaf page. We consider truncated attributes to be
1348 * satisfied by required scan keys, which allows the primitive index scan
1349 * to continue to the next leaf page. We must set so->scanBehind to true
1350 * to remember that the last page's finaltup had "satisfied" required scan
1351 * keys for one or more truncated attribute values (scan keys required in
1352 * _either_ scan direction).
1353 *
1354 * There is a chance that _bt_checkkeys (which checks so->scanBehind) will
1355 * find that even the sibling leaf page's finaltup is < the new array
1356 * keys. When that happens, our optimistic policy will have incurred a
1357 * single extra leaf page access that could have been avoided.
1358 *
1359 * A pessimistic policy would give backward scans a gratuitous advantage
1360 * over forward scans. We'd punish forward scans for applying more
1361 * accurate information from the high key, rather than just using the
1362 * final non-pivot tuple as finaltup, in the style of backward scans.
1363 * Being pessimistic would also give some scans with non-required arrays a
1364 * perverse advantage over similar scans that use required arrays instead.
1365 *
1366 * You can think of this as a speculative bet on what the scan is likely
1367 * to find on the next page. It's not much of a gamble, though, since the
1368 * untruncated prefix of attributes must strictly satisfy the new qual
1369 * (though it's okay if any non-required scan keys fail to be satisfied).
1370 */
1371 if (so->scanBehind && has_required_opposite_direction_only)
1372 {
1373 /*
1374 * However, we need to work harder whenever the scan involves a scan
1375 * key required in the opposite direction to the scan only, along with
1376 * a finaltup with at least one truncated attribute that's associated
1377 * with a scan key marked required (required in either direction).
1378 *
1379 * _bt_check_compare simply won't stop the scan for a scan key that's
1380 * marked required in the opposite scan direction only. That leaves
1381 * us without an automatic way of reconsidering any opposite-direction
1382 * inequalities if it turns out that starting a new primitive index
1383 * scan will allow _bt_first to skip ahead by a great many leaf pages.
1384 *
1385 * We deal with this by explicitly scheduling a finaltup recheck on
1386 * the right sibling page. _bt_readpage calls _bt_oppodir_checkkeys
1387 * for next page's finaltup (and we skip it for this page's finaltup).
1388 */
1389 so->oppositeDirCheck = true; /* recheck next page's high key */
1390 }
1391
1392 /*
1393 * Handle inequalities marked required in the opposite scan direction.
1394 * They can also signal that we should start a new primitive index scan.
1395 *
1396 * It's possible that the scan is now positioned where "matching" tuples
1397 * begin, and that caller's tuple satisfies all scan keys required in the
1398 * current scan direction. But if caller's tuple still doesn't satisfy
1399 * other scan keys that are required in the opposite scan direction only
1400 * (e.g., a required >= strategy scan key when scan direction is forward),
1401 * it's still possible that there are many leaf pages before the page that
1402 * _bt_first could skip straight to. Groveling through all those pages
1403 * will always give correct answers, but it can be very inefficient. We
1404 * must avoid needlessly scanning extra pages.
1405 *
1406 * Separately, it's possible that _bt_check_compare set continuescan=false
1407 * for a scan key that's required in the opposite direction only. This is
1408 * a special case, that happens only when _bt_check_compare sees that the
1409 * inequality encountered a NULL value. This signals the end of non-NULL
1410 * values in the current scan direction, which is reason enough to end the
1411 * (primitive) scan. If this happens at the start of a large group of
1412 * NULL values, then we shouldn't expect to be called again until after
1413 * the scan has already read indefinitely-many leaf pages full of tuples
1414 * with NULL suffix values. We need a separate test for this case so that
1415 * we don't miss our only opportunity to skip over such a group of pages.
1416 * (_bt_first is expected to skip over the group of NULLs by applying a
1417 * similar "deduce NOT NULL" rule, where it finishes its insertion scan
1418 * key by consing up an explicit SK_SEARCHNOTNULL key.)
1419 *
1420 * Apply a test against finaltup to detect and recover from the problem:
1421 * if even finaltup doesn't satisfy such an inequality, we just skip by
1422 * starting a new primitive index scan. When we skip, we know for sure
1423 * that all of the tuples on the current page following caller's tuple are
1424 * also before the _bt_first-wise start of tuples for our new qual. That
1425 * at least suggests many more skippable pages beyond the current page.
1426 * (when so->oppositeDirCheck was set, this'll happen on the next page.)
1427 */
1428 else if (has_required_opposite_direction_only && pstate->finaltup &&
1429 (all_required_satisfied || oppodir_inequality_sktrig) &&
1430 unlikely(!_bt_oppodir_checkkeys(scan, dir, pstate->finaltup)))
1431 {
1432 /*
1433 * Make sure that any non-required arrays are set to the first array
1434 * element for the current scan direction
1435 */
1436 _bt_rewind_nonrequired_arrays(scan, dir);
1437 goto new_prim_scan;
1438 }
1439
1440 /*
1441 * Stick with the ongoing primitive index scan for now.
1442 *
1443 * It's possible that later tuples will also turn out to have values that
1444 * are still < the now-current array keys (or > the current array keys).
1445 * Our caller will handle this by performing what amounts to a linear
1446 * search of the page, implemented by calling _bt_check_compare and then
1447 * _bt_tuple_before_array_skeys for each tuple.
1448 *
1449 * This approach has various advantages over a binary search of the page.
1450 * Repeated binary searches of the page (one binary search for every array
1451 * advancement) won't outperform a continuous linear search. While there
1452 * are workloads that a naive linear search won't handle well, our caller
1453 * has a "look ahead" fallback mechanism to deal with that problem.
1454 */
1455 pstate->continuescan = true; /* Override _bt_check_compare */
1456 so->needPrimScan = false; /* _bt_readpage has more tuples to check */
1457
1458 if (so->scanBehind)
1459 {
1460 /* Optimization: skip by setting "look ahead" mechanism's offnum */
1462 pstate->skip = pstate->maxoff + 1;
1463 }
1464
1465 /* Caller's tuple doesn't match the new qual */
1466 return false;
1467
1468new_prim_scan:
1469
1470 Assert(pstate->finaltup); /* not on rightmost/leftmost page */
1471
1472 /*
1473 * End this primitive index scan, but schedule another.
1474 *
1475 * Note: We make a soft assumption that the current scan direction will
1476 * also be used within _bt_next, when it is asked to step off this page.
1477 * It is up to _bt_next to cancel this scheduled primitive index scan
1478 * whenever it steps to a page in the direction opposite currPos.dir.
1479 */
1480 pstate->continuescan = false; /* Tell _bt_readpage we're done... */
1481 so->needPrimScan = true; /* ...but call _bt_first again */
1482
1483 if (scan->parallel_scan)
1484 _bt_parallel_primscan_schedule(scan, so->currPos.currPage);
1485
1486 /* Caller's tuple doesn't match the new qual */
1487 return false;
1488
1489end_toplevel_scan:
1490
1491 /*
1492 * End the current primitive index scan, but don't schedule another.
1493 *
1494 * This ends the entire top-level scan in the current scan direction.
1495 *
1496 * Note: The scan's arrays (including any non-required arrays) are now in
1497 * their final positions for the current scan direction. If the scan
1498 * direction happens to change, then the arrays will already be in their
1499 * first positions for what will then be the current scan direction.
1500 */
1501 pstate->continuescan = false; /* Tell _bt_readpage we're done... */
1502 so->needPrimScan = false; /* ...don't call _bt_first again, though */
1503
1504 /* Caller's tuple doesn't match any qual */
1505 return false;
1506}
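/*
 * Illustration of the trade-off made above: given an index on (a, b) and a
 * qual such as "WHERE a = ANY ('{1, 5000}') AND b = 42", advancing the
 * array from a = 1 to a = 5000 on a page whose finaltup still shows a = 1
 * suggests that many later leaf pages are also full of a = 1 tuples, so
 * scheduling a new primitive index scan (another _bt_first descent) is
 * expected to win.  If finaltup already shows a = 5000, the next matching
 * tuples are close by, and continuing to read the current page is expected
 * to be cheaper.
 */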
1507
1508#ifdef USE_ASSERT_CHECKING
1509/*
1510 * Verify that the scan's qual state matches what we expect at the point that
1511 * _bt_start_prim_scan is about to start a just-scheduled new primitive scan.
1512 *
1513 * We enforce a rule against non-required array scan keys: they must start out
1514 * with whatever element is the first for the scan's current scan direction.
1515 * See _bt_rewind_nonrequired_arrays comments for an explanation.
1516 */
1517static bool
1518_bt_verify_arrays_bt_first(IndexScanDesc scan, ScanDirection dir)
1519{
1520 BTScanOpaque so = (BTScanOpaque) scan->opaque;
1521 int arrayidx = 0;
1522
1523 for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
1524 {
1525 ScanKey cur = so->keyData + ikey;
1526 BTArrayKeyInfo *array = NULL;
1527 int first_elem_dir;
1528
1529 if (!(cur->sk_flags & SK_SEARCHARRAY) ||
1530 cur->sk_strategy != BTEqualStrategyNumber)
1531 continue;
1532
1533 array = &so->arrayKeys[arrayidx++];
1534
1535 if (((cur->sk_flags & SK_BT_REQFWD) && ScanDirectionIsForward(dir)) ||
1536 ((cur->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsBackward(dir)))
1537 continue;
1538
1539 if (ScanDirectionIsForward(dir))
1540 first_elem_dir = 0;
1541 else
1542 first_elem_dir = array->num_elems - 1;
1543
1544 if (array->cur_elem != first_elem_dir)
1545 return false;
1546 }
1547
1548 return _bt_verify_keys_with_arraykeys(scan);
1549}
1550
1551/*
1552 * Verify that the scan's "so->keyData[]" scan keys are in agreement with
1553 * its array key state
1554 */
1555static bool
1556_bt_verify_keys_with_arraykeys(IndexScanDesc scan)
1557{
1558 BTScanOpaque so = (BTScanOpaque) scan->opaque;
1559 int last_sk_attno = InvalidAttrNumber,
1560 arrayidx = 0;
1561
1562 if (!so->qual_ok)
1563 return false;
1564
1565 for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
1566 {
1567 ScanKey cur = so->keyData + ikey;
1568 BTArrayKeyInfo *array;
1569
1570 if (cur->sk_strategy != BTEqualStrategyNumber ||
1571 !(cur->sk_flags & SK_SEARCHARRAY))
1572 continue;
1573
1574 array = &so->arrayKeys[arrayidx++];
1575 if (array->scan_key != ikey)
1576 return false;
1577
1578 if (array->num_elems <= 0)
1579 return false;
1580
1581 if (cur->sk_argument != array->elem_values[array->cur_elem])
1582 return false;
1583 if (last_sk_attno > cur->sk_attno)
1584 return false;
1585 last_sk_attno = cur->sk_attno;
1586 }
1587
1588 if (arrayidx != so->numArrayKeys)
1589 return false;
1590
1591 return true;
1592}
1593#endif
1594
1595/*
1596 * Test whether an indextuple satisfies all the scankey conditions.
1597 *
1598 * Return true if so, false if not. If the tuple fails to pass the qual,
1599 * we also determine whether there's any need to continue the scan beyond
1600 * this tuple, and set pstate.continuescan accordingly. See comments for
1601 * _bt_preprocess_keys() about how this is done.
1602 *
1603 * Forward scan callers can pass a high key tuple in the hopes of having
1604 * us set *continuescan to false, and avoiding an unnecessary visit to
1605 * the page to the right.
1606 *
1607 * Advances the scan's array keys when necessary for arrayKeys=true callers.
1608 * Caller can avoid all array related side-effects when calling just to do a
1609 * page continuescan precheck -- pass arrayKeys=false for that. Scans without
1610 * any array keys must always pass arrayKeys=false.
1611 *
1612 * Also stops and starts primitive index scans for arrayKeys=true callers.
1613 * Scans with array keys are required to set up page state that helps us with
1614 * this. The page's finaltup tuple (the page high key for a forward scan, or
1615 * the page's first non-pivot tuple for a backward scan) must be set in
1616 * pstate.finaltup ahead of the first call here for the page (or possibly the
1617 * first call after an initial continuescan-setting page precheck call). Set
1618 * this to NULL for rightmost page (or the leftmost page for backwards scans).
1619 *
1620 * scan: index scan descriptor (containing a search-type scankey)
1621 * pstate: page level input and output parameters
1622 * arrayKeys: should we advance the scan's array keys if necessary?
1623 * tuple: index tuple to test
1624 * tupnatts: number of attributes in tuple (high key may be truncated)
1625 */
1626bool
1627_bt_checkkeys(IndexScanDesc scan, BTReadPageState *pstate, bool arrayKeys,
1628 IndexTuple tuple, int tupnatts)
1629{
1630 TupleDesc tupdesc = RelationGetDescr(scan->indexRelation);
1631 BTScanOpaque so = (BTScanOpaque) scan->opaque;
1632 ScanDirection dir = so->currPos.dir;
1633 int ikey = 0;
1634 bool res;
1635
1636 Assert(BTreeTupleGetNAtts(tuple, scan->indexRelation) == tupnatts);
1637
1638 res = _bt_check_compare(scan, dir, tuple, tupnatts, tupdesc,
1639 arrayKeys, pstate->prechecked, pstate->firstmatch,
1640 &pstate->continuescan, &ikey);
1641
1642#ifdef USE_ASSERT_CHECKING
1643 if (!arrayKeys && so->numArrayKeys)
1644 {
1645 /*
1646 * This is a continuescan precheck call for a scan with array keys.
1647 *
1648 * Assert that the scan isn't in danger of becoming confused.
1649 */
1650 Assert(!so->scanBehind && !so->oppositeDirCheck);
1651 Assert(!pstate->prechecked && !pstate->firstmatch);
1652 Assert(!_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc,
1653 tupnatts, false, 0, NULL));
1654 }
1655 if (pstate->prechecked || pstate->firstmatch)
1656 {
1657 bool dcontinuescan;
1658 int dikey = 0;
1659
1660 /*
1661 * Call relied on continuescan/firstmatch prechecks -- assert that we
1662 * get the same answer without those optimizations
1663 */
1664 Assert(res == _bt_check_compare(scan, dir, tuple, tupnatts, tupdesc,
1665 false, false, false,
1666 &dcontinuescan, &dikey));
1667 Assert(pstate->continuescan == dcontinuescan);
1668 }
1669#endif
1670
1671 /*
1672 * Only one _bt_check_compare call is required in the common case where
1673 * there are no equality strategy array scan keys. Otherwise we can only
1674 * accept _bt_check_compare's answer unreservedly when it didn't set
1675 * pstate.continuescan=false.
1676 */
1677 if (!arrayKeys || pstate->continuescan)
1678 return res;
1679
1680 /*
1681 * _bt_check_compare call set continuescan=false in the presence of
1682 * equality type array keys. This could mean that the tuple is just past
1683 * the end of matches for the current array keys.
1684 *
1685 * It's also possible that the scan is still _before_ the _start_ of
1686 * tuples matching the current set of array keys. Check for that first.
1687 */
1688 if (_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc, tupnatts, true,
1689 ikey, NULL))
1690 {
1691 /*
1692 * Tuple is still before the start of matches according to the scan's
1693 * required array keys (according to _all_ of its required equality
1694 * strategy keys, actually).
1695 *
1696 * _bt_advance_array_keys occasionally sets so->scanBehind to signal
1697 * that the scan's current position/tuples might be significantly
1698 * behind (multiple pages behind) its current array keys. When this
1699 * happens, we need to be prepared to recover by starting a new
1700 * primitive index scan here, on our own.
1701 */
1702 Assert(!so->scanBehind ||
1703 so->keyData[0].sk_strategy == BTEqualStrategyNumber);
1704 if (unlikely(so->scanBehind) && pstate->finaltup &&
1705 _bt_tuple_before_array_skeys(scan, dir, pstate->finaltup, tupdesc,
1706 BTreeTupleGetNAtts(pstate->finaltup,
1707 scan->indexRelation),
1708 false, 0, NULL))
1709 {
1710 /* Cut our losses -- start a new primitive index scan now */
1711 pstate->continuescan = false;
1712 so->needPrimScan = true;
1713 }
1714 else
1715 {
1716 /* Override _bt_check_compare, continue primitive scan */
1717 pstate->continuescan = true;
1718
1719 /*
1720 * We will end up here repeatedly given a group of tuples > the
1721 * previous array keys and < the now-current keys (for a backwards
1722 * scan it's just the same, though the operators swap positions).
1723 *
1724 * We must avoid allowing this linear search process to scan very
1725 * many tuples from well before the start of tuples matching the
1726 * current array keys (or from well before the point where we'll
1727 * once again have to advance the scan's array keys).
1728 *
1729 * We keep the overhead under control by speculatively "looking
1730 * ahead" to later still-unscanned items from this same leaf page.
1731 * We'll only attempt this once the number of tuples that the
1732 * linear search process has examined starts to get out of hand.
1733 */
1734 pstate->rechecks++;
1735 if (pstate->rechecks >= LOOK_AHEAD_REQUIRED_RECHECKS)
1736 {
1737 /* See if we should skip ahead within the current leaf page */
1738 _bt_checkkeys_look_ahead(scan, pstate, tupnatts, tupdesc);
1739
1740 /*
1741 * Might have set pstate.skip to a later page offset. When
1742 * that happens then _bt_readpage caller will inexpensively
1743 * skip ahead to a later tuple from the same page (the one
1744 * just after the tuple we successfully "looked ahead" to).
1745 */
1746 }
1747 }
1748
1749 /* This indextuple doesn't match the current qual, in any case */
1750 return false;
1751 }
1752
1753 /*
1754 * Caller's tuple is >= the current set of array keys and other equality
1755 * constraint scan keys (or <= if this is a backwards scan). It's now
1756 * clear that we _must_ advance any required array keys in lockstep with
1757 * the scan.
1758 */
1759 return _bt_advance_array_keys(scan, pstate, tuple, tupnatts, tupdesc,
1760 ikey, true);
1761}
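/*
 * Rough sketch of how a caller is expected to drive _bt_checkkeys (see
 * _bt_readpage for the authoritative version): save the page's finaltup
 * before the first per-tuple call, call here once per tuple in the scan
 * direction, honor pstate.skip whenever it gets set, and stop reading the
 * page as soon as pstate.continuescan comes back false.
 */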
1762
1763/*
1764 * Test whether an indextuple fails to satisfy an inequality required in the
1765 * opposite direction only.
1766 *
1767 * Caller's finaltup tuple is the page high key (for forwards scans), or the
1768 * first non-pivot tuple (for backwards scans). Called during scans with
1769 * required array keys and required opposite-direction inequalities.
1770 *
1771 * Returns false if an inequality scan key required in the opposite direction
1772 * only isn't satisfied (and any earlier required scan keys are satisfied).
1773 * Otherwise returns true.
1774 *
1775 * An unsatisfied inequality required in the opposite direction only might
1776 * well enable skipping over many leaf pages, provided another _bt_first call
1777 * takes place. This type of unsatisfied inequality won't usually cause
1778 * _bt_checkkeys to stop the scan to consider array advancement/starting a new
1779 * primitive index scan.
1780 */
1781bool
1782_bt_oppodir_checkkeys(IndexScanDesc scan, ScanDirection dir,
1783 IndexTuple finaltup)
1784{
1785 Relation rel = scan->indexRelation;
1786 TupleDesc tupdesc = RelationGetDescr(rel);
1787 BTScanOpaque so = (BTScanOpaque) scan->opaque;
1788 int nfinaltupatts = BTreeTupleGetNAtts(finaltup, rel);
1789 bool continuescan;
1790 ScanDirection flipped = -dir;
1791 int ikey = 0;
1792
1793 Assert(so->numArrayKeys);
1794
1795 _bt_check_compare(scan, flipped, finaltup, nfinaltupatts, tupdesc,
1796 false, false, false, &continuescan, &ikey);
1797
1798 if (!continuescan && so->keyData[ikey].sk_strategy != BTEqualStrategyNumber)
1799 return false;
1800
1801 return true;
1802}
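/*
 * Example of an inequality that is required in the opposite direction only:
 * during a forward scan of an index on (a, b) with a qual such as
 * "WHERE a = ANY ('{1, 2, 3}') AND b > 42", the "b > 42" key is only
 * required in the backward direction.  When even finaltup fails "b > 42"
 * within the current "a" group, every remaining tuple on the page can be
 * presumed to fail it too, so starting a new primitive index scan (which
 * repositions using "b > 42") is likely to skip many uninteresting tuples.
 */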
1803
1804/*
1805 * Test whether an indextuple satisfies current scan condition.
1806 *
1807 * Return true if so, false if not. If not, also sets *continuescan to false
1808 * when it's also not possible for any later tuples to pass the current qual
1809 * (with the scan's current set of array keys, in the current scan direction),
1810 * in addition to setting *ikey to the so->keyData[] subscript/offset for the
1811 * unsatisfied scan key (needed when caller must consider advancing the scan's
1812 * array keys).
1813 *
1814 * This is a subroutine for _bt_checkkeys. We provisionally assume that
1815 * reaching the end of the current set of required keys (in particular the
1816 * current required array keys) ends the ongoing (primitive) index scan.
1817 * Callers without array keys should just end the scan right away when they
1818 * find that continuescan has been set to false here by us. Things are more
1819 * complicated for callers with array keys.
1820 *
1821 * Callers with array keys must first consider advancing the arrays when
1822 * continuescan has been set to false here by us. They must then consider if
1823 * it really does make sense to end the current (primitive) index scan, in
1824 * light of everything that is known at that point. (In general when we set
1825 * continuescan=false for these callers it must be treated as provisional.)
1826 *
1827 * We deal with advancing unsatisfied non-required arrays directly, though.
1828 * This is safe, since by definition non-required keys can't end the scan.
1829 * This is just how we determine if non-required arrays are just unsatisfied
1830 * by the current array key, or if they're truly unsatisfied (that is, if
1831 * they're unsatisfied by every possible array key).
1832 *
1833 * Though we advance non-required array keys on our own, that shouldn't have
1834 * any lasting consequences for the scan. By definition, non-required arrays
1835 * have no fixed relationship with the scan's progress. (There are delicate
1836 * considerations for non-required arrays when the arrays need to be advanced
1837 * following our setting continuescan to false, but that doesn't concern us.)
1838 *
1839 * Pass advancenonrequired=false to avoid all array related side effects.
1840 * This allows _bt_advance_array_keys caller to avoid infinite recursion.
1841 */
1842static bool
1843_bt_check_compare(IndexScanDesc scan, ScanDirection dir,
1844 IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
1845 bool advancenonrequired, bool prechecked, bool firstmatch,
1846 bool *continuescan, int *ikey)
1847{
1848 BTScanOpaque so = (BTScanOpaque) scan->opaque;
1849
1850 *continuescan = true; /* default assumption */
1851
1852 for (; *ikey < so->numberOfKeys; (*ikey)++)
1853 {
1854 ScanKey key = so->keyData + *ikey;
1855 Datum datum;
1856 bool isNull;
1857 bool requiredSameDir = false,
1858 requiredOppositeDirOnly = false;
1859
1860 /*
1861 * Check if the key is required in the current scan direction, in the
1862 * opposite scan direction _only_, or in neither direction
1863 */
1864 if (((key->sk_flags & SK_BT_REQFWD) && ScanDirectionIsForward(dir)) ||
1865 ((key->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsBackward(dir)))
1866 requiredSameDir = true;
1867 else if (((key->sk_flags & SK_BT_REQFWD) && ScanDirectionIsBackward(dir)) ||
1868 ((key->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsForward(dir)))
1869 requiredOppositeDirOnly = true;
1870
1871 /*
1872 * If the caller told us the *continuescan flag is known to be true
1873 * for the last item on the page, then we know the keys required for
1874 * the current direction scan should be matched. Otherwise, the
1875 * *continuescan flag would be set for the current item and
1876 * subsequently the last item on the page accordingly.
1877 *
1878 * If the key is required for the opposite direction scan, we can skip
1879 * the check if the caller tells us there was already at least one
1880 * matching item on the page. Also, we require the *continuescan flag
1881 * to be true for the last item on the page to know there are no
1882 * NULLs.
1883 *
1884 * Both cases above work except for the row keys, where NULLs could be
1885 * found in the middle of matching values.
1886 */
1887 if (prechecked &&
1888 (requiredSameDir || (requiredOppositeDirOnly && firstmatch)) &&
1889 !(key->sk_flags & SK_ROW_HEADER))
1890 continue;
1891
1892 if (key->sk_attno > tupnatts)
1893 {
1894 /*
1895 * This attribute is truncated (must be high key). The value for
1896 * this attribute in the first non-pivot tuple on the page to the
1897 * right could be any possible value. Assume that truncated
1898 * attribute passes the qual.
1899 */
1900 Assert(BTreeTupleIsPivot(tuple));
1901 continue;
1902 }
1903
1904 /* row-comparison keys need special processing */
1905 if (key->sk_flags & SK_ROW_HEADER)
1906 {
1907 if (_bt_check_rowcompare(key, tuple, tupnatts, tupdesc, dir,
1908 continuescan))
1909 continue;
1910 return false;
1911 }
1912
1913 datum = index_getattr(tuple,
1914 key->sk_attno,
1915 tupdesc,
1916 &isNull);
1917
1918 if (key->sk_flags & SK_ISNULL)
1919 {
1920 /* Handle IS NULL/NOT NULL tests */
1921 if (key->sk_flags & SK_SEARCHNULL)
1922 {
1923 if (isNull)
1924 continue; /* tuple satisfies this qual */
1925 }
1926 else
1927 {
1928 Assert(key->sk_flags & SK_SEARCHNOTNULL);
1929 if (!isNull)
1930 continue; /* tuple satisfies this qual */
1931 }
1932
1933 /*
1934 * Tuple fails this qual. If it's a required qual for the current
1935 * scan direction, then we can conclude no further tuples will
1936 * pass, either.
1937 */
1938 if (requiredSameDir)
1939 *continuescan = false;
1940
1941 /*
1942 * In any case, this indextuple doesn't match the qual.
1943 */
1944 return false;
1945 }
1946
1947 if (isNull)
1948 {
1949 if (key->sk_flags & SK_BT_NULLS_FIRST)
1950 {
1951 /*
1952 * Since NULLs are sorted before non-NULLs, we know we have
1953 * reached the lower limit of the range of values for this
1954 * index attr. On a backward scan, we can stop if this qual
1955 * is one of the "must match" subset. We can stop regardless
1956 * of whether the qual is > or <, so long as it's required,
1957 * because it's not possible for any future tuples to pass. On
1958 * a forward scan, however, we must keep going, because we may
1959 * have initially positioned to the start of the index.
1960 * (_bt_advance_array_keys also relies on this behavior during
1961 * forward scans.)
1962 */
1963 if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
1964 ScanDirectionIsBackward(dir))
1965 *continuescan = false;
1966 }
1967 else
1968 {
1969 /*
1970 * Since NULLs are sorted after non-NULLs, we know we have
1971 * reached the upper limit of the range of values for this
1972 * index attr. On a forward scan, we can stop if this qual is
1973 * one of the "must match" subset. We can stop regardless of
1974 * whether the qual is > or <, so long as it's required,
1975 * because it's not possible for any future tuples to pass. On
1976 * a backward scan, however, we must keep going, because we
1977 * may have initially positioned to the end of the index.
1978 * (_bt_advance_array_keys also relies on this behavior during
1979 * backward scans.)
1980 */
1981 if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
1982 ScanDirectionIsForward(dir))
1983 *continuescan = false;
1984 }
1985
1986 /*
1987 * In any case, this indextuple doesn't match the qual.
1988 */
1989 return false;
1990 }
1991
1992 /*
1993 * Apply the key-checking function, though only if we must.
1994 *
1995 * When a key is required in the opposite-of-scan direction _only_,
1996 * then it must already be satisfied if firstmatch=true indicates that
1997 * an earlier tuple from this same page satisfied it earlier on.
1998 */
1999 if (!(requiredOppositeDirOnly && firstmatch) &&
2000 !DatumGetBool(FunctionCall2Coll(&key->sk_func, key->sk_collation,
2001 datum, key->sk_argument)))
2002 {
2003 /*
2004 * Tuple fails this qual. If it's a required qual for the current
2005 * scan direction, then we can conclude no further tuples will
2006 * pass, either.
2007 *
2008 * Note: because we stop the scan as soon as any required equality
2009 * qual fails, it is critical that equality quals be used for the
2010 * initial positioning in _bt_first() when they are available. See
2011 * comments in _bt_first().
2012 */
2013 if (requiredSameDir)
2014 *continuescan = false;
2015
2016 /*
2017 * If this is a non-required equality-type array key, the tuple
2018 * needs to be checked against every possible array key. Handle
2019 * this by "advancing" the scan key's array to a matching value
2020 * (if we're successful then the tuple might match the qual).
2021 */
2022 else if (advancenonrequired &&
2023 key->sk_strategy == BTEqualStrategyNumber &&
2024 (key->sk_flags & SK_SEARCHARRAY))
2025 return _bt_advance_array_keys(scan, NULL, tuple, tupnatts,
2026 tupdesc, *ikey, false);
2027
2028 /*
2029 * This indextuple doesn't match the qual.
2030 */
2031 return false;
2032 }
2033 }
2034
2035 /* If we get here, the tuple passes all index quals. */
2036 return true;
2037}
2038
2039/*
2040 * Test whether an indextuple satisfies a row-comparison scan condition.
2041 *
2042 * Return true if so, false if not. If not, also clear *continuescan if
2043 * it's not possible for any future tuples in the current scan direction
2044 * to pass the qual.
2045 *
2046 * This is a subroutine for _bt_checkkeys/_bt_check_compare.
2047 */
2048static bool
2049_bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
2050 TupleDesc tupdesc, ScanDirection dir, bool *continuescan)
2051{
2052 ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
2053 int32 cmpresult = 0;
2054 bool result;
2055
2056 /* First subkey should be same as the header says */
2057 Assert(subkey->sk_attno == skey->sk_attno);
2058
2059 /* Loop over columns of the row condition */
2060 for (;;)
2061 {
2062 Datum datum;
2063 bool isNull;
2064
2065 Assert(subkey->sk_flags & SK_ROW_MEMBER);
2066
2067 if (subkey->sk_attno > tupnatts)
2068 {
2069 /*
2070 * This attribute is truncated (must be high key). The value for
2071 * this attribute in the first non-pivot tuple on the page to the
2072 * right could be any possible value. Assume that truncated
2073 * attribute passes the qual.
2074 */
2075 Assert(BTreeTupleIsPivot(tuple));
2076 cmpresult = 0;
2077 if (subkey->sk_flags & SK_ROW_END)
2078 break;
2079 subkey++;
2080 continue;
2081 }
2082
2083 datum = index_getattr(tuple,
2084 subkey->sk_attno,
2085 tupdesc,
2086 &isNull);
2087
2088 if (isNull)
2089 {
2090 if (subkey->sk_flags & SK_BT_NULLS_FIRST)
2091 {
2092 /*
2093 * Since NULLs are sorted before non-NULLs, we know we have
2094 * reached the lower limit of the range of values for this
2095 * index attr. On a backward scan, we can stop if this qual
2096 * is one of the "must match" subset. We can stop regardless
2097 * of whether the qual is > or <, so long as it's required,
2098 * because it's not possible for any future tuples to pass. On
2099 * a forward scan, however, we must keep going, because we may
2100 * have initially positioned to the start of the index.
2101 * (_bt_advance_array_keys also relies on this behavior during
2102 * forward scans.)
2103 */
2104 if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
2105 ScanDirectionIsBackward(dir))
2106 *continuescan = false;
2107 }
2108 else
2109 {
2110 /*
2111 * Since NULLs are sorted after non-NULLs, we know we have
2112 * reached the upper limit of the range of values for this
2113 * index attr. On a forward scan, we can stop if this qual is
2114 * one of the "must match" subset. We can stop regardless of
2115 * whether the qual is > or <, so long as it's required,
2116 * because it's not possible for any future tuples to pass. On
2117 * a backward scan, however, we must keep going, because we
2118 * may have initially positioned to the end of the index.
2119 * (_bt_advance_array_keys also relies on this behavior during
2120 * backward scans.)
2121 */
2122 if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
2123 ScanDirectionIsForward(dir))
2124 *continuescan = false;
2125 }
2126
2127 /*
2128 * In any case, this indextuple doesn't match the qual.
2129 */
2130 return false;
2131 }
2132
2133 if (subkey->sk_flags & SK_ISNULL)
2134 {
2135 /*
2136 * Unlike the simple-scankey case, this isn't a disallowed case
2137 * (except when it's the first row element that has the NULL arg).
2138 * But it can never match. If all the earlier row comparison
2139 * columns are required for the scan direction, we can stop the
2140 * scan, because there can't be another tuple that will succeed.
2141 */
2142 Assert(subkey != (ScanKey) DatumGetPointer(skey->sk_argument));
2143 subkey--;
2144 if ((subkey->sk_flags & SK_BT_REQFWD) &&
2145 ScanDirectionIsForward(dir))
2146 *continuescan = false;
2147 else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
2148 ScanDirectionIsBackward(dir))
2149 *continuescan = false;
2150 return false;
2151 }
2152
2153 /* Perform the test --- three-way comparison not bool operator */
2154 cmpresult = DatumGetInt32(FunctionCall2Coll(&subkey->sk_func,
2155 subkey->sk_collation,
2156 datum,
2157 subkey->sk_argument));
2158
2159 if (subkey->sk_flags & SK_BT_DESC)
2160 INVERT_COMPARE_RESULT(cmpresult);
2161
2162 /* Done comparing if unequal, else advance to next column */
2163 if (cmpresult != 0)
2164 break;
2165
2166 if (subkey->sk_flags & SK_ROW_END)
2167 break;
2168 subkey++;
2169 }
2170
2171 /*
2172 * At this point cmpresult indicates the overall result of the row
2173 * comparison, and subkey points to the deciding column (or the last
2174 * column if the result is "=").
2175 */
2176 switch (subkey->sk_strategy)
2177 {
2178 /* EQ and NE cases aren't allowed here */
2179 case BTLessStrategyNumber:
2180 result = (cmpresult < 0);
2181 break;
2182 case BTLessEqualStrategyNumber:
2183 result = (cmpresult <= 0);
2184 break;
2185 case BTGreaterEqualStrategyNumber:
2186 result = (cmpresult >= 0);
2187 break;
2188 case BTGreaterStrategyNumber:
2189 result = (cmpresult > 0);
2190 break;
2191 default:
2192 elog(ERROR, "unexpected strategy number %d", subkey->sk_strategy);
2193 result = 0; /* keep compiler quiet */
2194 break;
2195 }
2196
2197 if (!result)
2198 {
2199 /*
2200 * Tuple fails this qual. If it's a required qual for the current
2201 * scan direction, then we can conclude no further tuples will pass,
2202 * either. Note we have to look at the deciding column, not
2203 * necessarily the first or last column of the row condition.
2204 */
2205 if ((subkey->sk_flags & SK_BT_REQFWD) &&
2206 ScanDirectionIsForward(dir))
2207 *continuescan = false;
2208 else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
2209 ScanDirectionIsBackward(dir))
2210 *continuescan = false;
2211 }
2212
2213 return result;
2214}
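/*
 * Example: a row comparison such as "WHERE (a, b) > (5, 'foo')" against an
 * index on (a, b) is evaluated column by column.  "a" is compared against 5
 * first; only when that comparison returns equality does "b" get compared
 * against 'foo'.  The first unequal column (or the last column, when all of
 * them compare as equal) decides the overall result using the row
 * comparison's strategy, ">" in this example.
 */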
2215
2216/*
2217 * Determine if a scan with array keys should skip over uninteresting tuples.
2218 *
2219 * This is a subroutine for _bt_checkkeys. Called when _bt_readpage's linear
2220 * search process (started after it finishes reading an initial group of
2221 * matching tuples, used to locate the start of the next group of tuples
2222 * matching the next set of required array keys) has already scanned an
2223 * excessive number of tuples whose key space is "between arrays".
2224 *
2225 * When we perform look ahead successfully, we'll set pstate.skip, which
2226 * instructs _bt_readpage to skip ahead to that tuple next (could be past the
2227 * end of the scan's leaf page). Pages where the optimization is effective
2228 * will generally still need to skip several times. Each call here performs
2229 * only a single "look ahead" comparison of a later tuple, whose distance from
2230 * the current tuple's offset number is determined by applying heuristics.
2231 */
2232static void
2233_bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
2234 int tupnatts, TupleDesc tupdesc)
2235{
2236 BTScanOpaque so = (BTScanOpaque) scan->opaque;
2237 ScanDirection dir = so->currPos.dir;
2238 OffsetNumber aheadoffnum;
2239 IndexTuple ahead;
2240
2241 /* Avoid looking ahead when comparing the page high key */
2242 if (pstate->offnum < pstate->minoff)
2243 return;
2244
2245 /*
2246 * Don't look ahead when there aren't enough tuples remaining on the page
2247 * (in the current scan direction) for it to be worth our while
2248 */
2249 if (ScanDirectionIsForward(dir) &&
2250 pstate->offnum >= pstate->maxoff - LOOK_AHEAD_DEFAULT_DISTANCE)
2251 return;
2252 else if (ScanDirectionIsBackward(dir) &&
2253 pstate->offnum <= pstate->minoff + LOOK_AHEAD_DEFAULT_DISTANCE)
2254 return;
2255
2256 /*
2257 * The look ahead distance starts small, and ramps up as each call here
2258 * allows _bt_readpage to skip over more tuples
2259 */
2260 if (!pstate->targetdistance)
2261 pstate->targetdistance = LOOK_AHEAD_DEFAULT_DISTANCE;
2262 else if (pstate->targetdistance < MaxIndexTuplesPerPage / 2)
2263 pstate->targetdistance *= 2;
2264
2265 /* Don't read past the end (or before the start) of the page, though */
2266 if (ScanDirectionIsForward(dir))
2267 aheadoffnum = Min((int) pstate->maxoff,
2268 (int) pstate->offnum + pstate->targetdistance);
2269 else
2270 aheadoffnum = Max((int) pstate->minoff,
2271 (int) pstate->offnum - pstate->targetdistance);
2272
2273 ahead = (IndexTuple) PageGetItem(pstate->page,
2274 PageGetItemId(pstate->page, aheadoffnum));
2275 if (_bt_tuple_before_array_skeys(scan, dir, ahead, tupdesc, tupnatts,
2276 false, 0, NULL))
2277 {
2278 /*
2279 * Success -- instruct _bt_readpage to skip ahead to the very next tuple
2280 * after the one we determined was still before the current array keys
2281 */
2282 if (ScanDirectionIsForward(dir))
2283 pstate->skip = aheadoffnum + 1;
2284 else
2285 pstate->skip = aheadoffnum - 1;
2286 }
2287 else
2288 {
2289 /*
2290 * Failure -- "ahead" tuple is too far ahead (we were too aggressive).
2291 *
2292 * Reset the number of rechecks, and aggressively reduce the target
2293 * distance (we're much more aggressive here than we were when the
2294 * distance was initially ramped up).
2295 */
2296 pstate->rechecks = 0;
2297 pstate->targetdistance = Max(pstate->targetdistance / 8, 1);
2298 }
2299}
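/*
 * Illustration of the heuristic above: the first look ahead probes
 * LOOK_AHEAD_DEFAULT_DISTANCE (5) tuples beyond the current offset, and
 * each later call doubles the distance before probing (10, 20, 40, ...),
 * so long as it's still less than half of MaxIndexTuplesPerPage.  A failed
 * probe divides the distance by 8 (never below 1) and resets
 * pstate.rechecks, putting the caller back to plain linear searching for a
 * while.
 */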
2300
2301/*
2302 * _bt_killitems - set LP_DEAD state for items an indexscan caller has
2303 * told us were killed
2304 *
2305 * scan->opaque, referenced locally through so, contains information about the
2306 * current page and killed tuples thereon (generally, this should only be
2307 * called if so->numKilled > 0).
2308 *
2309 * The caller does not have a lock on the page and may or may not have the
2310 * page pinned in a buffer. Note that read-lock is sufficient for setting
2311 * LP_DEAD status (which is only a hint).
2312 *
2313 * We match items by heap TID before assuming they are the right ones to
2314 * delete. We cope with cases where items have moved right due to insertions.
2315 * If an item has moved off the current page due to a split, we'll fail to
2316 * find it and do nothing (this is not an error case --- we assume the item
2317 * will eventually get marked in a future indexscan).
2318 *
2319 * Note that if we hold a pin on the target page continuously from initially
2320 * reading the items until applying this function, VACUUM cannot have deleted
2321 * any items from the page, and so there is no need to search left from the
2322 * recorded offset. (This observation also guarantees that the item is still
2323 * the right one to delete, which might otherwise be questionable since heap
2324 * TIDs can get recycled.) This holds true even if the page has been modified
2325 * by inserts and page splits, so there is no need to consult the LSN.
2326 *
2327 * If the pin was released after reading the page, then we re-read it. If it
2328 * has been modified since we read it (as determined by the LSN), we dare not
2329 * flag any entries because it is possible that the old entry was vacuumed
2330 * away and the TID was re-used by a completely different heap tuple.
2331 */
2332void
2333_bt_killitems(IndexScanDesc scan)
2334{
2335 BTScanOpaque so = (BTScanOpaque) scan->opaque;
2336 Page page;
2337 BTPageOpaque opaque;
2338 OffsetNumber minoff;
2339 OffsetNumber maxoff;
2340 int i;
2341 int numKilled = so->numKilled;
2342 bool killedsomething = false;
2343 bool droppedpin PG_USED_FOR_ASSERTS_ONLY;
2344
2346
2347 /*
2348 * Always reset the scan state, so we don't look for same items on other
2349 * pages.
2350 */
2351 so->numKilled = 0;
2352
2353 if (BTScanPosIsPinned(so->currPos))
2354 {
2355 /*
2356 * We have held the pin on this page since we read the index tuples,
2357 * so all we need to do is lock it. The pin will have prevented
2358 * re-use of any TID on the page, so there is no need to check the
2359 * LSN.
2360 */
2361 droppedpin = false;
2362 _bt_lockbuf(scan->indexRelation, so->currPos.buf, BT_READ);
2363
2364 page = BufferGetPage(so->currPos.buf);
2365 }
2366 else
2367 {
2368 Buffer buf;
2369
2370 droppedpin = true;
2371 /* Attempt to re-read the buffer, getting pin and lock. */
2372 buf = _bt_getbuf(scan->indexRelation, so->currPos.currPage, BT_READ);
2373
2374 page = BufferGetPage(buf);
2375 if (BufferGetLSNAtomic(buf) == so->currPos.lsn)
2376 so->currPos.buf = buf;
2377 else
2378 {
2379 /* Modified while not pinned means hinting is not safe. */
2380 _bt_relbuf(scan->indexRelation, buf);
2381 return;
2382 }
2383 }
2384
2385 opaque = BTPageGetOpaque(page);
2386 minoff = P_FIRSTDATAKEY(opaque);
2387 maxoff = PageGetMaxOffsetNumber(page);
2388
2389 for (i = 0; i < numKilled; i++)
2390 {
2391 int itemIndex = so->killedItems[i];
2392 BTScanPosItem *kitem = &so->currPos.items[itemIndex];
2393 OffsetNumber offnum = kitem->indexOffset;
2394
2395 Assert(itemIndex >= so->currPos.firstItem &&
2396 itemIndex <= so->currPos.lastItem);
2397 if (offnum < minoff)
2398 continue; /* pure paranoia */
2399 while (offnum <= maxoff)
2400 {
2401 ItemId iid = PageGetItemId(page, offnum);
2402 IndexTuple ituple = (IndexTuple) PageGetItem(page, iid);
2403 bool killtuple = false;
2404
2405 if (BTreeTupleIsPosting(ituple))
2406 {
2407 int pi = i + 1;
2408 int nposting = BTreeTupleGetNPosting(ituple);
2409 int j;
2410
2411 /*
2412 * We rely on the convention that heap TIDs in the scanpos
2413 * items array are stored in ascending heap TID order for a
2414 * group of TIDs that originally came from a posting list
2415 * tuple. This convention even applies during backwards
2416 * scans, where returning the TIDs in descending order might
2417 * seem more natural. This is about effectiveness, not
2418 * correctness.
2419 *
2420 * Note that the page may have been modified in almost any way
2421 * since we first read it (in the !droppedpin case), so it's
2422 * possible that this posting list tuple wasn't a posting list
2423 * tuple when we first encountered its heap TIDs.
2424 */
2425 for (j = 0; j < nposting; j++)
2426 {
2427 ItemPointer item = BTreeTupleGetPostingN(ituple, j);
2428
2429 if (!ItemPointerEquals(item, &kitem->heapTid))
2430 break; /* out of posting list loop */
2431
2432 /*
2433 * kitem must have matching offnum when heap TIDs match,
2434 * though only in the common case where the page can't
2435 * have been concurrently modified
2436 */
2437 Assert(kitem->indexOffset == offnum || !droppedpin);
2438
2439 /*
2440 * Read-ahead to later kitems here.
2441 *
2442 * We rely on the assumption that not advancing kitem here
2443 * will prevent us from considering the posting list tuple
2444 * fully dead by not matching its next heap TID in the next
2445 * loop iteration.
2446 *
2447 * If, on the other hand, this is the final heap TID in
2448 * the posting list tuple, then the tuple gets killed
2449 * regardless (i.e. we handle the case where the last
2450 * kitem is also the last heap TID in the last index tuple
2451 * correctly -- posting tuple still gets killed).
2452 */
2453 if (pi < numKilled)
2454 kitem = &so->currPos.items[so->killedItems[pi++]];
2455 }
2456
2457 /*
2458 * Don't bother advancing the outermost loop's int iterator to
2459 * avoid processing killed items that relate to the same
2460 * offnum/posting list tuple. This micro-optimization hardly
2461 * seems worth it. (Further iterations of the outermost loop
2462 * will fail to match on this same posting list's first heap
2463 * TID instead, so we'll advance to the next offnum/index
2464 * tuple pretty quickly.)
2465 */
2466 if (j == nposting)
2467 killtuple = true;
2468 }
2469 else if (ItemPointerEquals(&ituple->t_tid, &kitem->heapTid))
2470 killtuple = true;
2471
2472 /*
2473 * Mark index item as dead, if it isn't already. Since this
2474 * happens while holding a buffer lock possibly in shared mode,
2475 * it's possible that multiple processes attempt to do this
2476 * simultaneously, leading to multiple full-page images being sent
2477 * to WAL (if wal_log_hints or data checksums are enabled), which
2478 * is undesirable.
2479 */
2480 if (killtuple && !ItemIdIsDead(iid))
2481 {
2482 /* found the item/all posting list items */
2483 ItemIdMarkDead(iid);
2484 killedsomething = true;
2485 break; /* out of inner search loop */
2486 }
2487 offnum = OffsetNumberNext(offnum);
2488 }
2489 }
2490
2491 /*
2492 * Since this can be redone later if needed, mark as dirty hint.
2493 *
2494 * Whenever we mark anything LP_DEAD, we also set the page's
2495 * BTP_HAS_GARBAGE flag, which is likewise just a hint. (Note that we
2496 * only rely on the page-level flag in !heapkeyspace indexes.)
2497 */
2498 if (killedsomething)
2499 {
2500 opaque->btpo_flags |= BTP_HAS_GARBAGE;
2501 MarkBufferDirtyHint(so->currPos.buf, true);
2502 }
2503
2504 _bt_unlockbuf(scan->indexRelation, so->currPos.buf);
2505}
2506
2507
2508/*
2509 * The following routines manage a shared-memory area in which we track
2510 * assignment of "vacuum cycle IDs" to currently-active btree vacuuming
2511 * operations. There is a single counter which increments each time we
2512 * start a vacuum to assign it a cycle ID. Since multiple vacuums could
2513 * be active concurrently, we have to track the cycle ID for each active
2514 * vacuum; this requires at most MaxBackends entries (usually far fewer).
2515 * We assume at most one vacuum can be active for a given index.
2516 *
2517 * Access to the shared memory area is controlled by BtreeVacuumLock.
2518 * In principle we could use a separate lmgr locktag for each index,
2519 * but a single LWLock is much cheaper, and given the short time that
2520 * the lock is ever held, the concurrency hit should be minimal.
2521 */
2522
2523typedef struct BTOneVacInfo
2524{
2525 LockRelId relid; /* global identifier of an index */
2526 BTCycleId cycleid; /* cycle ID for its active VACUUM */
2527} BTOneVacInfo;
2528
2529typedef struct BTVacInfo
2530{
2531 BTCycleId cycle_ctr; /* cycle ID most recently assigned */
2532 int num_vacuums; /* number of currently active VACUUMs */
2533 int max_vacuums; /* allocated length of vacuums[] array */
2534 BTOneVacInfo vacuums[FLEXIBLE_ARRAY_MEMBER];
2535} BTVacInfo;
2536
2537static BTVacInfo *btvacinfo;
2538
2539
2540/*
2541 * _bt_vacuum_cycleid --- get the active vacuum cycle ID for an index,
2542 * or zero if there is no active VACUUM
2543 *
2544 * Note: for correct interlocking, the caller must already hold pin and
2545 * exclusive lock on each buffer it will store the cycle ID into. This
2546 * ensures that even if a VACUUM starts immediately afterwards, it cannot
2547 * process those pages until the page split is complete.
2548 */
2549BTCycleId
2550_bt_vacuum_cycleid(Relation rel)
2551{
2552 BTCycleId result = 0;
2553 int i;
2554
2555 /* Share lock is enough since this is a read-only operation */
2556 LWLockAcquire(BtreeVacuumLock, LW_SHARED);
2557
2558 for (i = 0; i < btvacinfo->num_vacuums; i++)
2559 {
2560 BTOneVacInfo *vac = &btvacinfo->vacuums[i];
2561
2562 if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
2563 vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
2564 {
2565 result = vac->cycleid;
2566 break;
2567 }
2568 }
2569
2570 LWLockRelease(BtreeVacuumLock);
2571 return result;
2572}
2573
2574/*
2575 * _bt_start_vacuum --- assign a cycle ID to a just-starting VACUUM operation
2576 *
2577 * Note: the caller must guarantee that it will eventually call
2578 * _bt_end_vacuum, else we'll permanently leak an array slot. To ensure
2579 * that this happens even in elog(FATAL) scenarios, the appropriate coding
2580 * is not just a PG_TRY, but
2581 * PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel))
2582 */
2582 */
2583BTCycleId
2584_bt_start_vacuum(Relation rel)
2585{
2586 BTCycleId result;
2587 int i;
2588 BTOneVacInfo *vac;
2589
2590 LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
2591
2592 /*
2593 * Assign the next cycle ID, being careful to avoid zero as well as the
2594 * reserved high values.
2595 */
2596 result = ++(btvacinfo->cycle_ctr);
2597 if (result == 0 || result > MAX_BT_CYCLE_ID)
2598 result = btvacinfo->cycle_ctr = 1;
2599
2600 /* Let's just make sure there's no entry already for this index */
2601 for (i = 0; i < btvacinfo->num_vacuums; i++)
2602 {
2603 vac = &btvacinfo->vacuums[i];
2604 if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
2605 vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
2606 {
2607 /*
2608 * Unlike most places in the backend, we have to explicitly
2609 * release our LWLock before throwing an error. This is because
2610 * we expect _bt_end_vacuum() to be called before transaction
2611 * abort cleanup can run to release LWLocks.
2612 */
2613 LWLockRelease(BtreeVacuumLock);
2614 elog(ERROR, "multiple active vacuums for index \"%s\"",
2615 RelationGetRelationName(rel));
2616 }
2617 }
2618
2619 /* OK, add an entry */
2620 if (btvacinfo->num_vacuums >= btvacinfo->max_vacuums)
2621 {
2622 LWLockRelease(BtreeVacuumLock);
2623 elog(ERROR, "out of btvacinfo slots");
2624 }
2625 vac = &btvacinfo->vacuums[btvacinfo->num_vacuums];
2626 vac->relid = rel->rd_lockInfo.lockRelId;
2627 vac->cycleid = result;
2628 btvacinfo->num_vacuums++;
2629
2630 LWLockRelease(BtreeVacuumLock);
2631 return result;
2632}
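/*
 * Sketch of the intended caller coding (btbulkdelete does something along
 * these lines; see nbtree.c for the authoritative version):
 *
 *     PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
 *     {
 *         cycleid = _bt_start_vacuum(rel);
 *         ... vacuum scan, stamping pages split during it with cycleid ...
 *     }
 *     PG_END_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
 *     _bt_end_vacuum(rel);
 */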
2633
2634/*
2635 * _bt_end_vacuum --- mark a btree VACUUM operation as done
2636 *
2637 * Note: this is deliberately coded not to complain if no entry is found;
2638 * this allows the caller to put PG_TRY around the start_vacuum operation.
2639 */
2640void
2641_bt_end_vacuum(Relation rel)
2642{
2643 int i;
2644
2645 LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
2646
2647 /* Find the array entry */
2648 for (i = 0; i < btvacinfo->num_vacuums; i++)
2649 {
2650 BTOneVacInfo *vac = &btvacinfo->vacuums[i];
2651
2652 if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
2653 vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
2654 {
2655 /* Remove it by shifting down the last entry */
2656 *vac = btvacinfo->vacuums[btvacinfo->num_vacuums - 1];
2657 btvacinfo->num_vacuums--;
2658 break;
2659 }
2660 }
2661
2662 LWLockRelease(BtreeVacuumLock);
2663}
2664
2665/*
2666 * _bt_end_vacuum wrapped as an on_shmem_exit callback function
2667 */
2668void
2669_bt_end_vacuum_callback(int code, Datum arg)
2670{
2671 _bt_end_vacuum((Relation) DatumGetPointer(arg));
2672}
2673
2674/*
2675 * BTreeShmemSize --- report amount of shared memory space needed
2676 */
2677Size
2678BTreeShmemSize(void)
2679{
2680 Size size;
2681
2682 size = offsetof(BTVacInfo, vacuums);
2683 size = add_size(size, mul_size(MaxBackends, sizeof(BTOneVacInfo)));
2684 return size;
2685}
2686
2687/*
2688 * BTreeShmemInit --- initialize this module's shared memory
2689 */
2690void
2691BTreeShmemInit(void)
2692{
2693 bool found;
2694
2695 btvacinfo = (BTVacInfo *) ShmemInitStruct("BTree Vacuum State",
2696 BTreeShmemSize(),
2697 &found);
2698
2699 if (!IsUnderPostmaster)
2700 {
2701 /* Initialize shared memory area */
2702 Assert(!found);
2703
2704 /*
2705 * It doesn't really matter what the cycle counter starts at, but
2706 * having it always start the same doesn't seem good. Seed with
2707 * low-order bits of time() instead.
2708 */
2709 btvacinfo->cycle_ctr = (BTCycleId) time(NULL);
2710
2711 btvacinfo->num_vacuums = 0;
2712 btvacinfo->max_vacuums = MaxBackends;
2713 }
2714 else
2715 Assert(found);
2716}
2717
2718bytea *
2719btoptions(Datum reloptions, bool validate)
2720{
2721 static const relopt_parse_elt tab[] = {
2722 {"fillfactor", RELOPT_TYPE_INT, offsetof(BTOptions, fillfactor)},
2723 {"vacuum_cleanup_index_scale_factor", RELOPT_TYPE_REAL,
2724 offsetof(BTOptions, vacuum_cleanup_index_scale_factor)},
2725 {"deduplicate_items", RELOPT_TYPE_BOOL,
2726 offsetof(BTOptions, deduplicate_items)}
2727 };
2728
2729 return (bytea *) build_reloptions(reloptions, validate,
2730 RELOPT_KIND_BTREE,
2731 sizeof(BTOptions),
2732 tab, lengthof(tab));
2733}
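/*
 * The entries above correspond to the storage parameters users can set, for
 * example:
 *
 *     CREATE INDEX idx ON tab (col)
 *         WITH (fillfactor = 70, deduplicate_items = off);
 */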
2734
2735/*
2736 * btproperty() -- Check boolean properties of indexes.
2737 *
2738 * This is optional, but handling AMPROP_RETURNABLE here saves opening the rel
2739 * to call btcanreturn.
2740 */
2741bool
2742btproperty(Oid index_oid, int attno,
2743 IndexAMProperty prop, const char *propname,
2744 bool *res, bool *isnull)
2745{
2746 switch (prop)
2747 {
2748 case AMPROP_RETURNABLE:
2749 /* answer only for columns, not AM or whole index */
2750 if (attno == 0)
2751 return false;
2752 /* otherwise, btree can always return data */
2753 *res = true;
2754 return true;
2755
2756 default:
2757 return false; /* punt to generic code */
2758 }
2759}
2760
2761/*
2762 * btbuildphasename() -- Return name of index build phase.
2763 */
2764char *
2765btbuildphasename(int64 phasenum)
2766{
2767 switch (phasenum)
2768 {
2770 return "initializing";
2772 return "scanning table";
2774 return "sorting live tuples";
2776 return "sorting dead tuples";
2778 return "loading tuples in tree";
2779 default:
2780 return NULL;
2781 }
2782}
2783
2784/*
2785 * _bt_truncate() -- create tuple without unneeded suffix attributes.
2786 *
2787 * Returns truncated pivot index tuple allocated in caller's memory context,
2788 * with key attributes copied from caller's firstright argument. If rel is
2789 * an INCLUDE index, non-key attributes will definitely be truncated away,
2790 * since they're not part of the key space. More aggressive suffix
2791 * truncation can take place when it's clear that the returned tuple does not
2792 * need one or more suffix key attributes. We only need to keep firstright
2793 * attributes up to and including the first non-lastleft-equal attribute.
2794 * Caller's insertion scankey is used to compare the tuples; the scankey's
2795 * argument values are not considered here.
2796 *
2797 * Note that returned tuple's t_tid offset will hold the number of attributes
2798 * present, so the original item pointer offset is not represented. Caller
2799 * should only change truncated tuple's downlink. Note also that truncated
2800 * key attributes are treated as containing "minus infinity" values by
2801 * _bt_compare().
2802 *
2803 * In the worst case (when a heap TID must be appended to distinguish lastleft
2804 * from firstright), the size of the returned tuple is the size of firstright
2805 * plus the size of an additional MAXALIGN()'d item pointer. This guarantee
2806 * is important, since callers need to stay under the 1/3 of a page
2807 * restriction on tuple size. If this routine is ever taught to truncate
2808 * within an attribute/datum, it will need to avoid returning an enlarged
2809 * tuple to caller when truncation + TOAST compression ends up enlarging the
2810 * final datum.
2811 */
2812IndexTuple
2813_bt_truncate(Relation rel, IndexTuple lastleft, IndexTuple firstright,
2814 BTScanInsert itup_key)
2815{
2816 TupleDesc itupdesc = RelationGetDescr(rel);
2817 int nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
2818 int keepnatts;
2819 IndexTuple pivot;
2820 IndexTuple tidpivot;
2821 ItemPointer pivotheaptid;
2822 Size newsize;
2823
2824 /*
2825 * We should only ever truncate non-pivot tuples from leaf pages. It's
2826 * never okay to truncate when splitting an internal page.
2827 */
2828 Assert(!BTreeTupleIsPivot(lastleft) && !BTreeTupleIsPivot(firstright));
2829
2830 /* Determine how many attributes must be kept in truncated tuple */
2831 keepnatts = _bt_keep_natts(rel, lastleft, firstright, itup_key);
2832
2833#ifdef DEBUG_NO_TRUNCATE
2834 /* Force truncation to be ineffective for testing purposes */
2835 keepnatts = nkeyatts + 1;
2836#endif
2837
2838 pivot = index_truncate_tuple(itupdesc, firstright,
2839 Min(keepnatts, nkeyatts));
2840
2841 if (BTreeTupleIsPosting(pivot))
2842 {
2843 /*
2844 * index_truncate_tuple() just returns a straight copy of firstright
2845 * when it has no attributes to truncate. When that happens, we may
2846 * need to truncate away a posting list here instead.
2847 */
2848 Assert(keepnatts == nkeyatts || keepnatts == nkeyatts + 1);
2849 Assert(IndexRelationGetNumberOfAttributes(rel) == nkeyatts);
2850 pivot->t_info &= ~INDEX_SIZE_MASK;
2851 pivot->t_info |= MAXALIGN(BTreeTupleGetPostingOffset(firstright));
2852 }
2853
2854 /*
2855 * If there is a distinguishing key attribute within pivot tuple, we're
2856 * done
2857 */
2858 if (keepnatts <= nkeyatts)
2859 {
2860 BTreeTupleSetNAtts(pivot, keepnatts, false);
2861 return pivot;
2862 }
2863
2864 /*
2865 * We have to store a heap TID in the new pivot tuple, since no non-TID
2866 * key attribute value in firstright distinguishes the right side of the
2867 * split from the left side. nbtree conceptualizes this case as an
2868 * inability to truncate away any key attributes, since heap TID is
2869 * treated as just another key attribute (despite lacking a pg_attribute
2870 * entry).
2871 *
2872 * Use enlarged space that holds a copy of pivot. We need the extra space
2873 * to store a heap TID at the end (using the special pivot tuple
2874 * representation). Note that the original pivot already has firstright's
2875 * possible posting list/non-key attribute values removed at this point.
2876 */
2877 newsize = MAXALIGN(IndexTupleSize(pivot)) + MAXALIGN(sizeof(ItemPointerData));
2878 tidpivot = palloc0(newsize);
2879 memcpy(tidpivot, pivot, MAXALIGN(IndexTupleSize(pivot)));
2880 /* Cannot leak memory here */
2881 pfree(pivot);
2882
2883 /*
2884 * Store all of firstright's key attribute values plus a tiebreaker heap
2885 * TID value in enlarged pivot tuple
2886 */
2887 tidpivot->t_info &= ~INDEX_SIZE_MASK;
2888 tidpivot->t_info |= newsize;
2889 BTreeTupleSetNAtts(tidpivot, nkeyatts, true);
2890 pivotheaptid = BTreeTupleGetHeapTID(tidpivot);
2891
2892 /*
2893 * Lehman & Yao use lastleft as the leaf high key in all cases, but don't
2894 * consider suffix truncation. It seems like a good idea to follow that
2895 * example in cases where no truncation takes place -- use lastleft's heap
2896 * TID. (This is also the closest value to negative infinity that's
2897 * legally usable.)
2898 */
2899 ItemPointerCopy(BTreeTupleGetMaxHeapTID(lastleft), pivotheaptid);
2900
2901 /*
2902 * We're done. Assert() that heap TID invariants hold before returning.
2903 *
2904 * Lehman and Yao require that the downlink to the right page, which is to
2905 * be inserted into the parent page in the second phase of a page split, be
2906 * a strict lower bound on items on the right page, and a non-strict upper
2907 * bound for items on the left page. Assert that heap TIDs follow these
2908 * invariants, since a heap TID value is apparently needed as a
2909 * tiebreaker.
2910 */
2911#ifndef DEBUG_NO_TRUNCATE
2912 Assert(ItemPointerCompare(BTreeTupleGetMaxHeapTID(lastleft),
2913 BTreeTupleGetHeapTID(firstright)) < 0);
2914 Assert(ItemPointerCompare(pivotheaptid,
2915 BTreeTupleGetHeapTID(lastleft)) >= 0);
2916 Assert(ItemPointerCompare(pivotheaptid,
2917 BTreeTupleGetHeapTID(firstright)) < 0);
2918#else
2919
2920 /*
2921 * Those invariants aren't guaranteed to hold for lastleft + firstright
2922 * heap TID attribute values when they're considered here only because
2923 * DEBUG_NO_TRUNCATE is defined (a heap TID is probably not actually
2924 * needed as a tiebreaker). DEBUG_NO_TRUNCATE must therefore use a heap
2925 * TID value that always works as a strict lower bound for items to the
2926 * right. In particular, it must avoid using firstright's leading key
2927 * attribute values along with lastleft's heap TID value when lastleft's
2928 * TID happens to be greater than firstright's TID.
2929 */
2930 ItemPointerCopy(BTreeTupleGetHeapTID(firstright), pivotheaptid);
2931
2932 /*
2933 * Pivot heap TID should never be fully equal to firstright. Note that
2934 * the pivot heap TID will still end up equal to lastleft's heap TID when
2935 * that's the only usable value.
2936 */
2937 ItemPointerSetOffsetNumber(pivotheaptid,
2938 OffsetNumberPrev(ItemPointerGetOffsetNumber(pivotheaptid)));
2939 Assert(ItemPointerCompare(pivotheaptid,
2940 BTreeTupleGetHeapTID(firstright)) < 0);
2941#endif
2942
2943 return tidpivot;
2944}
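/*
 * Worked example, for an index over (country, city): splitting between
 * lastleft = ('Canada', 'Toronto') and firstright = ('USA', 'Albany')
 * allows the new pivot to be just ('USA'), with the city attribute
 * truncated away.  Splitting between two ('USA', 'Albany') duplicates
 * instead forces the pivot to keep both key attributes plus lastleft's
 * heap TID as a tiebreaker.
 */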
2945
2946/*
2947 * _bt_keep_natts - how many key attributes to keep when truncating.
2948 *
2949 * Caller provides two tuples that enclose a split point. Caller's insertion
2950 * scankey is used to compare the tuples; the scankey's argument values are
2951 * not considered here.
2952 *
2953 * This can return a number of attributes that is one greater than the
2954 * number of key attributes for the index relation. This indicates that the
2955 * caller must use a heap TID as a unique-ifier in new pivot tuple.
2956 */
2957static int
2958_bt_keep_natts(Relation rel, IndexTuple lastleft, IndexTuple firstright,
2959 BTScanInsert itup_key)
2960{
2961 int nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
2962 TupleDesc itupdesc = RelationGetDescr(rel);
2963 int keepnatts;
2964 ScanKey scankey;
2965
2966 /*
2967 * _bt_compare() treats truncated key attributes as having the value minus
2968 * infinity, which would break searches within !heapkeyspace indexes. We
2969 * must still truncate away non-key attribute values, though.
2970 */
2971 if (!itup_key->heapkeyspace)
2972 return nkeyatts;
2973
2974 scankey = itup_key->scankeys;
2975 keepnatts = 1;
2976 for (int attnum = 1; attnum <= nkeyatts; attnum++, scankey++)
2977 {
2978 Datum datum1,
2979 datum2;
2980 bool isNull1,
2981 isNull2;
2982
2983 datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
2984 datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
2985
2986 if (isNull1 != isNull2)
2987 break;
2988
2989 if (!isNull1 &&
2990 DatumGetInt32(FunctionCall2Coll(&scankey->sk_func,
2991 scankey->sk_collation,
2992 datum1,
2993 datum2)) != 0)
2994 break;
2995
2996 keepnatts++;
2997 }
2998
2999 /*
3000 * Assert that _bt_keep_natts_fast() agrees with us in passing. This is
3001 * expected in an allequalimage index.
3002 */
3003 Assert(!itup_key->allequalimage ||
3004 keepnatts == _bt_keep_natts_fast(rel, lastleft, firstright));
3005
3006 return keepnatts;
3007}
3008
3009/*
3010 * _bt_keep_natts_fast - fast bitwise variant of _bt_keep_natts.
3011 *
3012 * This is exported so that a candidate split point can have its effect on
3013 * suffix truncation inexpensively evaluated ahead of time when finding a
3014 * split location. A naive bitwise approach to datum comparisons is used to
3015 * save cycles.
3016 *
3017 * The approach taken here usually provides the same answer as _bt_keep_natts
3018 * will (for the same pair of tuples from a heapkeyspace index), since the
3019 * majority of btree opclasses can never indicate that two datums are equal
3020 * unless they're bitwise equal after detoasting. When an index only has
3021 * "equal image" columns, routine is guaranteed to give the same result as
3022 * _bt_keep_natts would.
3023 *
3024 * Callers can rely on the fact that attributes considered equal here are
3025 * definitely also equal according to _bt_keep_natts, even when the index uses
3026 * an opclass or collation that is not "allequalimage"/deduplication-safe.
3027 * This weaker guarantee is good enough for nbtsplitloc.c caller, since false
3028 * negatives generally only have the effect of making leaf page splits use a
3029 * more balanced split point.
3030 */
3031int
3032_bt_keep_natts_fast(Relation rel, IndexTuple lastleft, IndexTuple firstright)
3033{
3034 TupleDesc itupdesc = RelationGetDescr(rel);
3035 int keysz = IndexRelationGetNumberOfKeyAttributes(rel);
3036 int keepnatts;
3037
3038 keepnatts = 1;
3039 for (int attnum = 1; attnum <= keysz; attnum++)
3040 {
3041 Datum datum1,
3042 datum2;
3043 bool isNull1,
3044 isNull2;
3045 CompactAttribute *att;
3046
3047 datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
3048 datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
3049 att = TupleDescCompactAttr(itupdesc, attnum - 1);
3050
3051 if (isNull1 != isNull2)
3052 break;
3053
3054 if (!isNull1 &&
3055 !datum_image_eq(datum1, datum2, att->attbyval, att->attlen))
3056 break;
3057
3058 keepnatts++;
3059 }
3060
3061 return keepnatts;
3062}
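/*
 * Example of a tolerable "false negative": the numeric datums 5.0 and 5.00
 * compare as equal under the numeric opclass, but are not bitwise equal
 * (they carry different display scales), so this routine may report one
 * fewer equal attribute than _bt_keep_natts would for the same tuples.
 */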
3063
3064/*
3065 * _bt_check_natts() -- Verify tuple has expected number of attributes.
3066 *
3067 * Returns value indicating if the expected number of attributes were found
3068 * for a particular offset on page. This can be used as a general purpose
3069 * sanity check.
3070 *
3071 * Testing a tuple directly with BTreeTupleGetNAtts() should generally be
3072 * preferred to calling here. That's usually more convenient, and is always
3073 * more explicit. Call here instead when offnum's tuple may be a negative
3074 * infinity tuple that uses the pre-v11 on-disk representation, or when a low
3075 * context check is appropriate. This routine is as strict as possible about
3076 * what is expected on each version of btree.
3077 */
3078bool
3079_bt_check_natts(Relation rel, bool heapkeyspace, Page page, OffsetNumber offnum)
3080{
3081 int16 natts = IndexRelationGetNumberOfAttributes(rel);
3082 int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
3083 BTPageOpaque opaque = BTPageGetOpaque(page);
3084 IndexTuple itup;
3085 int tupnatts;
3086
3087 /*
3088 * We cannot reliably test a deleted or half-dead page, since they have
3089 * dummy high keys
3090 */
3091 if (P_IGNORE(opaque))
3092 return true;
3093
3094 Assert(offnum >= FirstOffsetNumber &&
3095 offnum <= PageGetMaxOffsetNumber(page));
3096
3097 itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
3098 tupnatts = BTreeTupleGetNAtts(itup, rel);
3099
3100 /* !heapkeyspace indexes do not support deduplication */
3101 if (!heapkeyspace && BTreeTupleIsPosting(itup))
3102 return false;
3103
3104 /* Posting list tuples should never have "pivot heap TID" bit set */
3105 if (BTreeTupleIsPosting(itup) &&
3106 (ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) &
3107 BT_PIVOT_HEAP_TID_ATTR) != 0)
3108 return false;
3109
3110 /* INCLUDE indexes do not support deduplication */
3111 if (natts != nkeyatts && BTreeTupleIsPosting(itup))
3112 return false;
3113
3114 if (P_ISLEAF(opaque))
3115 {
3116 if (offnum >= P_FIRSTDATAKEY(opaque))
3117 {
3118 /*
3119 * Non-pivot tuple should never be explicitly marked as a pivot
3120 * tuple
3121 */
3122 if (BTreeTupleIsPivot(itup))
3123 return false;
3124
3125 /*
3126 * Leaf tuples that are not the page high key (non-pivot tuples)
3127 * should never be truncated. (Note that tupnatts must have been
3128 * inferred, even with a posting list tuple, because only pivot
3129 * tuples store tupnatts directly.)
3130 */
3131 return tupnatts == natts;
3132 }
3133 else
3134 {
3135 /*
3136 * Rightmost page doesn't contain a page high key, so tuple was
3137 * checked above as ordinary leaf tuple
3138 */
3139 Assert(!P_RIGHTMOST(opaque));
3140
3141 /*
3142 * !heapkeyspace high key tuple contains only key attributes. Note
3143 * that tupnatts will only have been explicitly represented in
3144 * !heapkeyspace indexes that happen to have non-key attributes.
3145 */
3146 if (!heapkeyspace)
3147 return tupnatts == nkeyatts;
3148
3149 /* Use generic heapkeyspace pivot tuple handling */
3150 }
3151 }
3152 else /* !P_ISLEAF(opaque) */
3153 {
3154 if (offnum == P_FIRSTDATAKEY(opaque))
3155 {
3156 /*
3157 * The first tuple on any internal page (possibly the first after
3158 * its high key) is its negative infinity tuple. Negative
3159 * infinity tuples are always truncated to zero attributes. They
3160 * are a particular kind of pivot tuple.
3161 */
3162 if (heapkeyspace)
3163 return tupnatts == 0;
3164
3165 /*
3166 * The number of attributes won't be explicitly represented if the
3167 * negative infinity tuple was generated during a page split that
3168 * occurred with a version of Postgres before v11. There must be
3169 * a problem when there is an explicit representation that is
3170 * non-zero, or when there is no explicit representation and the
3171 * tuple is evidently not a pre-pg_upgrade tuple.
3172 *
3173 * Prior to v11, downlinks always had P_HIKEY as their offset.
3174 * Accept that as an alternative indication of a valid
3175 * !heapkeyspace negative infinity tuple.
3176 */
3177 return tupnatts == 0 ||
3178 ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY;
3179 }
3180 else
3181 {
3182 /*
3183 * !heapkeyspace downlink tuple with separator key contains only
3184 * key attributes. Note that tupnatts will only have been
3185 * explicitly represented in !heapkeyspace indexes that happen to
3186 * have non-key attributes.
3187 */
3188 if (!heapkeyspace)
3189 return tupnatts == nkeyatts;
3190
3191 /* Use generic heapkeyspace pivot tuple handling */
3192 }
3193 }
3194
3195 /* Handle heapkeyspace pivot tuples (excluding minus infinity items) */
3196 Assert(heapkeyspace);
3197
3198 /*
3199 * Explicit representation of the number of attributes is mandatory with
3200 * heapkeyspace index pivot tuples, regardless of whether or not there are
3201 * non-key attributes.
3202 */
3203 if (!BTreeTupleIsPivot(itup))
3204 return false;
3205
3206 /* Pivot tuple should not use posting list representation (redundant) */
3207 if (BTreeTupleIsPosting(itup))
3208 return false;
3209
3210 /*
3211 * Heap TID is a tiebreaker key attribute, so it cannot be untruncated
3212 * when any other key attribute is truncated
3213 */
3214 if (BTreeTupleGetHeapTID(itup) != NULL && tupnatts != nkeyatts)
3215 return false;
3216
3217 /*
3218 * Pivot tuple must have at least one untruncated key attribute (minus
3219 * infinity pivot tuples are the only exception). Pivot tuples can never
3220 * represent that there is a value present for a key attribute that
3221 * exceeds pg_index.indnkeyatts for the index.
3222 */
3223 return tupnatts > 0 && tupnatts <= nkeyatts;
3224}
3225
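/*
 * Hedged sketch (not part of nbtutils.c) of how a verification routine in
 * the style of contrib/amcheck might apply _bt_check_natts() to every item
 * on a page.  The caller is assumed to hold at least a pin and read lock on
 * buf, and to have obtained heapkeyspace from _bt_metaversion(); the
 * function name is hypothetical.
 */
#include "postgres.h"

#include "access/nbtree.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"

static void
example_check_page_natts(Relation rel, bool heapkeyspace, Buffer buf)
{
	Page		page = BufferGetPage(buf);
	OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
	OffsetNumber offnum;

	for (offnum = FirstOffsetNumber; offnum <= maxoff;
		 offnum = OffsetNumberNext(offnum))
	{
		if (!_bt_check_natts(rel, heapkeyspace, page, offnum))
			ereport(ERROR,
					(errcode(ERRCODE_INDEX_CORRUPTED),
					 errmsg("wrong number of index tuple attributes in index \"%s\" at offset %u",
							RelationGetRelationName(rel), offnum)));
	}
}
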
3226/*
3227 *
3228 * _bt_check_third_page() -- check whether tuple fits on a btree page at all.
3229 *
3230 * We actually need to be able to fit three items on every page, so restrict
3231 * any one item to 1/3 the per-page available space. Note that itemsz should
3232 * not include the ItemId overhead.
3233 *
3234 * It might be useful to apply TOAST methods rather than throw an error here.
3235 * Using out of line storage would break assumptions made by suffix truncation
3236 * and by contrib/amcheck, though.
3237 */
3238void
3239_bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace,
3240 Page page, IndexTuple newtup)
3241{
3242 Size itemsz;
3243 BTPageOpaque opaque;
3244
3245 itemsz = MAXALIGN(IndexTupleSize(newtup));
3246
3247 /* Double check item size against limit */
3248 if (itemsz <= BTMaxItemSize(page))
3249 return;
3250
3251 /*
3252 * Tuple is probably too large to fit on page, but it's possible that the
3253 * index uses version 2 or version 3, or that page is an internal page, in
3254 * which case a slightly higher limit applies.
3255 */
3256 if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid(page))
3257 return;
3258
3259 /*
3260 * Internal page insertions cannot fail here, because that would mean that
3261 * an earlier leaf level insertion that should have failed didn't
3262 */
3263 opaque = BTPageGetOpaque(page);
3264 if (!P_ISLEAF(opaque))
3265 elog(ERROR, "cannot insert oversized tuple of size %zu on internal page of index \"%s\"",
3266 itemsz, RelationGetRelationName(rel));
3267
3268 ereport(ERROR,
3269 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
3270 errmsg("index row size %zu exceeds btree version %u maximum %zu for index \"%s\"",
3271 itemsz,
3272 needheaptidspace ? BTREE_VERSION : BTREE_NOVAC_VERSION,
3273 needheaptidspace ? BTMaxItemSize(page) :
3274 BTMaxItemSizeNoHeapTid(page),
3275 RelationGetRelationName(rel)),
3276 errdetail("Index row references tuple (%u,%u) in relation \"%s\".",
3277 ItemPointerGetBlockNumber(BTreeTupleGetHeapTID(newtup)),
3278 ItemPointerGetOffsetNumber(BTreeTupleGetHeapTID(newtup)),
3279 RelationGetRelationName(heap)),
3280 errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
3281 "Consider a function index of an MD5 hash of the value, "
3282 "or use full text indexing."),
3283 errtableconstraint(heap, RelationGetRelationName(rel))));
3284}
3285
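/*
 * Worked example of the limits enforced by _bt_check_third_page() above,
 * assuming the default 8192 byte BLCKSZ and 8 byte MAXALIGN (figures follow
 * from the BTMaxItemSize()/BTMaxItemSizeNoHeapTid() definitions and are
 * illustrative only):
 *
 *   page header + 3 line pointers + 3 heap TIDs = MAXALIGN(24 + 12 + 18) = 56
 *   special space (BTPageOpaqueData)            = MAXALIGN(16)           = 16
 *   BTMaxItemSize          = MAXALIGN_DOWN((8192 - 56 - 16) / 3) = 2704
 *
 *   page header + 3 line pointers               = MAXALIGN(24 + 12)      = 40
 *   BTMaxItemSizeNoHeapTid = MAXALIGN_DOWN((8192 - 40 - 16) / 3) = 2712
 *
 * So a btree version 4 index rejects (MAXALIGN'd) tuples larger than 2704
 * bytes, while version 2 and 3 indexes, which reserve no heap TID space in
 * pivot tuples, allow up to 2712 bytes.
 */
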
3286/*
3287 * Are all attributes in rel "equality is image equality" attributes?
3288 *
3289 * We use each attribute's BTEQUALIMAGE_PROC opclass procedure. If any
3290 * opclass either lacks a BTEQUALIMAGE_PROC procedure or returns false, we
3291 * return false; otherwise we return true.
3292 *
3293 * Returned boolean value is stored in index metapage during index builds.
3294 * Deduplication can only be used when we return true.
3295 */
3296bool
3297_bt_allequalimage(Relation rel, bool debugmessage)
3298{
3299 bool allequalimage = true;
3300
3301 /* INCLUDE indexes can never support deduplication */
3302 if (IndexRelationGetNumberOfAttributes(rel) !=
3303 IndexRelationGetNumberOfKeyAttributes(rel))
3304 return false;
3305
3306 for (int i = 0; i < IndexRelationGetNumberOfKeyAttributes(rel); i++)
3307 {
3308 Oid opfamily = rel->rd_opfamily[i];
3309 Oid opcintype = rel->rd_opcintype[i];
3310 Oid collation = rel->rd_indcollation[i];
3311 Oid equalimageproc;
3312
3313 equalimageproc = get_opfamily_proc(opfamily, opcintype, opcintype,
3314 BTEQUALIMAGE_PROC);
3315
3316 /*
3317 * If there is no BTEQUALIMAGE_PROC then deduplication is assumed to
3318 * be unsafe. Otherwise, actually call proc and see what it says.
3319 */
3320 if (!OidIsValid(equalimageproc) ||
3321 !DatumGetBool(OidFunctionCall1Coll(equalimageproc, collation,
3322 ObjectIdGetDatum(opcintype))))
3323 {
3324 allequalimage = false;
3325 break;
3326 }
3327 }
3328
3329 if (debugmessage)
3330 {
3331 if (allequalimage)
3332 elog(DEBUG1, "index \"%s\" can safely use deduplication",
3333 RelationGetRelationName(rel));
3334 else
3335 elog(DEBUG1, "index \"%s\" cannot use deduplication",
3336 RelationGetRelationName(rel));
3337 }
3338
3339 return allequalimage;
3340}
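
/*
 * Hedged sketch (not part of nbtutils.c) of a BTEQUALIMAGE_PROC, the btree
 * support function 4 that _bt_allequalimage() above looks up through
 * get_opfamily_proc().  Returning true asserts that datums which are equal
 * under the opclass are also bitwise identical, which is what makes
 * deduplication safe; opclasses whose equal values can have distinct
 * representations (numeric display scales, for example) must return false.
 * Core opclasses normally just register the stock btequalimage() function
 * as FUNCTION 4 in their operator class definition; the name
 * example_equalimage below is hypothetical.
 */
#include "postgres.h"

#include "fmgr.h"

PG_FUNCTION_INFO_V1(example_equalimage);

Datum
example_equalimage(PG_FUNCTION_ARGS)
{
	/* The only argument is the opclass's declared input type OID */
	Oid			opcintype = PG_GETARG_OID(0);

	(void) opcintype;			/* equality implies image equality here */

	PG_RETURN_BOOL(true);
}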