/*-------------------------------------------------------------------------
 *
 * nbtutils.c
 *	  Utility code for Postgres btree implementation.
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/access/nbtree/nbtutils.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <time.h>

#include "access/nbtree.h"
#include "access/reloptions.h"
#include "access/relscan.h"
#include "commands/progress.h"
#include "lib/qunique.h"
#include "miscadmin.h"
#include "utils/array.h"
#include "utils/datum.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"

typedef struct BTSortArrayContext
{
	FmgrInfo	flinfo;
	Oid			collation;
	bool		reverse;
} BTSortArrayContext;

static Datum _bt_find_extreme_element(IndexScanDesc scan, ScanKey skey,
									  StrategyNumber strat,
									  Datum *elems, int nelems);
static int	_bt_sort_array_elements(IndexScanDesc scan, ScanKey skey,
									bool reverse,
									Datum *elems, int nelems);
static int	_bt_compare_array_elements(const void *a, const void *b, void *arg);
static bool _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
									 ScanKey leftarg, ScanKey rightarg,
									 bool *result);
static bool _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption);
static void _bt_mark_scankey_required(ScanKey skey);
static bool _bt_check_rowcompare(ScanKey skey,
								 IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
								 ScanDirection dir, bool *continuescan);
static int	_bt_keep_natts(Relation rel, IndexTuple lastleft,
						   IndexTuple firstright, BTScanInsert itup_key);


/*
 * _bt_mkscankey
 *		Build an insertion scan key that contains comparison data from itup
 *		as well as comparator routines appropriate to the key datatypes.
 *
 * The result is intended for use with _bt_compare() and _bt_truncate().
 * Callers that don't need to fill out the insertion scankey arguments
 * (e.g. they use an ad-hoc comparison routine, or only need a scankey
 * for _bt_truncate()) can pass a NULL index tuple. The scankey will
 * be initialized as if an "all truncated" pivot tuple was passed
 * instead.
 *
 * Note that we may occasionally have to share lock the metapage to
 * determine whether or not the keys in the index are expected to be
 * unique (i.e. if this is a "heapkeyspace" index). We assume a
 * heapkeyspace index when caller passes a NULL tuple, allowing index
 * build callers to avoid accessing the non-existent metapage. We
 * also assume that the index is _not_ allequalimage when a NULL tuple
 * is passed; CREATE INDEX callers call _bt_allequalimage() to set the
 * field themselves.
 */
BTScanInsert
_bt_mkscankey(Relation rel, IndexTuple itup)
{
	BTScanInsert key;
	ScanKey		skey;
	TupleDesc	itupdesc;
	int			indnkeyatts;
	int16	   *indoption;
	int			tupnatts;
	int			i;

	itupdesc = RelationGetDescr(rel);
	indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
	indoption = rel->rd_indoption;
	tupnatts = itup ? BTreeTupleGetNAtts(itup, rel) : 0;

	Assert(tupnatts <= IndexRelationGetNumberOfAttributes(rel));

	/*
	 * We'll execute search using scan key constructed on key columns.
	 * Truncated attributes and non-key attributes are omitted from the final
	 * scan key.
	 */
	key = palloc(offsetof(BTScanInsertData, scankeys) +
				 sizeof(ScanKeyData) * indnkeyatts);
	if (itup)
		_bt_metaversion(rel, &key->heapkeyspace, &key->allequalimage);
	else
	{
		/* Utility statement callers can set these fields themselves */
		key->heapkeyspace = true;
		key->allequalimage = false;
	}
	key->anynullkeys = false;	/* initial assumption */
	key->nextkey = false;		/* usual case, required by btinsert */
	key->backward = false;		/* usual case, required by btinsert */
	key->keysz = Min(indnkeyatts, tupnatts);
	key->scantid = key->heapkeyspace && itup ?
		BTreeTupleGetHeapTID(itup) : NULL;
	skey = key->scankeys;
	for (i = 0; i < indnkeyatts; i++)
	{
		FmgrInfo   *procinfo;
		Datum		arg;
		bool		null;
		int			flags;

		/*
		 * We can use the cached (default) support procs since no cross-type
		 * comparison can be needed.
		 */
		procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);

		/*
		 * Key arguments built from truncated attributes (or when caller
		 * provides no tuple) are defensively represented as NULL values. They
		 * should never be used.
		 */
		if (i < tupnatts)
			arg = index_getattr(itup, i + 1, itupdesc, &null);
		else
		{
			arg = (Datum) 0;
			null = true;
		}
		flags = (null ? SK_ISNULL : 0) | (indoption[i] << SK_BT_INDOPTION_SHIFT);
		ScanKeyEntryInitializeWithInfo(&skey[i],
									   flags,
									   (AttrNumber) (i + 1),
									   InvalidStrategy,
									   InvalidOid,
									   rel->rd_indcollation[i],
									   procinfo,
									   arg);
		/* Record if any key attribute is NULL (or truncated) */
		if (null)
			key->anynullkeys = true;
	}

	/*
	 * In NULLS NOT DISTINCT mode, we pretend that there are no null keys, so
	 * that full uniqueness check is done.
	 */
	if (rel->rd_index->indnullsnotdistinct)
		key->anynullkeys = false;

	return key;
}

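/*
 * Example for exposition (a sketch, not code from this file): the
 * insertion path in nbtinsert.c builds its insertion scan key from the
 * tuple it is about to insert, roughly as in
 *
 *		itup_key = _bt_mkscankey(rel, itup);
 *		stack = _bt_search(...);	/- descend using the insertion scankey
 *
 * The _bt_search argument list is elided here because it varies across
 * releases; treat this as a usage sketch, not a verbatim call.
 */
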
/*
 * free a retracement stack made by _bt_search.
 */
void
_bt_freestack(BTStack stack)
{
	BTStack		ostack;

	while (stack != NULL)
	{
		ostack = stack;
		stack = stack->bts_parent;
		pfree(ostack);
	}
}


/*
 *	_bt_preprocess_array_keys() -- Preprocess SK_SEARCHARRAY scan keys
 *
 * If there are any SK_SEARCHARRAY scan keys, deconstruct the array(s) and
 * set up BTArrayKeyInfo info for each one that is an equality-type key.
 * Prepare modified scan keys in so->arrayKeyData, which will hold the current
 * array elements during each primitive indexscan operation. For inequality
 * array keys, it's sufficient to find the extreme element value and replace
 * the whole array with that scalar value.
 *
 * Note: the reason we need so->arrayKeyData, rather than just scribbling
 * on scan->keyData, is that callers are permitted to call btrescan without
 * supplying a new set of scankey data.
 */
void
_bt_preprocess_array_keys(IndexScanDesc scan)
{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	int			numberOfKeys = scan->numberOfKeys;
	int16	   *indoption = scan->indexRelation->rd_indoption;
	int			numArrayKeys;
	ScanKey		cur;
	int			i;
	MemoryContext oldContext;

	/* Quick check to see if there are any array keys */
	numArrayKeys = 0;
	for (i = 0; i < numberOfKeys; i++)
	{
		cur = &scan->keyData[i];
		if (cur->sk_flags & SK_SEARCHARRAY)
		{
			numArrayKeys++;
			Assert(!(cur->sk_flags & (SK_ROW_HEADER | SK_SEARCHNULL | SK_SEARCHNOTNULL)));
			/* If any arrays are null as a whole, we can quit right now. */
			if (cur->sk_flags & SK_ISNULL)
			{
				so->numArrayKeys = -1;
				so->arrayKeyData = NULL;
				return;
			}
		}
	}

	/* Quit if nothing to do. */
	if (numArrayKeys == 0)
	{
		so->numArrayKeys = 0;
		so->arrayKeyData = NULL;
		return;
	}

	/*
	 * Make a scan-lifespan context to hold array-associated data, or reset it
	 * if we already have one from a previous rescan cycle.
	 */
	if (so->arrayContext == NULL)
		so->arrayContext = AllocSetContextCreate(CurrentMemoryContext,
												 "BTree array context",
												 ALLOCSET_SMALL_SIZES);
	else
		MemoryContextReset(so->arrayContext);

	oldContext = MemoryContextSwitchTo(so->arrayContext);

	/* Create modifiable copy of scan->keyData in the workspace context */
	so->arrayKeyData = (ScanKey) palloc(scan->numberOfKeys * sizeof(ScanKeyData));
	memcpy(so->arrayKeyData,
		   scan->keyData,
		   scan->numberOfKeys * sizeof(ScanKeyData));

	/* Allocate space for per-array data in the workspace context */
	so->arrayKeys = (BTArrayKeyInfo *) palloc0(numArrayKeys * sizeof(BTArrayKeyInfo));

	/* Now process each array key */
	numArrayKeys = 0;
	for (i = 0; i < numberOfKeys; i++)
	{
		ArrayType  *arrayval;
		int16		elmlen;
		bool		elmbyval;
		char		elmalign;
		int			num_elems;
		Datum	   *elem_values;
		bool	   *elem_nulls;
		int			num_nonnulls;
		int			j;

		cur = &so->arrayKeyData[i];
		if (!(cur->sk_flags & SK_SEARCHARRAY))
			continue;

		/*
		 * First, deconstruct the array into elements. Anything allocated
		 * here (including a possibly detoasted array value) is in the
		 * workspace context.
		 */
		arrayval = DatumGetArrayTypeP(cur->sk_argument);
		/* We could cache this data, but not clear it's worth it */
		get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
							 &elmlen, &elmbyval, &elmalign);
		deconstruct_array(arrayval,
						  ARR_ELEMTYPE(arrayval),
						  elmlen, elmbyval, elmalign,
						  &elem_values, &elem_nulls, &num_elems);

		/*
		 * Compress out any null elements. We can ignore them since we assume
		 * all btree operators are strict.
		 */
		num_nonnulls = 0;
		for (j = 0; j < num_elems; j++)
		{
			if (!elem_nulls[j])
				elem_values[num_nonnulls++] = elem_values[j];
		}

		/* We could pfree(elem_nulls) now, but not worth the cycles */

		/* If there's no non-nulls, the scan qual is unsatisfiable */
		if (num_nonnulls == 0)
		{
			numArrayKeys = -1;
			break;
		}

		/*
		 * If the comparison operator is not equality, then the array qual
		 * degenerates to a simple comparison against the smallest or largest
		 * non-null array element, as appropriate.
		 */
		switch (cur->sk_strategy)
		{
			case BTLessStrategyNumber:
			case BTLessEqualStrategyNumber:
				cur->sk_argument =
					_bt_find_extreme_element(scan, cur,
											 BTGreaterStrategyNumber,
											 elem_values, num_nonnulls);
				continue;
			case BTEqualStrategyNumber:
				/* proceed with rest of loop */
				break;
			case BTGreaterEqualStrategyNumber:
			case BTGreaterStrategyNumber:
				cur->sk_argument =
					_bt_find_extreme_element(scan, cur,
											 BTLessStrategyNumber,
											 elem_values, num_nonnulls);
				continue;
			default:
				elog(ERROR, "unrecognized StrategyNumber: %d",
					 (int) cur->sk_strategy);
				break;
		}

		/*
		 * Sort the non-null elements and eliminate any duplicates. We must
		 * sort in the same ordering used by the index column, so that the
		 * successive primitive indexscans produce data in index order.
		 */
		num_elems = _bt_sort_array_elements(scan, cur,
											(indoption[cur->sk_attno - 1] & INDOPTION_DESC) != 0,
											elem_values, num_nonnulls);

		/*
		 * And set up the BTArrayKeyInfo data.
		 */
		so->arrayKeys[numArrayKeys].scan_key = i;
		so->arrayKeys[numArrayKeys].num_elems = num_elems;
		so->arrayKeys[numArrayKeys].elem_values = elem_values;
		numArrayKeys++;
	}

	so->numArrayKeys = numArrayKeys;

	MemoryContextSwitchTo(oldContext);
}

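/*
 * Worked example for exposition (a sketch, not from the original file):
 * given "WHERE x = ANY ('{3,1,2,NULL}') AND y > ANY ('{5,9}')", the NULL
 * element is dropped (btree operators are assumed strict), the equality
 * array is sorted and de-duplicated into {1, 2, 3} to drive successive
 * primitive index scans, and the inequality array degenerates to the
 * single extreme element that subsumes it, leaving just "y > 5".
 */
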
/*
 * _bt_find_extreme_element() -- get least or greatest array element
 *
 * scan and skey identify the index column, whose opfamily determines the
 * comparison semantics. strat should be BTLessStrategyNumber to get the
 * least element, or BTGreaterStrategyNumber to get the greatest.
 */
static Datum
_bt_find_extreme_element(IndexScanDesc scan, ScanKey skey,
						 StrategyNumber strat,
						 Datum *elems, int nelems)
{
	Relation	rel = scan->indexRelation;
	Oid			elemtype,
				cmp_op;
	RegProcedure cmp_proc;
	FmgrInfo	flinfo;
	Datum		result;
	int			i;

	/*
	 * Determine the nominal datatype of the array elements. We have to
	 * support the convention that sk_subtype == InvalidOid means the opclass
	 * input type; this is a hack to simplify life for ScanKeyInit().
	 */
	elemtype = skey->sk_subtype;
	if (elemtype == InvalidOid)
		elemtype = rel->rd_opcintype[skey->sk_attno - 1];

	/*
	 * Look up the appropriate comparison operator in the opfamily.
	 *
	 * Note: it's possible that this would fail, if the opfamily is
	 * incomplete, but it seems quite unlikely that an opfamily would omit
	 * non-cross-type comparison operators for any datatype that it supports
	 * at all.
	 */
	cmp_op = get_opfamily_member(rel->rd_opfamily[skey->sk_attno - 1],
								 elemtype,
								 elemtype,
								 strat);
	if (!OidIsValid(cmp_op))
		elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
			 strat, elemtype, elemtype,
			 rel->rd_opfamily[skey->sk_attno - 1]);
	cmp_proc = get_opcode(cmp_op);
	if (!RegProcedureIsValid(cmp_proc))
		elog(ERROR, "missing oprcode for operator %u", cmp_op);

	fmgr_info(cmp_proc, &flinfo);

	Assert(nelems > 0);
	result = elems[0];
	for (i = 1; i < nelems; i++)
	{
		if (DatumGetBool(FunctionCall2Coll(&flinfo,
										   skey->sk_collation,
										   elems[i],
										   result)))
			result = elems[i];
	}

	return result;
}

/*
 * _bt_sort_array_elements() -- sort and de-dup array elements
 *
 * The array elements are sorted in-place, and the new number of elements
 * after duplicate removal is returned.
 *
 * scan and skey identify the index column, whose opfamily determines the
 * comparison semantics. If reverse is true, we sort in descending order.
 */
static int
_bt_sort_array_elements(IndexScanDesc scan, ScanKey skey,
						bool reverse,
						Datum *elems, int nelems)
{
	Relation	rel = scan->indexRelation;
	Oid			elemtype;
	RegProcedure cmp_proc;
	BTSortArrayContext cxt;

	if (nelems <= 1)
		return nelems;			/* no work to do */

	/*
	 * Determine the nominal datatype of the array elements. We have to
	 * support the convention that sk_subtype == InvalidOid means the opclass
	 * input type; this is a hack to simplify life for ScanKeyInit().
	 */
	elemtype = skey->sk_subtype;
	if (elemtype == InvalidOid)
		elemtype = rel->rd_opcintype[skey->sk_attno - 1];

	/*
	 * Look up the appropriate comparison function in the opfamily.
	 *
	 * Note: it's possible that this would fail, if the opfamily is
	 * incomplete, but it seems quite unlikely that an opfamily would omit
	 * non-cross-type support functions for any datatype that it supports at
	 * all.
	 */
	cmp_proc = get_opfamily_proc(rel->rd_opfamily[skey->sk_attno - 1],
								 elemtype,
								 elemtype,
								 BTORDER_PROC);
	if (!RegProcedureIsValid(cmp_proc))
		elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
			 BTORDER_PROC, elemtype, elemtype,
			 rel->rd_opfamily[skey->sk_attno - 1]);

	/* Sort the array elements */
	fmgr_info(cmp_proc, &cxt.flinfo);
	cxt.collation = skey->sk_collation;
	cxt.reverse = reverse;
	qsort_arg(elems, nelems, sizeof(Datum),
			  _bt_compare_array_elements, &cxt);

	/* Now scan the sorted elements and remove duplicates */
	return qunique_arg(elems, nelems, sizeof(Datum),
					   _bt_compare_array_elements, &cxt);
}

/*
 * qsort_arg comparator for sorting array elements
 */
static int
_bt_compare_array_elements(const void *a, const void *b, void *arg)
{
	Datum		da = *((const Datum *) a);
	Datum		db = *((const Datum *) b);
	BTSortArrayContext *cxt = (BTSortArrayContext *) arg;
	int32		compare;

	compare = DatumGetInt32(FunctionCall2Coll(&cxt->flinfo,
											  cxt->collation,
											  da, db));
	if (cxt->reverse)
		INVERT_COMPARE_RESULT(compare);
	return compare;
}

/*
 * _bt_start_array_keys() -- Initialize array keys at start of a scan
 *
 * Set up the cur_elem counters and fill in the first sk_argument value for
 * each array scankey. We can't do this until we know the scan direction.
 */
void
_bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	int			i;

	for (i = 0; i < so->numArrayKeys; i++)
	{
		BTArrayKeyInfo *curArrayKey = &so->arrayKeys[i];
		ScanKey		skey = &so->arrayKeyData[curArrayKey->scan_key];

		Assert(curArrayKey->num_elems > 0);
		if (ScanDirectionIsBackward(dir))
			curArrayKey->cur_elem = curArrayKey->num_elems - 1;
		else
			curArrayKey->cur_elem = 0;
		skey->sk_argument = curArrayKey->elem_values[curArrayKey->cur_elem];
	}

	so->arraysStarted = true;
}

/*
 * _bt_advance_array_keys() -- Advance to next set of array elements
 *
 * Returns true if there is another set of values to consider, false if not.
 * On true result, the scankeys are initialized with the next set of values.
 */
bool
_bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir)
{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	bool		found = false;
	int			i;

	/*
	 * We must advance the last array key most quickly, since it will
	 * correspond to the lowest-order index column among the available
	 * qualifications. This is necessary to ensure correct ordering of output
	 * when there are multiple array keys.
	 */
	for (i = so->numArrayKeys - 1; i >= 0; i--)
	{
		BTArrayKeyInfo *curArrayKey = &so->arrayKeys[i];
		ScanKey		skey = &so->arrayKeyData[curArrayKey->scan_key];
		int			cur_elem = curArrayKey->cur_elem;
		int			num_elems = curArrayKey->num_elems;

		if (ScanDirectionIsBackward(dir))
		{
			if (--cur_elem < 0)
			{
				cur_elem = num_elems - 1;
				found = false;	/* need to advance next array key */
			}
			else
				found = true;
		}
		else
		{
			if (++cur_elem >= num_elems)
			{
				cur_elem = 0;
				found = false;	/* need to advance next array key */
			}
			else
				found = true;
		}

		curArrayKey->cur_elem = cur_elem;
		skey->sk_argument = curArrayKey->elem_values[cur_elem];
		if (found)
			break;
	}

	/* advance parallel scan */
	if (scan->parallel_scan != NULL)
		_bt_parallel_advance_array_keys(scan);

	/*
	 * When no new array keys were found, the scan is "past the end" of the
	 * array keys. _bt_start_array_keys can still "restart" the array keys if
	 * a rescan is required.
	 */
	if (!found)
		so->arraysStarted = false;

	return found;
}

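/*
 * Example of the advancement order (a sketch, not from the original
 * file): with x IN (1, 2) and y IN (10, 20) on a forward scan, the last
 * array key varies fastest, like an odometer:
 *
 *		(x=1,y=10) -> (x=1,y=20) -> (x=2,y=10) -> (x=2,y=20) -> exhausted
 *
 * Advancing the lowest-order column fastest is what keeps the combined
 * output of the primitive scans in overall index order.
 */
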
/*
 * _bt_mark_array_keys() -- Handle array keys during btmarkpos
 *
 * Save the current state of the array keys as the "mark" position.
 */
void
_bt_mark_array_keys(IndexScanDesc scan)
{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	int			i;

	for (i = 0; i < so->numArrayKeys; i++)
	{
		BTArrayKeyInfo *curArrayKey = &so->arrayKeys[i];

		curArrayKey->mark_elem = curArrayKey->cur_elem;
	}
}

/*
 * _bt_restore_array_keys() -- Handle array keys during btrestrpos
 *
 * Restore the array keys to where they were when the mark was set.
 */
void
_bt_restore_array_keys(IndexScanDesc scan)
{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	bool		changed = false;
	int			i;

	/* Restore each array key to its position when the mark was set */
	for (i = 0; i < so->numArrayKeys; i++)
	{
		BTArrayKeyInfo *curArrayKey = &so->arrayKeys[i];
		ScanKey		skey = &so->arrayKeyData[curArrayKey->scan_key];
		int			mark_elem = curArrayKey->mark_elem;

		if (curArrayKey->cur_elem != mark_elem)
		{
			curArrayKey->cur_elem = mark_elem;
			skey->sk_argument = curArrayKey->elem_values[mark_elem];
			changed = true;
		}
	}

	/*
	 * If we changed any keys, we must redo _bt_preprocess_keys. That might
	 * sound like overkill, but in cases with multiple keys per index column
	 * it seems necessary to do the full set of pushups.
	 *
	 * Also do this whenever the scan's set of array keys "wrapped around" at
	 * the end of the last primitive index scan. There won't have been a call
	 * to _bt_preprocess_keys from some other place following wrap around, so
	 * we do it for ourselves.
	 */
	if (changed || !so->arraysStarted)
	{
		_bt_preprocess_keys(scan);
		/* The mark should have been set on a consistent set of keys... */
		Assert(so->qual_ok);
	}
}


/*
 *	_bt_preprocess_keys() -- Preprocess scan keys
 *
 * The given search-type keys (in scan->keyData[] or so->arrayKeyData[])
 * are copied to so->keyData[] with possible transformation.
 * scan->numberOfKeys is the number of input keys, so->numberOfKeys gets
 * the number of output keys (possibly less, never greater).
 *
 * The output keys are marked with additional sk_flags bits beyond the
 * system-standard bits supplied by the caller. The DESC and NULLS_FIRST
 * indoption bits for the relevant index attribute are copied into the flags.
 * Also, for a DESC column, we commute (flip) all the sk_strategy numbers
 * so that the index sorts in the desired direction.
 *
 * One key purpose of this routine is to discover which scan keys must be
 * satisfied to continue the scan. It also attempts to eliminate redundant
 * keys and detect contradictory keys. (If the index opfamily provides
 * incomplete sets of cross-type operators, we may fail to detect redundant
 * or contradictory keys, but we can survive that.)
 *
 * The output keys must be sorted by index attribute. Presently we expect
 * (but verify) that the input keys are already so sorted --- this is done
 * by match_clauses_to_index() in indxpath.c. Some reordering of the keys
 * within each attribute may be done as a byproduct of the processing here,
 * but no other code depends on that.
 *
 * The output keys are marked with flags SK_BT_REQFWD and/or SK_BT_REQBKWD
 * if they must be satisfied in order to continue the scan forward or backward
 * respectively. _bt_checkkeys uses these flags. For example, if the quals
 * are "x = 1 AND y < 4 AND z < 5", then _bt_checkkeys will reject a tuple
 * (1,2,7), but we must continue the scan in case there are tuples (1,3,z).
 * But once we reach tuples like (1,4,z) we can stop scanning because no
 * later tuples could match. This is reflected by marking the x and y keys,
 * but not the z key, with SK_BT_REQFWD. In general, the keys for leading
 * attributes with "=" keys are marked both SK_BT_REQFWD and SK_BT_REQBKWD.
 * For the first attribute without an "=" key, any "<" and "<=" keys are
 * marked SK_BT_REQFWD while any ">" and ">=" keys are marked SK_BT_REQBKWD.
 * This can be seen to be correct by considering the above example. Note
 * in particular that if there are no keys for a given attribute, the keys for
 * subsequent attributes can never be required; for instance "WHERE y = 4"
 * requires a full-index scan.
 *
 * If possible, redundant keys are eliminated: we keep only the tightest
 * >/>= bound and the tightest </<= bound, and if there's an = key then
 * that's the only one returned. (So, we return either a single = key,
 * or one or two boundary-condition keys for each attr.) However, if we
 * cannot compare two keys for lack of a suitable cross-type operator,
 * we cannot eliminate either. If there are two such keys of the same
 * operator strategy, the second one is just pushed into the output array
 * without further processing here. We may also emit both >/>= or both
 * </<= keys if we can't compare them. The logic about required keys still
 * works if we don't eliminate redundant keys.
 *
 * Note that one reason we need direction-sensitive required-key flags is
 * precisely that we may not be able to eliminate redundant keys. Suppose
 * we have "x > 4::int AND x > 10::bigint", and we are unable to determine
 * which key is more restrictive for lack of a suitable cross-type operator.
 * _bt_first will arbitrarily pick one of the keys to do the initial
 * positioning with. If it picks x > 4, then the x > 10 condition will fail
 * until we reach index entries > 10; but we can't stop the scan just because
 * x > 10 is failing. On the other hand, if we are scanning backwards, then
 * failure of either key is indeed enough to stop the scan. (In general, when
 * inequality keys are present, the initial-positioning code only promises to
 * position before the first possible match, not exactly at the first match,
 * for a forward scan; or after the last match for a backward scan.)
 *
 * As a byproduct of this work, we can detect contradictory quals such
 * as "x = 1 AND x > 2". If we see that, we return so->qual_ok = false,
 * indicating the scan need not be run at all since no tuples can match.
 * (In this case we do not bother completing the output key array!)
 * Again, missing cross-type operators might cause us to fail to prove the
 * quals contradictory when they really are, but the scan will work correctly.
 *
 * Row comparison keys are currently also treated without any smarts:
 * we just transfer them into the preprocessed array without any
 * editorialization. We can treat them the same as an ordinary inequality
 * comparison on the row's first index column, for the purposes of the logic
 * about required keys.
 *
 * Note: the reason we have to copy the preprocessed scan keys into private
 * storage is that we are modifying the array based on comparisons of the
 * key argument values, which could change on a rescan or after moving to
 * new elements of array keys. Therefore we can't overwrite the source data.
 */
void
_bt_preprocess_keys(IndexScanDesc scan)
{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	int			numberOfKeys = scan->numberOfKeys;
	int16	   *indoption = scan->indexRelation->rd_indoption;
	int			new_numberOfKeys;
	int			numberOfEqualCols;
	ScanKey		inkeys;
	ScanKey		outkeys;
	ScanKey		cur;
	ScanKey		xform[BTMaxStrategyNumber];
	bool		test_result;
	int			i,
				j;
	AttrNumber	attno;

	/* initialize result variables */
	so->qual_ok = true;
	so->numberOfKeys = 0;

	if (numberOfKeys < 1)
		return;					/* done if qual-less scan */

	/*
	 * Read so->arrayKeyData if array keys are present, else scan->keyData
	 */
	if (so->arrayKeyData != NULL)
		inkeys = so->arrayKeyData;
	else
		inkeys = scan->keyData;

	outkeys = so->keyData;
	cur = &inkeys[0];
	/* we check that input keys are correctly ordered */
	if (cur->sk_attno < 1)
		elog(ERROR, "btree index keys must be ordered by attribute");

	/* We can short-circuit most of the work if there's just one key */
	if (numberOfKeys == 1)
	{
		/* Apply indoption to scankey (might change sk_strategy!) */
		if (!_bt_fix_scankey_strategy(cur, indoption))
			so->qual_ok = false;
		memcpy(outkeys, cur, sizeof(ScanKeyData));
		so->numberOfKeys = 1;
		/* We can mark the qual as required if it's for first index col */
		if (cur->sk_attno == 1)
			_bt_mark_scankey_required(outkeys);
		return;
	}

	/*
	 * Otherwise, do the full set of pushups.
	 */
	new_numberOfKeys = 0;
	numberOfEqualCols = 0;

	/*
	 * Initialize for processing of keys for attr 1.
	 *
	 * xform[i] points to the currently best scan key of strategy type i+1; it
	 * is NULL if we haven't yet found such a key for this attr.
	 */
	attno = 1;
	memset(xform, 0, sizeof(xform));

	/*
	 * Loop iterates from 0 to numberOfKeys inclusive; we use the last pass to
	 * handle after-last-key processing. Actual exit from the loop is at the
	 * "break" statement below.
	 */
	for (i = 0;; cur++, i++)
	{
		if (i < numberOfKeys)
		{
			/* Apply indoption to scankey (might change sk_strategy!) */
			if (!_bt_fix_scankey_strategy(cur, indoption))
			{
				/* NULL can't be matched, so give up */
				so->qual_ok = false;
				return;
			}
		}

		/*
		 * If we are at the end of the keys for a particular attr, finish up
		 * processing and emit the cleaned-up keys.
		 */
		if (i == numberOfKeys || cur->sk_attno != attno)
		{
			int			priorNumberOfEqualCols = numberOfEqualCols;

			/* check input keys are correctly ordered */
			if (i < numberOfKeys && cur->sk_attno < attno)
				elog(ERROR, "btree index keys must be ordered by attribute");

			/*
			 * If = has been specified, all other keys can be eliminated as
			 * redundant. If we have a case like key = 1 AND key > 2, we can
			 * set qual_ok to false and abandon further processing.
			 *
			 * We also have to deal with the case of "key IS NULL", which is
			 * unsatisfiable in combination with any other index condition. By
			 * the time we get here, that's been classified as an equality
			 * check, and we've rejected any combination of it with a regular
			 * equality condition; but not with other types of conditions.
			 */
			if (xform[BTEqualStrategyNumber - 1])
			{
				ScanKey		eq = xform[BTEqualStrategyNumber - 1];

				for (j = BTMaxStrategyNumber; --j >= 0;)
				{
					ScanKey		chk = xform[j];

					if (!chk || j == (BTEqualStrategyNumber - 1))
						continue;

					if (eq->sk_flags & SK_SEARCHNULL)
					{
						/* IS NULL is contradictory to anything else */
						so->qual_ok = false;
						return;
					}

					if (_bt_compare_scankey_args(scan, chk, eq, chk,
												 &test_result))
					{
						if (!test_result)
						{
							/* keys proven mutually contradictory */
							so->qual_ok = false;
							return;
						}
						/* else discard the redundant non-equality key */
						xform[j] = NULL;
					}
					/* else, cannot determine redundancy, keep both keys */
				}
				/* track number of attrs for which we have "=" keys */
				numberOfEqualCols++;
			}

			/* try to keep only one of <, <= */
			if (xform[BTLessStrategyNumber - 1]
				&& xform[BTLessEqualStrategyNumber - 1])
			{
				ScanKey		lt = xform[BTLessStrategyNumber - 1];
				ScanKey		le = xform[BTLessEqualStrategyNumber - 1];

				if (_bt_compare_scankey_args(scan, le, lt, le,
											 &test_result))
				{
					if (test_result)
						xform[BTLessEqualStrategyNumber - 1] = NULL;
					else
						xform[BTLessStrategyNumber - 1] = NULL;
				}
			}

			/* try to keep only one of >, >= */
			if (xform[BTGreaterStrategyNumber - 1]
				&& xform[BTGreaterEqualStrategyNumber - 1])
			{
				ScanKey		gt = xform[BTGreaterStrategyNumber - 1];
				ScanKey		ge = xform[BTGreaterEqualStrategyNumber - 1];

				if (_bt_compare_scankey_args(scan, ge, gt, ge,
											 &test_result))
				{
					if (test_result)
						xform[BTGreaterEqualStrategyNumber - 1] = NULL;
					else
						xform[BTGreaterStrategyNumber - 1] = NULL;
				}
			}

			/*
			 * Emit the cleaned-up keys into the outkeys[] array, and then
			 * mark them if they are required. They are required (possibly
			 * only in one direction) if all attrs before this one had "=".
			 */
			for (j = BTMaxStrategyNumber; --j >= 0;)
			{
				if (xform[j])
				{
					ScanKey		outkey = &outkeys[new_numberOfKeys++];

					memcpy(outkey, xform[j], sizeof(ScanKeyData));
					if (priorNumberOfEqualCols == attno - 1)
						_bt_mark_scankey_required(outkey);
				}
			}

			/*
			 * Exit loop here if done.
			 */
			if (i == numberOfKeys)
				break;

			/* Re-initialize for new attno */
			attno = cur->sk_attno;
			memset(xform, 0, sizeof(xform));
		}

		/* check strategy this key's operator corresponds to */
		j = cur->sk_strategy - 1;

		/* if row comparison, push it directly to the output array */
		if (cur->sk_flags & SK_ROW_HEADER)
		{
			ScanKey		outkey = &outkeys[new_numberOfKeys++];

			memcpy(outkey, cur, sizeof(ScanKeyData));
			if (numberOfEqualCols == attno - 1)
				_bt_mark_scankey_required(outkey);

			/*
			 * We don't support RowCompare using equality; such a qual would
			 * mess up the numberOfEqualCols tracking.
			 */
			Assert(j != (BTEqualStrategyNumber - 1));
			continue;
		}

		/* have we seen one of these before? */
		if (xform[j] == NULL)
		{
			/* nope, so remember this scankey */
			xform[j] = cur;
		}
		else
		{
			/* yup, keep only the more restrictive key */
			if (_bt_compare_scankey_args(scan, cur, cur, xform[j],
										 &test_result))
			{
				if (test_result)
					xform[j] = cur;
				else if (j == (BTEqualStrategyNumber - 1))
				{
					/* key == a && key == b, but a != b */
					so->qual_ok = false;
					return;
				}
				/* else old key is more restrictive, keep it */
			}
			else
			{
				/*
				 * We can't determine which key is more restrictive. Keep the
				 * previous one in xform[j] and push this one directly to the
				 * output array.
				 */
				ScanKey		outkey = &outkeys[new_numberOfKeys++];

				memcpy(outkey, cur, sizeof(ScanKeyData));
				if (numberOfEqualCols == attno - 1)
					_bt_mark_scankey_required(outkey);
			}
		}
	}

	so->numberOfKeys = new_numberOfKeys;
}

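/*
 * Worked example for exposition (a sketch, not from the original file):
 * given "x > 4 AND x > 10 AND y = 5" on an index over (x, y), and
 * assuming a usable comparison operator, preprocessing keeps only the
 * tighter bound "x > 10" and emits "y = 5" unchanged. Since attribute 1
 * has no "=" key, "x > 10" is marked SK_BT_REQBKWD (required to continue
 * a backward scan), while the y key cannot be marked required at all.
 */
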
/*
 * Compare two scankey values using a specified operator.
 *
 * The test we want to perform is logically "leftarg op rightarg", where
 * leftarg and rightarg are the sk_argument values in those ScanKeys, and
 * the comparison operator is the one in the op ScanKey. However, in
 * cross-data-type situations we may need to look up the correct operator in
 * the index's opfamily: it is the one having amopstrategy = op->sk_strategy
 * and amoplefttype/amoprighttype equal to the two argument datatypes.
 *
 * If the opfamily doesn't supply a complete set of cross-type operators we
 * may not be able to make the comparison. If we can make the comparison
 * we store the operator result in *result and return true. We return false
 * if the comparison could not be made.
 *
 * Note: op always points at the same ScanKey as either leftarg or rightarg.
 * Since we don't scribble on the scankeys, this aliasing should cause no
 * trouble.
 *
 * Note: this routine needs to be insensitive to any DESC option applied
 * to the index column. For example, "x < 4" is a tighter constraint than
 * "x < 5" regardless of which way the index is sorted.
 */
static bool
_bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
						 ScanKey leftarg, ScanKey rightarg,
						 bool *result)
{
	Relation	rel = scan->indexRelation;
	Oid			lefttype,
				righttype,
				optype,
				opcintype,
				cmp_op;
	StrategyNumber strat;

	/*
	 * First, deal with cases where one or both args are NULL. This should
	 * only happen when the scankeys represent IS NULL/NOT NULL conditions.
	 */
	if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ISNULL)
	{
		bool		leftnull,
					rightnull;

		if (leftarg->sk_flags & SK_ISNULL)
		{
			Assert(leftarg->sk_flags & (SK_SEARCHNULL | SK_SEARCHNOTNULL));
			leftnull = true;
		}
		else
			leftnull = false;
		if (rightarg->sk_flags & SK_ISNULL)
		{
			Assert(rightarg->sk_flags & (SK_SEARCHNULL | SK_SEARCHNOTNULL));
			rightnull = true;
		}
		else
			rightnull = false;

		/*
		 * We treat NULL as either greater than or less than all other values.
		 * Since true > false, the tests below work correctly for NULLS LAST
		 * logic. If the index is NULLS FIRST, we need to flip the strategy.
		 */
		strat = op->sk_strategy;
		if (op->sk_flags & SK_BT_NULLS_FIRST)
			strat = BTCommuteStrategyNumber(strat);

		switch (strat)
		{
			case BTLessStrategyNumber:
				*result = (leftnull < rightnull);
				break;
			case BTLessEqualStrategyNumber:
				*result = (leftnull <= rightnull);
				break;
			case BTEqualStrategyNumber:
				*result = (leftnull == rightnull);
				break;
			case BTGreaterEqualStrategyNumber:
				*result = (leftnull >= rightnull);
				break;
			case BTGreaterStrategyNumber:
				*result = (leftnull > rightnull);
				break;
			default:
				elog(ERROR, "unrecognized StrategyNumber: %d", (int) strat);
				*result = false;	/* keep compiler quiet */
				break;
		}
		return true;
	}

	/*
	 * The opfamily we need to worry about is identified by the index column.
	 */
	Assert(leftarg->sk_attno == rightarg->sk_attno);

	opcintype = rel->rd_opcintype[leftarg->sk_attno - 1];

	/*
	 * Determine the actual datatypes of the ScanKey arguments. We have to
	 * support the convention that sk_subtype == InvalidOid means the opclass
	 * input type; this is a hack to simplify life for ScanKeyInit().
	 */
	lefttype = leftarg->sk_subtype;
	if (lefttype == InvalidOid)
		lefttype = opcintype;
	righttype = rightarg->sk_subtype;
	if (righttype == InvalidOid)
		righttype = opcintype;
	optype = op->sk_subtype;
	if (optype == InvalidOid)
		optype = opcintype;

	/*
	 * If leftarg and rightarg match the types expected for the "op" scankey,
	 * we can use its already-looked-up comparison function.
	 */
	if (lefttype == opcintype && righttype == optype)
	{
		*result = DatumGetBool(FunctionCall2Coll(&op->sk_func,
												 op->sk_collation,
												 leftarg->sk_argument,
												 rightarg->sk_argument));
		return true;
	}

	/*
	 * Otherwise, we need to go to the syscache to find the appropriate
	 * operator. (This cannot result in infinite recursion, since no
	 * indexscan initiated by syscache lookup will use cross-data-type
	 * operators.)
	 *
	 * If the sk_strategy was flipped by _bt_fix_scankey_strategy, we have to
	 * un-flip it to get the correct opfamily member.
	 */
	strat = op->sk_strategy;
	if (op->sk_flags & SK_BT_DESC)
		strat = BTCommuteStrategyNumber(strat);

	cmp_op = get_opfamily_member(rel->rd_opfamily[leftarg->sk_attno - 1],
								 lefttype,
								 righttype,
								 strat);
	if (OidIsValid(cmp_op))
	{
		RegProcedure cmp_proc = get_opcode(cmp_op);

		if (RegProcedureIsValid(cmp_proc))
		{
			*result = DatumGetBool(OidFunctionCall2Coll(cmp_proc,
														op->sk_collation,
														leftarg->sk_argument,
														rightarg->sk_argument));
			return true;
		}
	}

	/* Can't make the comparison */
	*result = false;			/* suppress compiler warnings */
	return false;
}

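/*
 * Example for exposition (a sketch, not from the original file):
 * comparing the arguments of "x > 4::int4" and "x > 10::int8" needs the
 * cross-type operator ">(int4, int8)" from the index's integer opfamily.
 * If the opfamily lacked that member, this routine would return false
 * and _bt_preprocess_keys would simply keep both keys.
 */
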
/*
 * Adjust a scankey's strategy and flags setting as needed for indoptions.
 *
 * We copy the appropriate indoption value into the scankey sk_flags
 * (shifting to avoid clobbering system-defined flag bits). Also, if
 * the DESC option is set, commute (flip) the operator strategy number.
 *
 * A secondary purpose is to check for IS NULL/NOT NULL scankeys and set up
 * the strategy field correctly for them.
 *
 * Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a
 * NULL comparison value. Since all btree operators are assumed strict,
 * a NULL means that the qual cannot be satisfied. We return true if the
 * comparison value isn't NULL, or false if the scan should be abandoned.
 *
 * This function is applied to the *input* scankey structure; therefore
 * on a rescan we will be looking at already-processed scankeys. Hence
 * we have to be careful not to re-commute the strategy if we already did it.
 * It's a bit ugly to modify the caller's copy of the scankey but in practice
 * there shouldn't be any problem, since the index's indoptions are certainly
 * not going to change while the scankey survives.
 */
static bool
_bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
{
	int			addflags;

	addflags = indoption[skey->sk_attno - 1] << SK_BT_INDOPTION_SHIFT;

	/*
	 * We treat all btree operators as strict (even if they're not so marked
	 * in pg_proc). This means that it is impossible for an operator condition
	 * with a NULL comparison constant to succeed, and we can reject it right
	 * away.
	 *
	 * However, we now also support "x IS NULL" clauses as search conditions,
	 * so in that case keep going. The planner has not filled in any
	 * particular strategy in this case, so set it to BTEqualStrategyNumber
	 * --- we can treat IS NULL as an equality operator for purposes of search
	 * strategy.
	 *
	 * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
	 * than NULL" in a NULLS LAST index, or "greater than NULL" in a NULLS
	 * FIRST index.
	 *
	 * Note: someday we might have to fill in sk_collation from the index
	 * column's collation. At the moment this is a non-issue because we'll
	 * never actually call the comparison operator on a NULL.
	 */
	if (skey->sk_flags & SK_ISNULL)
	{
		/* SK_ISNULL shouldn't be set in a row header scankey */
		Assert(!(skey->sk_flags & SK_ROW_HEADER));

		/* Set indoption flags in scankey (might be done already) */
		skey->sk_flags |= addflags;

		/* Set correct strategy for IS NULL or NOT NULL search */
		if (skey->sk_flags & SK_SEARCHNULL)
		{
			skey->sk_strategy = BTEqualStrategyNumber;
			skey->sk_subtype = InvalidOid;
			skey->sk_collation = InvalidOid;
		}
		else if (skey->sk_flags & SK_SEARCHNOTNULL)
		{
			if (skey->sk_flags & SK_BT_NULLS_FIRST)
				skey->sk_strategy = BTGreaterStrategyNumber;
			else
				skey->sk_strategy = BTLessStrategyNumber;
			skey->sk_subtype = InvalidOid;
			skey->sk_collation = InvalidOid;
		}
		else
		{
			/* regular qual, so it cannot be satisfied */
			return false;
		}

		/* Needn't do the rest */
		return true;
	}

	/* Adjust strategy for DESC, if we didn't already */
	if ((addflags & SK_BT_DESC) && !(skey->sk_flags & SK_BT_DESC))
		skey->sk_strategy = BTCommuteStrategyNumber(skey->sk_strategy);
	skey->sk_flags |= addflags;

	/* If it's a row header, fix row member flags and strategies similarly */
	if (skey->sk_flags & SK_ROW_HEADER)
	{
		ScanKey		subkey = (ScanKey) DatumGetPointer(skey->sk_argument);

		for (;;)
		{
			Assert(subkey->sk_flags & SK_ROW_MEMBER);
			addflags = indoption[subkey->sk_attno - 1] << SK_BT_INDOPTION_SHIFT;
			if ((addflags & SK_BT_DESC) && !(subkey->sk_flags & SK_BT_DESC))
				subkey->sk_strategy = BTCommuteStrategyNumber(subkey->sk_strategy);
			subkey->sk_flags |= addflags;
			if (subkey->sk_flags & SK_ROW_END)
				break;
			subkey++;
		}
	}

	return true;
}

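/*
 * Example for exposition (a sketch, not from the original file): for an
 * index built with "CREATE INDEX ... (x DESC)", an input key "x < 5"
 * arrives with BTLessStrategyNumber, but the index stores x in
 * descending order, so the routine commutes it to BTGreaterStrategyNumber
 * and records SK_BT_DESC in sk_flags. The "if we didn't already" guard
 * matters on rescan: the flag is then already set, and the strategy must
 * not be flipped a second time.
 */
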
/*
 * Mark a scankey as "required to continue the scan".
 *
 * Depending on the operator type, the key may be required for both scan
 * directions or just one. Also, if the key is a row comparison header,
 * we have to mark its first subsidiary ScanKey as required. (Subsequent
 * subsidiary ScanKeys are normally for lower-order columns, and thus
 * cannot be required, since they're after the first non-equality scankey.)
 *
 * Note: when we set required-key flag bits in a subsidiary scankey, we are
 * scribbling on a data structure belonging to the index AM's caller, not on
 * our private copy. This should be OK because the marking will not change
 * from scan to scan within a query, and so we'd just re-mark the same way
 * anyway on a rescan. Something to keep an eye on though.
 */
static void
_bt_mark_scankey_required(ScanKey skey)
{
	int			addflags;

	switch (skey->sk_strategy)
	{
		case BTLessStrategyNumber:
		case BTLessEqualStrategyNumber:
			addflags = SK_BT_REQFWD;
			break;
		case BTEqualStrategyNumber:
			addflags = SK_BT_REQFWD | SK_BT_REQBKWD;
			break;
		case BTGreaterEqualStrategyNumber:
		case BTGreaterStrategyNumber:
			addflags = SK_BT_REQBKWD;
			break;
		default:
			elog(ERROR, "unrecognized StrategyNumber: %d",
				 (int) skey->sk_strategy);
			addflags = 0;		/* keep compiler quiet */
			break;
	}

	skey->sk_flags |= addflags;

	if (skey->sk_flags & SK_ROW_HEADER)
	{
		ScanKey		subkey = (ScanKey) DatumGetPointer(skey->sk_argument);

		/* First subkey should be same column/operator as the header */
		Assert(subkey->sk_flags & SK_ROW_MEMBER);
		Assert(subkey->sk_attno == skey->sk_attno);
		Assert(subkey->sk_strategy == skey->sk_strategy);
		subkey->sk_flags |= addflags;
	}
}

/*
 * Test whether an indextuple satisfies all the scankey conditions.
 *
 * Return true if so, false if not. If the tuple fails to pass the qual,
 * we also determine whether there's any need to continue the scan beyond
 * this tuple, and set *continuescan accordingly. See comments for
 * _bt_preprocess_keys(), above, about how this is done.
 *
 * Forward scan callers can pass a high key tuple in the hopes of having
 * us set *continuescan to false, and avoiding an unnecessary visit to
 * the page to the right.
 *
 * scan: index scan descriptor (containing a search-type scankey)
 * tuple: index tuple to test
 * tupnatts: number of attributes in tuple (high key may be truncated)
 * dir: direction we are scanning in
 * continuescan: output parameter (will be set correctly in all cases)
 * continuescanPrechecked: indicates that *continuescan flag is known to
 *						   be true for the last item on the page
 * haveFirstMatch: indicates that we already have at least one match
 *				   in the current page
 */
bool
_bt_checkkeys(IndexScanDesc scan, IndexTuple tuple, int tupnatts,
			  ScanDirection dir, bool *continuescan,
			  bool continuescanPrechecked, bool haveFirstMatch)
{
	TupleDesc	tupdesc;
	BTScanOpaque so;
	int			keysz;
	int			ikey;
	ScanKey		key;

	Assert(BTreeTupleGetNAtts(tuple, scan->indexRelation) == tupnatts);

	*continuescan = true;		/* default assumption */

	tupdesc = RelationGetDescr(scan->indexRelation);
	so = (BTScanOpaque) scan->opaque;
	keysz = so->numberOfKeys;

	for (key = so->keyData, ikey = 0; ikey < keysz; key++, ikey++)
	{
		Datum		datum;
		bool		isNull;
		Datum		test;
		bool		requiredSameDir = false,
					requiredOppositeDir = false;

		/*
		 * Check if the key is required for ordered scan in the same or
		 * opposite direction. Save as flag variables for future usage.
		 */
		if (((key->sk_flags & SK_BT_REQFWD) && ScanDirectionIsForward(dir)) ||
			((key->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsBackward(dir)))
			requiredSameDir = true;
		else if (((key->sk_flags & SK_BT_REQFWD) && ScanDirectionIsBackward(dir)) ||
				 ((key->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsForward(dir)))
			requiredOppositeDir = true;

		/*
		 * If the caller told us the *continuescan flag is known to be true
		 * for the last item on the page, then we know the keys required for
		 * the current direction scan should be matched. Otherwise, the
		 * *continuescan flag would be set for the current item and
		 * subsequently the last item on the page accordingly.
		 *
		 * If the key is required for the opposite direction scan, we can skip
		 * the check if the caller tells us there was already at least one
		 * matching item on the page. Also, we require the *continuescan flag
		 * to be true for the last item on the page to know there are no
		 * NULLs.
		 *
		 * Both cases above work except for the row keys, where NULLs could be
		 * found in the middle of matching values.
		 */
		if ((requiredSameDir || (requiredOppositeDir && haveFirstMatch)) &&
			!(key->sk_flags & SK_ROW_HEADER) && continuescanPrechecked)
			continue;

		if (key->sk_attno > tupnatts)
		{
			/*
			 * This attribute is truncated (must be high key). The value for
			 * this attribute in the first non-pivot tuple on the page to the
			 * right could be any possible value. Assume that truncated
			 * attribute passes the qual.
			 */
			Assert(ScanDirectionIsForward(dir));
			Assert(BTreeTupleIsPivot(tuple));
			continue;
		}

		/* row-comparison keys need special processing */
		if (key->sk_flags & SK_ROW_HEADER)
		{
			if (_bt_check_rowcompare(key, tuple, tupnatts, tupdesc, dir,
									 continuescan))
				continue;
			return false;
		}

		datum = index_getattr(tuple,
							  key->sk_attno,
							  tupdesc,
							  &isNull);

		if (key->sk_flags & SK_ISNULL)
		{
			/* Handle IS NULL/NOT NULL tests */
			if (key->sk_flags & SK_SEARCHNULL)
			{
				if (isNull)
					continue;	/* tuple satisfies this qual */
			}
			else
			{
				Assert(key->sk_flags & SK_SEARCHNOTNULL);
				if (!isNull)
					continue;	/* tuple satisfies this qual */
			}

			/*
			 * Tuple fails this qual. If it's a required qual for the current
			 * scan direction, then we can conclude no further tuples will
			 * pass, either.
			 */
			if (requiredSameDir)
				*continuescan = false;

			/*
			 * In any case, this indextuple doesn't match the qual.
			 */
			return false;
		}

		if (isNull)
		{
			if (key->sk_flags & SK_BT_NULLS_FIRST)
			{
				/*
				 * Since NULLs are sorted before non-NULLs, we know we have
				 * reached the lower limit of the range of values for this
				 * index attr. On a backward scan, we can stop if this qual
				 * is one of the "must match" subset. We can stop regardless
				 * of whether the qual is > or <, so long as it's required,
				 * because it's not possible for any future tuples to pass. On
				 * a forward scan, however, we must keep going, because we may
				 * have initially positioned to the start of the index.
				 */
				if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
					ScanDirectionIsBackward(dir))
					*continuescan = false;
			}
			else
			{
				/*
				 * Since NULLs are sorted after non-NULLs, we know we have
				 * reached the upper limit of the range of values for this
				 * index attr. On a forward scan, we can stop if this qual is
				 * one of the "must match" subset. We can stop regardless of
				 * whether the qual is > or <, so long as it's required,
				 * because it's not possible for any future tuples to pass. On
				 * a backward scan, however, we must keep going, because we
				 * may have initially positioned to the end of the index.
				 */
				if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
					ScanDirectionIsForward(dir))
					*continuescan = false;
			}

			/*
			 * In any case, this indextuple doesn't match the qual.
			 */
			return false;
		}

		/*
		 * Apply the key-checking function. When the key is required for the
		 * opposite-direction scan, it must already be satisfied once there
		 * is at least one match on the page, except for the NULL checks,
		 * which were already done above.
		 */
		if (!(requiredOppositeDir && haveFirstMatch))
		{
			test = FunctionCall2Coll(&key->sk_func, key->sk_collation,
									 datum, key->sk_argument);
		}
		else
		{
			test = true;
			Assert(test == FunctionCall2Coll(&key->sk_func, key->sk_collation,
											 datum, key->sk_argument));
		}

		if (!DatumGetBool(test))
		{
			/*
			 * Tuple fails this qual. If it's a required qual for the current
			 * scan direction, then we can conclude no further tuples will
			 * pass, either.
			 *
			 * Note: because we stop the scan as soon as any required equality
			 * qual fails, it is critical that equality quals be used for the
			 * initial positioning in _bt_first() when they are available. See
			 * comments in _bt_first().
			 */
			if (requiredSameDir)
				*continuescan = false;

			/*
			 * In any case, this indextuple doesn't match the qual.
			 */
			return false;
		}
	}

	/* If we get here, the tuple passes all index quals. */
	return true;
}

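/*
 * Example for exposition (a sketch, not from the original file): on a
 * forward scan with the required qual "x <= 10", once some tuple fails
 * the qual no later tuple on this page or any page to the right can
 * pass, so *continuescan is cleared and the scan ends. The
 * continuescanPrechecked optimization builds on the same page ordering:
 * if the last item on the page already satisfied every key required in
 * the scan direction, all earlier items must satisfy them too, and those
 * per-tuple evaluations can be skipped.
 */
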
/*
 * Test whether an indextuple satisfies a row-comparison scan condition.
 *
 * Return true if so, false if not. If not, also clear *continuescan if
 * it's not possible for any future tuples in the current scan direction
 * to pass the qual.
 *
 * This is a subroutine for _bt_checkkeys, which see for more info.
 */
static bool
_bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
					 TupleDesc tupdesc, ScanDirection dir, bool *continuescan)
{
	ScanKey		subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
	int32		cmpresult = 0;
	bool		result;

	/* First subkey should be same as the header says */
	Assert(subkey->sk_attno == skey->sk_attno);

	/* Loop over columns of the row condition */
	for (;;)
	{
		Datum		datum;
		bool		isNull;

		Assert(subkey->sk_flags & SK_ROW_MEMBER);

		if (subkey->sk_attno > tupnatts)
		{
			/*
			 * This attribute is truncated (must be high key). The value for
			 * this attribute in the first non-pivot tuple on the page to the
			 * right could be any possible value. Assume that truncated
			 * attribute passes the qual.
			 */
			Assert(ScanDirectionIsForward(dir));
			Assert(BTreeTupleIsPivot(tuple));
			cmpresult = 0;
			if (subkey->sk_flags & SK_ROW_END)
				break;
			subkey++;
			continue;
		}

		datum = index_getattr(tuple,
							  subkey->sk_attno,
							  tupdesc,
							  &isNull);

		if (isNull)
		{
			if (subkey->sk_flags & SK_BT_NULLS_FIRST)
			{
				/*
				 * Since NULLs are sorted before non-NULLs, we know we have
				 * reached the lower limit of the range of values for this
				 * index attr. On a backward scan, we can stop if this qual
				 * is one of the "must match" subset. We can stop regardless
				 * of whether the qual is > or <, so long as it's required,
				 * because it's not possible for any future tuples to pass. On
				 * a forward scan, however, we must keep going, because we may
				 * have initially positioned to the start of the index.
				 */
				if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
					ScanDirectionIsBackward(dir))
					*continuescan = false;
			}
			else
			{
				/*
				 * Since NULLs are sorted after non-NULLs, we know we have
				 * reached the upper limit of the range of values for this
				 * index attr. On a forward scan, we can stop if this qual is
				 * one of the "must match" subset. We can stop regardless of
				 * whether the qual is > or <, so long as it's required,
				 * because it's not possible for any future tuples to pass. On
				 * a backward scan, however, we must keep going, because we
				 * may have initially positioned to the end of the index.
				 */
				if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
					ScanDirectionIsForward(dir))
					*continuescan = false;
			}

			/*
			 * In any case, this indextuple doesn't match the qual.
			 */
			return false;
		}

		if (subkey->sk_flags & SK_ISNULL)
		{
			/*
			 * Unlike the simple-scankey case, this isn't a disallowed case.
			 * But it can never match. If all the earlier row comparison
			 * columns are required for the scan direction, we can stop the
			 * scan, because there can't be another tuple that will succeed.
			 */
			if (subkey != (ScanKey) DatumGetPointer(skey->sk_argument))
				subkey--;
			if ((subkey->sk_flags & SK_BT_REQFWD) &&
				ScanDirectionIsForward(dir))
				*continuescan = false;
			else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
					 ScanDirectionIsBackward(dir))
				*continuescan = false;
			return false;
		}

		/* Perform the test --- three-way comparison not bool operator */
		cmpresult = DatumGetInt32(FunctionCall2Coll(&subkey->sk_func,
													subkey->sk_collation,
													datum,
													subkey->sk_argument));

		if (subkey->sk_flags & SK_BT_DESC)
			INVERT_COMPARE_RESULT(cmpresult);

		/* Done comparing if unequal, else advance to next column */
		if (cmpresult != 0)
			break;

		if (subkey->sk_flags & SK_ROW_END)
			break;
		subkey++;
	}

	/*
	 * At this point cmpresult indicates the overall result of the row
	 * comparison, and subkey points to the deciding column (or the last
	 * column if the result is "=").
	 */
	switch (subkey->sk_strategy)
	{
			/* EQ and NE cases aren't allowed here */
		case BTLessStrategyNumber:
			result = (cmpresult < 0);
			break;
		case BTLessEqualStrategyNumber:
			result = (cmpresult <= 0);
			break;
		case BTGreaterEqualStrategyNumber:
			result = (cmpresult >= 0);
			break;
		case BTGreaterStrategyNumber:
			result = (cmpresult > 0);
			break;
		default:
			elog(ERROR, "unrecognized RowCompareType: %d",
				 (int) subkey->sk_strategy);
			result = 0;			/* keep compiler quiet */
			break;
	}

	if (!result)
	{
		/*
		 * Tuple fails this qual. If it's a required qual for the current
		 * scan direction, then we can conclude no further tuples will pass,
		 * either. Note we have to look at the deciding column, not
		 * necessarily the first or last column of the row condition.
		 */
		if ((subkey->sk_flags & SK_BT_REQFWD) &&
			ScanDirectionIsForward(dir))
			*continuescan = false;
		else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
				 ScanDirectionIsBackward(dir))
			*continuescan = false;
	}

	return result;
}

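/*
 * Worked example for exposition (a sketch, not from the original file):
 * for the row condition "(a, b) > (1, 2)" applied to an index tuple with
 * a = 1 and b = 3, the first column compares equal, so the loop advances
 * to b; there the three-way comparison yields cmpresult > 0, and the
 * final test "cmpresult > 0" passes. A tuple (1, 2) instead reaches the
 * SK_ROW_END subkey with cmpresult = 0, so the same test fails.
 */
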
/*
 * _bt_killitems - set LP_DEAD state for items an indexscan caller has
 * told us were killed
 *
 * scan->opaque, referenced locally through so, contains information about the
 * current page and killed tuples thereon (generally, this should only be
 * called if so->numKilled > 0).
 *
 * The caller does not have a lock on the page and may or may not have the
 * page pinned in a buffer. Note that read-lock is sufficient for setting
 * LP_DEAD status (which is only a hint).
 *
 * We match items by heap TID before assuming they are the right ones to
 * delete. We cope with cases where items have moved right due to insertions.
 * If an item has moved off the current page due to a split, we'll fail to
 * find it and do nothing (this is not an error case --- we assume the item
 * will eventually get marked in a future indexscan).
 *
 * Note that if we hold a pin on the target page continuously from initially
 * reading the items until applying this function, VACUUM cannot have deleted
 * any items from the page, and so there is no need to search left from the
 * recorded offset. (This observation also guarantees that the item is still
 * the right one to delete, which might otherwise be questionable since heap
 * TIDs can get recycled.) This holds true even if the page has been modified
 * by inserts and page splits, so there is no need to consult the LSN.
 *
 * If the pin was released after reading the page, then we re-read it. If it
 * has been modified since we read it (as determined by the LSN), we dare not
 * flag any entries because it is possible that the old entry was vacuumed
 * away and the TID was re-used by a completely different heap tuple.
 */
1775 void
1776 _bt_killitems(IndexScanDesc scan)
1777 {
1778  BTScanOpaque so = (BTScanOpaque) scan->opaque;
1779  Page page;
1780  BTPageOpaque opaque;
1781  OffsetNumber minoff;
1782  OffsetNumber maxoff;
1783  int i;
1784  int numKilled = so->numKilled;
1785  bool killedsomething = false;
1786  bool droppedpin PG_USED_FOR_ASSERTS_ONLY;
1787 
1788  Assert(BTScanPosIsValid(so->currPos));
1789 
1790  /*
1791  * Always reset the scan state, so we don't look for the same items on other
1792  * pages.
1793  */
1794  so->numKilled = 0;
1795 
1796  if (BTScanPosIsPinned(so->currPos))
1797  {
1798  /*
1799  * We have held the pin on this page since we read the index tuples,
1800  * so all we need to do is lock it. The pin will have prevented
1801  * re-use of any TID on the page, so there is no need to check the
1802  * LSN.
1803  */
1804  droppedpin = false;
1805  _bt_lockbuf(scan->indexRelation, so->currPos.buf, BT_READ);
1806 
1807  page = BufferGetPage(so->currPos.buf);
1808  }
1809  else
1810  {
1811  Buffer buf;
1812 
1813  droppedpin = true;
1814  /* Attempt to re-read the buffer, getting pin and lock. */
1815  buf = _bt_getbuf(scan->indexRelation, so->currPos.currPage, BT_READ);
1816 
1817  page = BufferGetPage(buf);
1818  if (BufferGetLSNAtomic(buf) == so->currPos.lsn)
1819  so->currPos.buf = buf;
1820  else
1821  {
1822  /* Modified while not pinned means hinting is not safe. */
1823  _bt_relbuf(scan->indexRelation, buf);
1824  return;
1825  }
1826  }
1827 
1828  opaque = BTPageGetOpaque(page);
1829  minoff = P_FIRSTDATAKEY(opaque);
1830  maxoff = PageGetMaxOffsetNumber(page);
1831 
1832  for (i = 0; i < numKilled; i++)
1833  {
1834  int itemIndex = so->killedItems[i];
1835  BTScanPosItem *kitem = &so->currPos.items[itemIndex];
1836  OffsetNumber offnum = kitem->indexOffset;
1837 
1838  Assert(itemIndex >= so->currPos.firstItem &&
1839  itemIndex <= so->currPos.lastItem);
1840  if (offnum < minoff)
1841  continue; /* pure paranoia */
1842  while (offnum <= maxoff)
1843  {
1844  ItemId iid = PageGetItemId(page, offnum);
1845  IndexTuple ituple = (IndexTuple) PageGetItem(page, iid);
1846  bool killtuple = false;
1847 
1848  if (BTreeTupleIsPosting(ituple))
1849  {
1850  int pi = i + 1;
1851  int nposting = BTreeTupleGetNPosting(ituple);
1852  int j;
1853 
1854  /*
1855  * We rely on the convention that heap TIDs in the scanpos
1856  * items array are stored in ascending heap TID order for a
1857  * group of TIDs that originally came from a posting list
1858  * tuple. This convention even applies during backwards
1859  * scans, where returning the TIDs in descending order might
1860  * seem more natural. This is about effectiveness, not
1861  * correctness.
1862  *
1863  * Note that the page may have been modified in almost any way
1864  * since we first read it (in the !droppedpin case), so it's
1865  * possible that this posting list tuple wasn't a posting list
1866  * tuple when we first encountered its heap TIDs.
1867  */
1868  for (j = 0; j < nposting; j++)
1869  {
1870  ItemPointer item = BTreeTupleGetPostingN(ituple, j);
1871 
1872  if (!ItemPointerEquals(item, &kitem->heapTid))
1873  break; /* out of posting list loop */
1874 
1875  /*
1876  * kitem must have matching offnum when heap TIDs match,
1877  * though only in the common case where the page can't
1878  * have been concurrently modified
1879  */
1880  Assert(kitem->indexOffset == offnum || !droppedpin);
1881 
1882  /*
1883  * Read-ahead to later kitems here.
1884  *
1885  * We rely on the assumption that not advancing kitem here
1886  * will prevent us from considering the posting list tuple
1887  * fully dead by not matching its next heap TID in the next
1888  * loop iteration.
1889  *
1890  * If, on the other hand, this is the final heap TID in
1891  * the posting list tuple, then tuple gets killed
1892  * regardless (i.e. we handle the case where the last
1893  * kitem is also the last heap TID in the last index tuple
1894  * correctly -- posting tuple still gets killed).
1895  */
1896  if (pi < numKilled)
1897  kitem = &so->currPos.items[so->killedItems[pi++]];
1898  }
1899 
1900  /*
1901  * Don't bother advancing the outermost loop's int iterator to
1902  * avoid processing killed items that relate to the same
1903  * offnum/posting list tuple. This micro-optimization hardly
1904  * seems worth it. (Further iterations of the outermost loop
1905  * will fail to match on this same posting list's first heap
1906  * TID instead, so we'll advance to the next offnum/index
1907  * tuple pretty quickly.)
1908  */
1909  if (j == nposting)
1910  killtuple = true;
1911  }
1912  else if (ItemPointerEquals(&ituple->t_tid, &kitem->heapTid))
1913  killtuple = true;
1914 
1915  /*
1916  * Mark index item as dead, if it isn't already. Since this
1917  * happens while holding a buffer lock possibly in shared mode,
1918  * it's possible that multiple processes attempt to do this
1919  * simultaneously, leading to multiple full-page images being sent
1920  * to WAL (if wal_log_hints or data checksums are enabled), which
1921  * is undesirable.
1922  */
1923  if (killtuple && !ItemIdIsDead(iid))
1924  {
1925  /* found the item/all posting list items */
1926  ItemIdMarkDead(iid);
1927  killedsomething = true;
1928  break; /* out of inner search loop */
1929  }
1930  offnum = OffsetNumberNext(offnum);
1931  }
1932  }
1933 
1934  /*
1935  * Since this can be redone later if needed, mark as dirty hint.
1936  *
1937  * Whenever we mark anything LP_DEAD, we also set the page's
1938  * BTP_HAS_GARBAGE flag, which is likewise just a hint. (Note that we
1939  * only rely on the page-level flag in !heapkeyspace indexes.)
1940  */
1941  if (killedsomething)
1942  {
1943  opaque->btpo_flags |= BTP_HAS_GARBAGE;
1944  MarkBufferDirtyHint(so->currPos.buf, true);
1945  }
1946 
1947  _bt_unlockbuf(scan->indexRelation, so->currPos.buf);
1948 }
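For context, the caller's half of this protocol looks roughly like the following (a condensed, hedged sketch of what btgettuple() in nbtree.c does when the executor reports scan->kill_prior_tuple; surrounding details elided):

    if (scan->kill_prior_tuple)
    {
        /* Remember the dead item; _bt_killitems() processes the batch later */
        if (so->killedItems == NULL)
            so->killedItems = (int *) palloc(MaxTIDsPerBTreePage * sizeof(int));
        if (so->numKilled < MaxTIDsPerBTreePage)
            so->killedItems[so->numKilled++] = so->currPos.itemIndex;
    }
    /* ... and before leaving the page: if (so->numKilled > 0) _bt_killitems(scan); */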
1949 
1950 
1951 /*
1952  * The following routines manage a shared-memory area in which we track
1953  * assignment of "vacuum cycle IDs" to currently-active btree vacuuming
1954  * operations. There is a single counter which increments each time we
1955  * start a vacuum to assign it a cycle ID. Since multiple vacuums could
1956  * be active concurrently, we have to track the cycle ID for each active
1957  * vacuum; this requires at most MaxBackends entries (usually far fewer).
1958  * We assume at most one vacuum can be active for a given index.
1959  *
1960  * Access to the shared memory area is controlled by BtreeVacuumLock.
1961  * In principle we could use a separate lmgr locktag for each index,
1962  * but a single LWLock is much cheaper, and given the short time that
1963  * the lock is ever held, the concurrency hit should be minimal.
1964  */
1965 
1966 typedef struct BTOneVacInfo
1967 {
1968  LockRelId relid; /* global identifier of an index */
1969  BTCycleId cycleid; /* cycle ID for its active VACUUM */
1970 } BTOneVacInfo;
1971 
1972 typedef struct BTVacInfo
1973 {
1974  BTCycleId cycle_ctr; /* cycle ID most recently assigned */
1975  int num_vacuums; /* number of currently active VACUUMs */
1976  int max_vacuums; /* allocated length of vacuums[] array */
1977  BTOneVacInfo vacuums[FLEXIBLE_ARRAY_MEMBER];
1978 } BTVacInfo;
1979 
1980 static BTVacInfo *btvacinfo;
1981 
1982 
1983 /*
1984  * _bt_vacuum_cycleid --- get the active vacuum cycle ID for an index,
1985  * or zero if there is no active VACUUM
1986  *
1987  * Note: for correct interlocking, the caller must already hold pin and
1988  * exclusive lock on each buffer it will store the cycle ID into. This
1989  * ensures that even if a VACUUM starts immediately afterwards, it cannot
1990  * process those pages until the page split is complete.
1991  */
1992 BTCycleId
1993 _bt_vacuum_cycleid(Relation rel)
1994 {
1995  BTCycleId result = 0;
1996  int i;
1997 
1998  /* Share lock is enough since this is a read-only operation */
1999  LWLockAcquire(BtreeVacuumLock, LW_SHARED);
2000 
2001  for (i = 0; i < btvacinfo->num_vacuums; i++)
2002  {
2003  BTOneVacInfo *vac = &btvacinfo->vacuums[i];
2004 
2005  if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
2006  vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
2007  {
2008  result = vac->cycleid;
2009  break;
2010  }
2011  }
2012 
2013  LWLockRelease(BtreeVacuumLock);
2014  return result;
2015 }
2016 
2017 /*
2018  * _bt_start_vacuum --- assign a cycle ID to a just-starting VACUUM operation
2019  *
2020  * Note: the caller must guarantee that it will eventually call
2021  * _bt_end_vacuum, else we'll permanently leak an array slot. To ensure
2022  * that this happens even in elog(FATAL) scenarios, the appropriate coding
2023  * is not just a PG_TRY, but
2024  * PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel))
2025  */
2026 BTCycleId
2027 _bt_start_vacuum(Relation rel)
2028 {
2029  BTCycleId result;
2030  int i;
2031  BTOneVacInfo *vac;
2032 
2033  LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
2034 
2035  /*
2036  * Assign the next cycle ID, being careful to avoid zero as well as the
2037  * reserved high values.
2038  */
2039  result = ++(btvacinfo->cycle_ctr);
2040  if (result == 0 || result > MAX_BT_CYCLE_ID)
2041  result = btvacinfo->cycle_ctr = 1;
2042 
2043  /* Let's just make sure there's no entry already for this index */
2044  for (i = 0; i < btvacinfo->num_vacuums; i++)
2045  {
2046  vac = &btvacinfo->vacuums[i];
2047  if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
2048  vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
2049  {
2050  /*
2051  * Unlike most places in the backend, we have to explicitly
2052  * release our LWLock before throwing an error. This is because
2053  * we expect _bt_end_vacuum() to be called before transaction
2054  * abort cleanup can run to release LWLocks.
2055  */
2056  LWLockRelease(BtreeVacuumLock);
2057  elog(ERROR, "multiple active vacuums for index \"%s\"",
2058  RelationGetRelationName(rel));
2059  }
2060  }
2061 
2062  /* OK, add an entry */
2063  if (btvacinfo->num_vacuums >= btvacinfo->max_vacuums)
2064  {
2065  LWLockRelease(BtreeVacuumLock);
2066  elog(ERROR, "out of btvacinfo slots");
2067  }
2068  vac = &btvacinfo->vacuums[btvacinfo->num_vacuums];
2069  vac->relid = rel->rd_lockInfo.lockRelId;
2070  vac->cycleid = result;
2071  btvacinfo->num_vacuums++;
2072 
2073  LWLockRelease(BtreeVacuumLock);
2074  return result;
2075 }
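The cleanup contract described in the header comment looks like this in practice (a sketch of the pattern used by btbulkdelete() in nbtree.c; the actual scan work is elided):

    BTCycleId   cycleid;

    /* Establish vacuum cycle ID; the ENSURE block releases the slot on error */
    PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
    {
        cycleid = _bt_start_vacuum(rel);

        /* ... perform the actual vacuum scan of the index here ... */
    }
    PG_END_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
    _bt_end_vacuum(rel);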
2076 
2077 /*
2078  * _bt_end_vacuum --- mark a btree VACUUM operation as done
2079  *
2080  * Note: this is deliberately coded not to complain if no entry is found;
2081  * this allows the caller to put PG_TRY around the start_vacuum operation.
2082  */
2083 void
2084 _bt_end_vacuum(Relation rel)
2085 {
2086  int i;
2087 
2088  LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
2089 
2090  /* Find the array entry */
2091  for (i = 0; i < btvacinfo->num_vacuums; i++)
2092  {
2093  BTOneVacInfo *vac = &btvacinfo->vacuums[i];
2094 
2095  if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
2096  vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
2097  {
2098  /* Remove it by shifting down the last entry */
2099  *vac = btvacinfo->vacuums[btvacinfo->num_vacuums - 1];
2100  btvacinfo->num_vacuums--;
2101  break;
2102  }
2103  }
2104 
2105  LWLockRelease(BtreeVacuumLock);
2106 }
2107 
2108 /*
2109  * _bt_end_vacuum wrapped as an on_shmem_exit callback function
2110  */
2111 void
2112 _bt_end_vacuum_callback(int code, Datum arg)
2113 {
2114  _bt_end_vacuum((Relation) DatumGetPointer(arg));
2115 }
2116 
2117 /*
2118  * BTreeShmemSize --- report amount of shared memory space needed
2119  */
2120 Size
2121 BTreeShmemSize(void)
2122 {
2123  Size size;
2124 
2125  size = offsetof(BTVacInfo, vacuums);
2126  size = add_size(size, mul_size(MaxBackends, sizeof(BTOneVacInfo)));
2127  return size;
2128 }
2129 
2130 /*
2131  * BTreeShmemInit --- initialize this module's shared memory
2132  */
2133 void
2134 BTreeShmemInit(void)
2135 {
2136  bool found;
2137 
2138  btvacinfo = (BTVacInfo *) ShmemInitStruct("BTree Vacuum State",
2139  BTreeShmemSize(),
2140  &found);
2141 
2142  if (!IsUnderPostmaster)
2143  {
2144  /* Initialize shared memory area */
2145  Assert(!found);
2146 
2147  /*
2148  * It doesn't really matter what the cycle counter starts at, but
2149  * having it always start the same doesn't seem good. Seed with
2150  * low-order bits of time() instead.
2151  */
2152  btvacinfo->cycle_ctr = (BTCycleId) time(NULL);
2153 
2154  btvacinfo->num_vacuums = 0;
2155  btvacinfo->max_vacuums = MaxBackends;
2156  }
2157  else
2158  Assert(found);
2159 }
2160 
2161 bytea *
2162 btoptions(Datum reloptions, bool validate)
2163 {
2164  static const relopt_parse_elt tab[] = {
2165  {"fillfactor", RELOPT_TYPE_INT, offsetof(BTOptions, fillfactor)},
2166  {"vacuum_cleanup_index_scale_factor", RELOPT_TYPE_REAL,
2167  offsetof(BTOptions, vacuum_cleanup_index_scale_factor)},
2168  {"deduplicate_items", RELOPT_TYPE_BOOL,
2169  offsetof(BTOptions, deduplicate_items)}
2170  };
2171 
2172  return (bytea *) build_reloptions(reloptions, validate,
2173  RELOPT_KIND_BTREE,
2174  sizeof(BTOptions),
2175  tab, lengthof(tab));
2176 }
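For illustration (hypothetical table and index names), these entries are what connect the btree's storage parameters to SQL:

/*
 * CREATE INDEX idx ON tab (col) WITH (fillfactor = 90, deduplicate_items = off);
 *
 * build_reloptions() parses the WITH list against the table above and hands
 * the result back as a filled-in BTOptions struct.
 */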
2177 
2178 /*
2179  * btproperty() -- Check boolean properties of indexes.
2180  *
2181  * This is optional, but handling AMPROP_RETURNABLE here saves opening the rel
2182  * to call btcanreturn.
2183  */
2184 bool
2185 btproperty(Oid index_oid, int attno,
2186  IndexAMProperty prop, const char *propname,
2187  bool *res, bool *isnull)
2188 {
2189  switch (prop)
2190  {
2191  case AMPROP_RETURNABLE:
2192  /* answer only for columns, not AM or whole index */
2193  if (attno == 0)
2194  return false;
2195  /* otherwise, btree can always return data */
2196  *res = true;
2197  return true;
2198 
2199  default:
2200  return false; /* punt to generic code */
2201  }
2202 }
2203 
2204 /*
2205  * btbuildphasename() -- Return name of index build phase.
2206  */
2207 char *
2208 btbuildphasename(int64 phasenum)
2209 {
2210  switch (phasenum)
2211  {
2212  case PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE:
2213  return "initializing";
2214  case PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN:
2215  return "scanning table";
2216  case PROGRESS_BTREE_PHASE_PERFORMSORT_1:
2217  return "sorting live tuples";
2218  case PROGRESS_BTREE_PHASE_PERFORMSORT_2:
2219  return "sorting dead tuples";
2220  case PROGRESS_BTREE_PHASE_LEAF_LOAD:
2221  return "loading tuples in tree";
2222  default:
2223  return NULL;
2224  }
2225 }
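These strings surface to users through the CREATE INDEX progress view; for example:

/*
 * SELECT phase FROM pg_stat_progress_create_index;
 *     -- e.g. "building index: sorting live tuples"
 */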
2226 
2227 /*
2228  * _bt_truncate() -- create tuple without unneeded suffix attributes.
2229  *
2230  * Returns truncated pivot index tuple allocated in caller's memory context,
2231  * with key attributes copied from caller's firstright argument. If rel is
2232  * an INCLUDE index, non-key attributes will definitely be truncated away,
2233  * since they're not part of the key space. More aggressive suffix
2234  * truncation can take place when it's clear that the returned tuple does not
2235  * need one or more suffix key attributes. We only need to keep firstright
2236  * attributes up to and including the first non-lastleft-equal attribute.
2237  * Caller's insertion scankey is used to compare the tuples; the scankey's
2238  * argument values are not considered here.
2239  *
2240  * Note that returned tuple's t_tid offset will hold the number of attributes
2241  * present, so the original item pointer offset is not represented. Caller
2242  * should only change truncated tuple's downlink. Note also that truncated
2243  * key attributes are treated as containing "minus infinity" values by
2244  * _bt_compare().
2245  *
2246  * In the worst case (when a heap TID must be appended to distinguish lastleft
2247  * from firstright), the size of the returned tuple is the size of firstright
2248  * plus the size of an additional MAXALIGN()'d item pointer. This guarantee
2249  * is important, since callers need to stay under the 1/3 of a page
2250  * restriction on tuple size. If this routine is ever taught to truncate
2251  * within an attribute/datum, it will need to avoid returning an enlarged
2252  * tuple to caller when truncation + TOAST compression ends up enlarging the
2253  * final datum.
2254  */
2255 IndexTuple
2256 _bt_truncate(Relation rel, IndexTuple lastleft, IndexTuple firstright,
2257  BTScanInsert itup_key)
2258 {
2259  TupleDesc itupdesc = RelationGetDescr(rel);
2260  int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
2261  int keepnatts;
2262  IndexTuple pivot;
2263  IndexTuple tidpivot;
2264  ItemPointer pivotheaptid;
2265  Size newsize;
2266 
2267  /*
2268  * We should only ever truncate non-pivot tuples from leaf pages. It's
2269  * never okay to truncate when splitting an internal page.
2270  */
2271  Assert(!BTreeTupleIsPivot(lastleft) && !BTreeTupleIsPivot(firstright));
2272 
2273  /* Determine how many attributes must be kept in truncated tuple */
2274  keepnatts = _bt_keep_natts(rel, lastleft, firstright, itup_key);
2275 
2276 #ifdef DEBUG_NO_TRUNCATE
2277  /* Force truncation to be ineffective for testing purposes */
2278  keepnatts = nkeyatts + 1;
2279 #endif
2280 
2281  pivot = index_truncate_tuple(itupdesc, firstright,
2282  Min(keepnatts, nkeyatts));
2283 
2284  if (BTreeTupleIsPosting(pivot))
2285  {
2286  /*
2287  * index_truncate_tuple() just returns a straight copy of firstright
2288  * when it has no attributes to truncate. When that happens, we may
2289  * need to truncate away a posting list here instead.
2290  */
2291  Assert(keepnatts == nkeyatts || keepnatts == nkeyatts + 1);
2292  Assert(IndexRelationGetNumberOfAttributes(rel) == nkeyatts);
2293  pivot->t_info &= ~INDEX_SIZE_MASK;
2294  pivot->t_info |= MAXALIGN(BTreeTupleGetPostingOffset(firstright));
2295  }
2296 
2297  /*
2298  * If there is a distinguishing key attribute within pivot tuple, we're
2299  * done
2300  */
2301  if (keepnatts <= nkeyatts)
2302  {
2303  BTreeTupleSetNAtts(pivot, keepnatts, false);
2304  return pivot;
2305  }
2306 
2307  /*
2308  * We have to store a heap TID in the new pivot tuple, since no non-TID
2309  * key attribute value in firstright distinguishes the right side of the
2310  * split from the left side. nbtree conceptualizes this case as an
2311  * inability to truncate away any key attributes, since heap TID is
2312  * treated as just another key attribute (despite lacking a pg_attribute
2313  * entry).
2314  *
2315  * Use enlarged space that holds a copy of pivot. We need the extra space
2316  * to store a heap TID at the end (using the special pivot tuple
2317  * representation). Note that the original pivot already has firstright's
2318  * possible posting list/non-key attribute values removed at this point.
2319  */
2320  newsize = MAXALIGN(IndexTupleSize(pivot)) + MAXALIGN(sizeof(ItemPointerData));
2321  tidpivot = palloc0(newsize);
2322  memcpy(tidpivot, pivot, MAXALIGN(IndexTupleSize(pivot)));
2323  /* Cannot leak memory here */
2324  pfree(pivot);
2325 
2326  /*
2327  * Store all of firstright's key attribute values plus a tiebreaker heap
2328  * TID value in enlarged pivot tuple
2329  */
2330  tidpivot->t_info &= ~INDEX_SIZE_MASK;
2331  tidpivot->t_info |= newsize;
2332  BTreeTupleSetNAtts(tidpivot, nkeyatts, true);
2333  pivotheaptid = BTreeTupleGetHeapTID(tidpivot);
2334 
2335  /*
2336  * Lehman & Yao use lastleft as the leaf high key in all cases, but don't
2337  * consider suffix truncation. It seems like a good idea to follow that
2338  * example in cases where no truncation takes place -- use lastleft's heap
2339  * TID. (This is also the closest value to negative infinity that's
2340  * legally usable.)
2341  */
2342  ItemPointerCopy(BTreeTupleGetMaxHeapTID(lastleft), pivotheaptid);
2343 
2344  /*
2345  * We're done. Assert() that heap TID invariants hold before returning.
2346  *
2347  * Lehman and Yao require that the downlink to the right page, which is to
2348  * be inserted into the parent page in the second phase of a page split, be
2349  * a strict lower bound on items on the right page, and a non-strict upper
2350  * bound for items on the left page. Assert that heap TIDs follow these
2351  * invariants, since a heap TID value is apparently needed as a
2352  * tiebreaker.
2353  */
2354 #ifndef DEBUG_NO_TRUNCATE
2355  Assert(ItemPointerCompare(BTreeTupleGetMaxHeapTID(lastleft),
2356  BTreeTupleGetHeapTID(firstright)) < 0);
2357  Assert(ItemPointerCompare(pivotheaptid,
2358  BTreeTupleGetHeapTID(lastleft)) >= 0);
2359  Assert(ItemPointerCompare(pivotheaptid,
2360  BTreeTupleGetHeapTID(firstright)) < 0);
2361 #else
2362 
2363  /*
2364  * Those invariants aren't guaranteed to hold for lastleft + firstright
2365  * heap TID attribute values when they're considered here only because
2366  * DEBUG_NO_TRUNCATE is defined (a heap TID is probably not actually
2367  * needed as a tiebreaker). DEBUG_NO_TRUNCATE must therefore use a heap
2368  * TID value that always works as a strict lower bound for items to the
2369  * right. In particular, it must avoid using firstright's leading key
2370  * attribute values along with lastleft's heap TID value when lastleft's
2371  * TID happens to be greater than firstright's TID.
2372  */
2373  ItemPointerCopy(BTreeTupleGetHeapTID(firstright), pivotheaptid);
2374 
2375  /*
2376  * Pivot heap TID should never be fully equal to firstright. Note that
2377  * the pivot heap TID will still end up equal to lastleft's heap TID when
2378  * that's the only usable value.
2379  */
2380  ItemPointerSetOffsetNumber(pivotheaptid,
2381  OffsetNumberPrev(ItemPointerGetOffsetNumber(pivotheaptid)));
2382  Assert(ItemPointerCompare(pivotheaptid,
2383  BTreeTupleGetHeapTID(firstright)) < 0);
2384 #endif
2385 
2386  return tidpivot;
2387 }
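A worked illustration of the possible outcomes, assuming a hypothetical two-column index:

/*
 * lastleft = (5, 'aaa'), firstright = (7, 'zzz')
 *     -> _bt_keep_natts() returns 1: the first attribute already
 *        distinguishes the halves, so the pivot is just (7).
 *
 * lastleft = (5, 'aaa'), firstright = (5, 'bbb')
 *     -> returns 2: pivot is (5, 'bbb'); no key attribute can be truncated.
 *
 * lastleft and firstright equal on all key attributes
 *     -> returns nkeyatts + 1: nothing distinguishes the halves, so
 *        lastleft's max heap TID is appended as the tiebreaker, as above.
 */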
2388 
2389 /*
2390  * _bt_keep_natts - how many key attributes to keep when truncating.
2391  *
2392  * Caller provides two tuples that enclose a split point. Caller's insertion
2393  * scankey is used to compare the tuples; the scankey's argument values are
2394  * not considered here.
2395  *
2396  * This can return a number of attributes that is one greater than the
2397  * number of key attributes for the index relation. This indicates that the
2398  * caller must use a heap TID as a unique-ifier in the new pivot tuple.
2399  */
2400 static int
2401 _bt_keep_natts(Relation rel, IndexTuple lastleft, IndexTuple firstright,
2402  BTScanInsert itup_key)
2403 {
2404  int nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
2405  TupleDesc itupdesc = RelationGetDescr(rel);
2406  int keepnatts;
2407  ScanKey scankey;
2408 
2409  /*
2410  * _bt_compare() treats truncated key attributes as having the value minus
2411  * infinity, which would break searches within !heapkeyspace indexes. We
2412  * must still truncate away non-key attribute values, though.
2413  */
2414  if (!itup_key->heapkeyspace)
2415  return nkeyatts;
2416 
2417  scankey = itup_key->scankeys;
2418  keepnatts = 1;
2419  for (int attnum = 1; attnum <= nkeyatts; attnum++, scankey++)
2420  {
2421  Datum datum1,
2422  datum2;
2423  bool isNull1,
2424  isNull2;
2425 
2426  datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
2427  datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
2428 
2429  if (isNull1 != isNull2)
2430  break;
2431 
2432  if (!isNull1 &&
2433  DatumGetInt32(FunctionCall2Coll(&scankey->sk_func,
2434  scankey->sk_collation,
2435  datum1,
2436  datum2)) != 0)
2437  break;
2438 
2439  keepnatts++;
2440  }
2441 
2442  /*
2443  * Assert that _bt_keep_natts_fast() agrees with us in passing. This is
2444  * expected in an allequalimage index.
2445  */
2446  Assert(!itup_key->allequalimage ||
2447  keepnatts == _bt_keep_natts_fast(rel, lastleft, firstright));
2448 
2449  return keepnatts;
2450 }
2451 
2452 /*
2453  * _bt_keep_natts_fast - fast bitwise variant of _bt_keep_natts.
2454  *
2455  * This is exported so that a candidate split point can have its effect on
2456  * suffix truncation inexpensively evaluated ahead of time when finding a
2457  * split location. A naive bitwise approach to datum comparisons is used to
2458  * save cycles.
2459  *
2460  * The approach taken here usually provides the same answer as _bt_keep_natts
2461  * will (for the same pair of tuples from a heapkeyspace index), since the
2462  * majority of btree opclasses can never indicate that two datums are equal
2463  * unless they're bitwise equal after detoasting. When an index only has
2464  * "equal image" columns, routine is guaranteed to give the same result as
2465  * _bt_keep_natts would.
2466  *
2467  * Callers can rely on the fact that attributes considered equal here are
2468  * definitely also equal according to _bt_keep_natts, even when the index uses
2469  * an opclass or collation that is not "allequalimage"/deduplication-safe.
2470  * This weaker guarantee is good enough for the nbtsplitloc.c caller, since false
2471  * negatives generally only have the effect of making leaf page splits use a
2472  * more balanced split point.
2473  */
2474 int
2475 _bt_keep_natts_fast(Relation rel, IndexTuple lastleft, IndexTuple firstright)
2476 {
2477  TupleDesc itupdesc = RelationGetDescr(rel);
2478  int keysz = IndexRelationGetNumberOfKeyAttributes(rel);
2479  int keepnatts;
2480 
2481  keepnatts = 1;
2482  for (int attnum = 1; attnum <= keysz; attnum++)
2483  {
2484  Datum datum1,
2485  datum2;
2486  bool isNull1,
2487  isNull2;
2488  Form_pg_attribute att;
2489 
2490  datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
2491  datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
2492  att = TupleDescAttr(itupdesc, attnum - 1);
2493 
2494  if (isNull1 != isNull2)
2495  break;
2496 
2497  if (!isNull1 &&
2498  !datum_image_eq(datum1, datum2, att->attbyval, att->attlen))
2499  break;
2500 
2501  keepnatts++;
2502  }
2503 
2504  return keepnatts;
2505 }
2506 
2507 /*
2508  * _bt_check_natts() -- Verify tuple has expected number of attributes.
2509  *
2510  * Returns value indicating if the expected number of attributes were found
2511  * for a particular offset on page. This can be used as a general purpose
2512  * sanity check.
2513  *
2514  * Testing a tuple directly with BTreeTupleGetNAtts() should generally be
2515  * preferred to calling here. That's usually more convenient, and is always
2516  * more explicit. Call here instead when offnum's tuple may be a negative
2517  * infinity tuple that uses the pre-v11 on-disk representation, or when a low
2518  * context check is appropriate. This routine is as strict as possible about
2519  * what is expected on each version of btree.
2520  */
2521 bool
2522 _bt_check_natts(Relation rel, bool heapkeyspace, Page page, OffsetNumber offnum)
2523 {
2524  int16 natts = IndexRelationGetNumberOfAttributes(rel);
2525  int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
2526  BTPageOpaque opaque = BTPageGetOpaque(page);
2526  BTPageOpaque opaque = BTPageGetOpaque(page);
2527  IndexTuple itup;
2528  int tupnatts;
2529 
2530  /*
2531  * We cannot reliably test a deleted or half-dead page, since they have
2532  * dummy high keys
2533  */
2534  if (P_IGNORE(opaque))
2535  return true;
2536 
2537  Assert(offnum >= FirstOffsetNumber &&
2538  offnum <= PageGetMaxOffsetNumber(page));
2539 
2540  itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
2541  tupnatts = BTreeTupleGetNAtts(itup, rel);
2542 
2543  /* !heapkeyspace indexes do not support deduplication */
2544  if (!heapkeyspace && BTreeTupleIsPosting(itup))
2545  return false;
2546 
2547  /* Posting list tuples should never have "pivot heap TID" bit set */
2548  if (BTreeTupleIsPosting(itup) &&
2549  (ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) &
2550  BT_PIVOT_HEAP_TID_ATTR) != 0)
2551  return false;
2552 
2553  /* INCLUDE indexes do not support deduplication */
2554  if (natts != nkeyatts && BTreeTupleIsPosting(itup))
2555  return false;
2556 
2557  if (P_ISLEAF(opaque))
2558  {
2559  if (offnum >= P_FIRSTDATAKEY(opaque))
2560  {
2561  /*
2562  * Non-pivot tuple should never be explicitly marked as a pivot
2563  * tuple
2564  */
2565  if (BTreeTupleIsPivot(itup))
2566  return false;
2567 
2568  /*
2569  * Leaf tuples that are not the page high key (non-pivot tuples)
2570  * should never be truncated. (Note that tupnatts must have been
2571  * inferred, even with a posting list tuple, because only pivot
2572  * tuples store tupnatts directly.)
2573  */
2574  return tupnatts == natts;
2575  }
2576  else
2577  {
2578  /*
2579  * Rightmost page doesn't contain a page high key, so tuple was
2580  * checked above as ordinary leaf tuple
2581  */
2582  Assert(!P_RIGHTMOST(opaque));
2583 
2584  /*
2585  * !heapkeyspace high key tuple contains only key attributes. Note
2586  * that tupnatts will only have been explicitly represented in
2587  * !heapkeyspace indexes that happen to have non-key attributes.
2588  */
2589  if (!heapkeyspace)
2590  return tupnatts == nkeyatts;
2591 
2592  /* Use generic heapkeyspace pivot tuple handling */
2593  }
2594  }
2595  else /* !P_ISLEAF(opaque) */
2596  {
2597  if (offnum == P_FIRSTDATAKEY(opaque))
2598  {
2599  /*
2600  * The first tuple on any internal page (possibly the first after
2601  * its high key) is its negative infinity tuple. Negative
2602  * infinity tuples are always truncated to zero attributes. They
2603  * are a particular kind of pivot tuple.
2604  */
2605  if (heapkeyspace)
2606  return tupnatts == 0;
2607 
2608  /*
2609  * The number of attributes won't be explicitly represented if the
2610  * negative infinity tuple was generated during a page split that
2611  * occurred with a version of Postgres before v11. There must be
2612  * a problem when there is an explicit representation that is
2613  * non-zero, or when there is no explicit representation and the
2614  * tuple is evidently not a pre-pg_upgrade tuple.
2615  *
2616  * Prior to v11, downlinks always had P_HIKEY as their offset.
2617  * Accept that as an alternative indication of a valid
2618  * !heapkeyspace negative infinity tuple.
2619  */
2620  return tupnatts == 0 ||
2621  ItemPointerGetOffsetNumber(&itup->t_tid) == P_HIKEY;
2622  }
2623  else
2624  {
2625  /*
2626  * !heapkeyspace downlink tuple with separator key contains only
2627  * key attributes. Note that tupnatts will only have been
2628  * explicitly represented in !heapkeyspace indexes that happen to
2629  * have non-key attributes.
2630  */
2631  if (!heapkeyspace)
2632  return tupnatts == nkeyatts;
2633 
2634  /* Use generic heapkeyspace pivot tuple handling */
2635  }
2636  }
2637 
2638  /* Handle heapkeyspace pivot tuples (excluding minus infinity items) */
2639  Assert(heapkeyspace);
2640 
2641  /*
2642  * Explicit representation of the number of attributes is mandatory with
2643  * heapkeyspace index pivot tuples, regardless of whether or not there are
2644  * non-key attributes.
2645  */
2646  if (!BTreeTupleIsPivot(itup))
2647  return false;
2648 
2649  /* Pivot tuple should not use posting list representation (redundant) */
2650  if (BTreeTupleIsPosting(itup))
2651  return false;
2652 
2653  /*
2654  * Heap TID is a tiebreaker key attribute, so it cannot be untruncated
2655  * when any other key attribute is truncated
2656  */
2657  if (BTreeTupleGetHeapTID(itup) != NULL && tupnatts != nkeyatts)
2658  return false;
2659 
2660  /*
2661  * Pivot tuple must have at least one untruncated key attribute (minus
2662  * infinity pivot tuples are the only exception). Pivot tuples can never
2663  * represent that there is a value present for a key attribute that
2664  * exceeds pg_index.indnkeyatts for the index.
2665  */
2666  return tupnatts > 0 && tupnatts <= nkeyatts;
2667 }
2668 
2669 /*
2670  *
2671  * _bt_check_third_page() -- check whether tuple fits on a btree page at all.
2672  *
2673  * We actually need to be able to fit three items on every page, so restrict
2674  * any one item to 1/3 the per-page available space. Note that itemsz should
2675  * not include the ItemId overhead.
2676  *
2677  * It might be useful to apply TOAST methods rather than throw an error here.
2678  * Using out of line storage would break assumptions made by suffix truncation
2679  * and by contrib/amcheck, though.
2680  */
2681 void
2682 _bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace,
2683  Page page, IndexTuple newtup)
2684 {
2685  Size itemsz;
2686  BTPageOpaque opaque;
2687 
2688  itemsz = MAXALIGN(IndexTupleSize(newtup));
2689 
2690  /* Double check item size against limit */
2691  if (itemsz <= BTMaxItemSize(page))
2692  return;
2693 
2694  /*
2695  * Tuple is probably too large to fit on page, but it's possible that the
2696  * index uses version 2 or version 3, or that page is an internal page, in
2697  * which case a slightly higher limit applies.
2698  */
2699  if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid(page))
2700  return;
2701 
2702  /*
2703  * Internal page insertions cannot fail here, because that would mean that
2704  * an earlier leaf level insertion that should have failed didn't
2705  */
2706  opaque = BTPageGetOpaque(page);
2707  if (!P_ISLEAF(opaque))
2708  elog(ERROR, "cannot insert oversized tuple of size %zu on internal page of index \"%s\"",
2709  itemsz, RelationGetRelationName(rel));
2710 
2711  ereport(ERROR,
2712  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
2713  errmsg("index row size %zu exceeds btree version %u maximum %zu for index \"%s\"",
2714  itemsz,
2715  needheaptidspace ? BTREE_VERSION : BTREE_NOVAC_VERSION,
2716  needheaptidspace ? BTMaxItemSize(page) :
2717  BTMaxItemSizeNoHeapTid(page),
2718  RelationGetRelationName(rel)),
2719  errdetail("Index row references tuple (%u,%u) in relation \"%s\".",
2720  ItemPointerGetBlockNumber(&newtup->t_tid),
2721  ItemPointerGetOffsetNumber(&newtup->t_tid),
2722  RelationGetRelationName(heap)),
2723  errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
2724  "Consider a function index of an MD5 hash of the value, "
2725  "or use full text indexing."),
2727 }
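The hint's suggestion, spelled out for a hypothetical table:

/*
 * CREATE INDEX ON documents (md5(body));
 *
 * Indexing a fixed-size digest instead of the raw value keeps every index
 * tuple far below the 1/3-of-a-page ceiling enforced here.
 */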
2728 
2729 /*
2730  * Are all attributes in rel "equality is image equality" attributes?
2731  *
2732  * We use each attribute's BTEQUALIMAGE_PROC opclass procedure. If any
2733  * opclass either lacks a BTEQUALIMAGE_PROC procedure or returns false, we
2734  * return false; otherwise we return true.
2735  *
2736  * Returned boolean value is stored in index metapage during index builds.
2737  * Deduplication can only be used when we return true.
2738  */
2739 bool
2740 _bt_allequalimage(Relation rel, bool debugmessage)
2741 {
2742  bool allequalimage = true;
2743 
2744  /* INCLUDE indexes can never support deduplication */
2745  if (IndexRelationGetNumberOfAttributes(rel) !=
2746  IndexRelationGetNumberOfKeyAttributes(rel))
2747  return false;
2748 
2749  for (int i = 0; i < IndexRelationGetNumberOfKeyAttributes(rel); i++)
2750  {
2751  Oid opfamily = rel->rd_opfamily[i];
2752  Oid opcintype = rel->rd_opcintype[i];
2753  Oid collation = rel->rd_indcollation[i];
2754  Oid equalimageproc;
2755 
2756  equalimageproc = get_opfamily_proc(opfamily, opcintype, opcintype,
2757  BTEQUALIMAGE_PROC);
2758 
2759  /*
2760  * If there is no BTEQUALIMAGE_PROC then deduplication is assumed to
2761  * be unsafe. Otherwise, actually call proc and see what it says.
2762  */
2763  if (!OidIsValid(equalimageproc) ||
2764  !DatumGetBool(OidFunctionCall1Coll(equalimageproc, collation,
2765  ObjectIdGetDatum(opcintype))))
2766  {
2767  allequalimage = false;
2768  break;
2769  }
2770  }
2771 
2772  if (debugmessage)
2773  {
2774  if (allequalimage)
2775  elog(DEBUG1, "index \"%s\" can safely use deduplication",
2776  RelationGetRelationName(rel));
2777  else
2778  elog(DEBUG1, "index \"%s\" cannot use deduplication",
2779  RelationGetRelationName(rel));
2780  }
2781 
2782  return allequalimage;
2783 }
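Two concrete cases that make this function return false, for illustration:

/*
 * numeric columns: 1.0 and 1.00 are equal under the btree comparator yet
 * are stored differently (display scale), so the opclass cannot claim that
 * equality implies image equality.
 *
 * text under a nondeterministic (e.g. case-insensitive) collation: 'foo'
 * and 'FOO' may compare equal while differing byte-wise, so the opclass's
 * BTEQUALIMAGE_PROC reports false for such collations.
 */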