PostgreSQL Source Code  git master
tuplesortvariants.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * tuplesortvariants.c
4  * Implementation of tuple sorting variants.
5  *
6  * This module handles the sorting of heap tuples, index tuples, or single
7  * Datums. The implementation is based on the generalized tuple sorting
8  * facility given in tuplesort.c. Support for other kinds of sortable objects
9  * could easily be added here, in another module, or even in an extension.
10  *
11  *
12  * Copyright (c) 2022-2024, PostgreSQL Global Development Group
13  *
14  * IDENTIFICATION
15  * src/backend/utils/sort/tuplesortvariants.c
16  *
17  *-------------------------------------------------------------------------
18  */
19 
20 #include "postgres.h"
21 
22 #include "access/brin_tuple.h"
23 #include "access/hash.h"
24 #include "access/htup_details.h"
25 #include "access/nbtree.h"
26 #include "catalog/index.h"
27 #include "executor/executor.h"
28 #include "pg_trace.h"
29 #include "utils/datum.h"
30 #include "utils/lsyscache.h"
31 #include "utils/guc.h"
32 #include "utils/tuplesort.h"
33 
34 
/* These codes are the first argument of the TRACE_POSTGRESQL_SORT_START
 * probe calls made by the tuplesort_begin_* functions below. */
35 /* sort-type codes for sort__start probes */
36 #define HEAP_SORT 0
37 #define INDEX_SORT 1
38 #define DATUM_SORT 2
39 #define CLUSTER_SORT 3
40 
/*
 * Forward declarations for the per-variant callback routines that the
 * tuplesort_begin_* functions install into TuplesortPublic
 * (removeabbrev / comparetup / writetup / readtup families).
 * NOTE(review): the doc extraction lost several continuation lines here
 * (original line numbers jump, e.g. 43, 45, 47), so a number of these
 * prototypes appear truncated -- confirm against the upstream file.
 */
41 static void removeabbrev_heap(Tuplesortstate *state, SortTuple *stups,
42  int count);
44  int count);
46  int count);
48  int count);
50  int count);
51 static int comparetup_heap(const SortTuple *a, const SortTuple *b,
53 static int comparetup_heap_tiebreak(const SortTuple *a, const SortTuple *b,
55 static void writetup_heap(Tuplesortstate *state, LogicalTape *tape,
56  SortTuple *stup);
57 static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
58  LogicalTape *tape, unsigned int len);
59 static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
61 static int comparetup_cluster_tiebreak(const SortTuple *a, const SortTuple *b,
64  SortTuple *stup);
65 static void readtup_cluster(Tuplesortstate *state, SortTuple *stup,
66  LogicalTape *tape, unsigned int tuplen);
67 static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
69 static int comparetup_index_btree_tiebreak(const SortTuple *a, const SortTuple *b,
71 static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
73 static int comparetup_index_hash_tiebreak(const SortTuple *a, const SortTuple *b,
75 static int comparetup_index_brin(const SortTuple *a, const SortTuple *b,
78  SortTuple *stup);
79 static void readtup_index(Tuplesortstate *state, SortTuple *stup,
80  LogicalTape *tape, unsigned int len);
82  SortTuple *stup);
84  LogicalTape *tape, unsigned int len);
85 static int comparetup_datum(const SortTuple *a, const SortTuple *b,
87 static int comparetup_datum_tiebreak(const SortTuple *a, const SortTuple *b,
90  SortTuple *stup);
91 static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
92  LogicalTape *tape, unsigned int len);
94 
95 /*
96  * Data structure pointed to by "TuplesortPublic.arg" for the CLUSTER case.
97  * Set by tuplesort_begin_cluster.
98  */
99 typedef struct
100 {
/* NOTE(review): extraction lost original lines 101 and 105 here; the
 * tupDesc field and the closing "} ...;" tag are referenced elsewhere in
 * this file (arg->tupDesc) but not visible -- confirm against upstream. */
102 
103  IndexInfo *indexInfo; /* info about index being used for reference */
104  EState *estate; /* for evaluating index expressions */
106 
107 /*
108  * Data structure pointed to by "TuplesortPublic.arg" for the IndexTuple case.
109  * Set by tuplesort_begin_index_xxx and used only by the IndexTuple routines.
110  */
111 typedef struct
112 {
113  Relation heapRel; /* table the index is being built on */
114  Relation indexRel; /* index being built */
/* NOTE(review): extraction lost original line 115 (the closing "} ...;"
 * tag of this typedef) -- confirm against upstream. */
116 
117 /*
118  * Data structure pointed to by "TuplesortPublic.arg" for the index_btree
119  * subcase.
120  */
121 typedef struct
122 {
/* NOTE(review): extraction lost original lines 122 and 126; usage below
 * (arg->index.heapRel etc.) implies an embedded "index" member of the
 * IndexTuple arg struct plus the closing tag -- confirm against upstream. */
123 
124  bool enforceUnique; /* complain if we find duplicate tuples */
125  bool uniqueNullsNotDistinct; /* unique constraint null treatment */
127 
128 /*
129  * Data structure pointed to by "TuplesortPublic.arg" for the index_hash
130  * subcase.
131  */
132 typedef struct
133 {
/* NOTE(review): extraction lost original lines 133 and 136-138; usage
 * below (arg->index..., arg->low_mask, arg->max_buckets) implies an
 * embedded index member, low_mask and max_buckets fields, and the closing
 * tag -- confirm against upstream. */
134 
135  uint32 high_mask; /* masks for sortable part of hash code */
139 
140 /*
141  * Data structure pointed to by "TuplesortPublic.arg" for the Datum case.
142  * Set by tuplesort_begin_datum and used only by the DatumTuple routines.
143  */
144 typedef struct
145 {
146  /* the datatype oid of Datum's to be sorted */
/* NOTE(review): extraction lost original lines 147 and 149-150; usage
 * below (arg->datumType, arg->datumTypeLen) implies those field
 * declarations plus the closing tag -- confirm against upstream. */
148  /* we need typelen in order to know how to copy the Datums. */
151 
152 /*
153  * Computing BrinTuple size with only the tuple is difficult, so we want to track
154  * the length referenced by the SortTuple. That's what BrinSortTuple is meant
155  * to do - it's essentially a BrinTuple prefixed by its length.
156  */
157 typedef struct BrinSortTuple
158 {
/* NOTE(review): extraction lost original lines 159-161; usage below
 * (bstup->tuplen, bstup->tuple) and the macro's offsetof(..., tuple)
 * imply a length field followed by the BrinTuple member and the closing
 * brace -- confirm against upstream. */
162 
163 /* Size of the BrinSortTuple, given length of the BrinTuple. */
164 #define BRINSORTTUPLE_SIZE(len) (offsetof(BrinSortTuple, tuple) + (len))
165 
166 
/*
 * tuplesort_begin_heap -- set up a Tuplesortstate for sorting (minimal)
 * heap tuples on an arbitrary list of attribute / ordering-operator /
 * collation / nulls-first keys.  Installs the heap-variant callbacks and
 * builds one SortSupport entry per key; enables the "onlyKey" fast path
 * for single-key sorts without an abbreviation converter.
 * NOTE(review): extraction lost original lines 167-168 (return type and
 * function name), 176 (the "base" declaration) and 200/202 (two callback
 * assignments) -- confirm against upstream.
 */
169  int nkeys, AttrNumber *attNums,
170  Oid *sortOperators, Oid *sortCollations,
171  bool *nullsFirstFlags,
172  int workMem, SortCoordinate coordinate, int sortopt)
173 {
174  Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
175  sortopt);
177  MemoryContext oldcontext;
178  int i;
179 
180  oldcontext = MemoryContextSwitchTo(base->maincontext);
181 
182  Assert(nkeys > 0);
183 
184 #ifdef TRACE_SORT
185  if (trace_sort)
186  elog(LOG,
187  "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
188  nkeys, workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
189 #endif
190 
191  base->nKeys = nkeys;
192 
193  TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
194  false, /* no unique check */
195  nkeys,
196  workMem,
197  sortopt & TUPLESORT_RANDOMACCESS,
198  PARALLEL_SORT(coordinate));
199 
201  base->comparetup = comparetup_heap;
203  base->writetup = writetup_heap;
204  base->readtup = readtup_heap;
205  base->haveDatum1 = true;
206  base->arg = tupDesc; /* assume we need not copy tupDesc */
207 
208  /* Prepare SortSupport data for each column */
209  base->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
210 
211  for (i = 0; i < nkeys; i++)
212  {
213  SortSupport sortKey = base->sortKeys + i;
214 
215  Assert(attNums[i] != 0);
216  Assert(sortOperators[i] != 0);
217 
218  sortKey->ssup_cxt = CurrentMemoryContext;
219  sortKey->ssup_collation = sortCollations[i];
220  sortKey->ssup_nulls_first = nullsFirstFlags[i];
221  sortKey->ssup_attno = attNums[i];
222  /* Convey if abbreviation optimization is applicable in principle */
223  sortKey->abbreviate = (i == 0 && base->haveDatum1);
224 
225  PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
226  }
227 
228  /*
229  * The "onlyKey" optimization cannot be used with abbreviated keys, since
230  * tie-breaker comparisons may be required. Typically, the optimization
231  * is only of value to pass-by-value types anyway, whereas abbreviated
232  * keys are typically only of value to pass-by-reference types.
233  */
234  if (nkeys == 1 && !base->sortKeys->abbrev_converter)
235  base->onlyKey = base->sortKeys;
236 
237  MemoryContextSwitchTo(oldcontext);
238 
239  return state;
240 }
241 
/*
 * tuplesort_begin_cluster -- set up a Tuplesortstate for sorting full heap
 * tuples per the key columns of a btree index (the CLUSTER case).  Builds
 * IndexInfo for the index, creates an EState/slot when the index has
 * expression columns (needed by FormIndexDatum), derives SortSupport
 * entries from the index's insertion scan key, and disables datum1-based
 * optimizations when the leading index column is not a simple attribute.
 * NOTE(review): extraction lost original lines 242-243 (return type and
 * name), 250/253 ("base"/"arg" declarations), 259, 265 (elog nkeys arg),
 * 269 (nKeys assignment), 278-280/283 (callback assignments) and 339
 * (the strategy ternary's result values) -- confirm against upstream.
 */
244  Relation indexRel,
245  int workMem,
246  SortCoordinate coordinate, int sortopt)
247 {
248  Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
249  sortopt);
251  BTScanInsert indexScanKey;
252  MemoryContext oldcontext;
254  int i;
255 
256  Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
257 
258  oldcontext = MemoryContextSwitchTo(base->maincontext);
260 
261 #ifdef TRACE_SORT
262  if (trace_sort)
263  elog(LOG,
264  "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
266  workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
267 #endif
268 
270 
271  TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
272  false, /* no unique check */
273  base->nKeys,
274  workMem,
275  sortopt & TUPLESORT_RANDOMACCESS,
276  PARALLEL_SORT(coordinate));
277 
281  base->writetup = writetup_cluster;
282  base->readtup = readtup_cluster;
284  base->arg = arg;
285 
286  arg->indexInfo = BuildIndexInfo(indexRel);
287 
288  /*
289  * If we don't have a simple leading attribute, we don't currently
290  * initialize datum1, so disable optimizations that require it.
291  */
292  if (arg->indexInfo->ii_IndexAttrNumbers[0] == 0)
293  base->haveDatum1 = false;
294  else
295  base->haveDatum1 = true;
296 
297  arg->tupDesc = tupDesc; /* assume we need not copy tupDesc */
298 
299  indexScanKey = _bt_mkscankey(indexRel, NULL);
300 
301  if (arg->indexInfo->ii_Expressions != NULL)
302  {
303  TupleTableSlot *slot;
304  ExprContext *econtext;
305 
306  /*
307  * We will need to use FormIndexDatum to evaluate the index
308  * expressions. To do that, we need an EState, as well as a
309  * TupleTableSlot to put the table tuples into. The econtext's
310  * scantuple has to point to that slot, too.
311  */
312  arg->estate = CreateExecutorState();
313  slot = MakeSingleTupleTableSlot(tupDesc, &TTSOpsHeapTuple);
314  econtext = GetPerTupleExprContext(arg->estate);
315  econtext->ecxt_scantuple = slot;
316  }
317 
318  /* Prepare SortSupport data for each column */
319  base->sortKeys = (SortSupport) palloc0(base->nKeys *
320  sizeof(SortSupportData));
321 
322  for (i = 0; i < base->nKeys; i++)
323  {
324  SortSupport sortKey = base->sortKeys + i;
325  ScanKey scanKey = indexScanKey->scankeys + i;
326  int16 strategy;
327 
328  sortKey->ssup_cxt = CurrentMemoryContext;
329  sortKey->ssup_collation = scanKey->sk_collation;
330  sortKey->ssup_nulls_first =
331  (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
332  sortKey->ssup_attno = scanKey->sk_attno;
333  /* Convey if abbreviation optimization is applicable in principle */
334  sortKey->abbreviate = (i == 0 && base->haveDatum1);
335 
336  Assert(sortKey->ssup_attno != 0);
337 
338  strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
340 
341  PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
342  }
343 
344  pfree(indexScanKey);
345 
346  MemoryContextSwitchTo(oldcontext);
347 
348  return state;
349 }
350 
/*
 * tuplesort_begin_index_btree -- set up a Tuplesortstate for sorting
 * IndexTuples for a btree index build.  Derives SortSupport entries from
 * the index's insertion scan key; enforceUnique/uniqueNullsNotDistinct
 * are stashed in the arg struct for the comparison routine to use when
 * complaining about duplicates.
 * NOTE(review): extraction lost original lines 351-352 (return type, name
 * and first parameter), 362/364 ("base"/"arg" declarations), 369, 379
 * (nKeys assignment), 388-390 (callback assignments) and 424 (the
 * strategy ternary's result values) -- confirm against upstream.
 */
353  Relation indexRel,
354  bool enforceUnique,
355  bool uniqueNullsNotDistinct,
356  int workMem,
357  SortCoordinate coordinate,
358  int sortopt)
359 {
360  Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
361  sortopt);
363  BTScanInsert indexScanKey;
365  MemoryContext oldcontext;
366  int i;
367 
368  oldcontext = MemoryContextSwitchTo(base->maincontext);
370 
371 #ifdef TRACE_SORT
372  if (trace_sort)
373  elog(LOG,
374  "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
375  enforceUnique ? 't' : 'f',
376  workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
377 #endif
378 
380 
381  TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
382  enforceUnique,
383  base->nKeys,
384  workMem,
385  sortopt & TUPLESORT_RANDOMACCESS,
386  PARALLEL_SORT(coordinate));
387 
391  base->writetup = writetup_index;
392  base->readtup = readtup_index;
393  base->haveDatum1 = true;
394  base->arg = arg;
395 
396  arg->index.heapRel = heapRel;
397  arg->index.indexRel = indexRel;
398  arg->enforceUnique = enforceUnique;
399  arg->uniqueNullsNotDistinct = uniqueNullsNotDistinct;
400 
401  indexScanKey = _bt_mkscankey(indexRel, NULL);
402 
403  /* Prepare SortSupport data for each column */
404  base->sortKeys = (SortSupport) palloc0(base->nKeys *
405  sizeof(SortSupportData));
406 
407  for (i = 0; i < base->nKeys; i++)
408  {
409  SortSupport sortKey = base->sortKeys + i;
410  ScanKey scanKey = indexScanKey->scankeys + i;
411  int16 strategy;
412 
413  sortKey->ssup_cxt = CurrentMemoryContext;
414  sortKey->ssup_collation = scanKey->sk_collation;
415  sortKey->ssup_nulls_first =
416  (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
417  sortKey->ssup_attno = scanKey->sk_attno;
418  /* Convey if abbreviation optimization is applicable in principle */
419  sortKey->abbreviate = (i == 0 && base->haveDatum1);
420 
421  Assert(sortKey->ssup_attno != 0);
422 
423  strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
425 
426  PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
427  }
428 
429  pfree(indexScanKey);
430 
431  MemoryContextSwitchTo(oldcontext);
432 
433  return state;
434 }
435 
/*
 * tuplesort_begin_index_hash -- set up a Tuplesortstate for sorting
 * IndexTuples for a hash index build.  The single sort key is the hash
 * code; high_mask/low_mask/max_buckets are stashed in the arg struct for
 * the hash comparison routine.
 * NOTE(review): extraction lost original lines 436-437 (return type, name
 * and first parameter), 448/450 ("base"/"arg" declarations), 453 and
 * 469-471 (removeabbrev/comparetup callback assignments) -- confirm
 * against upstream.
 */
438  Relation indexRel,
439  uint32 high_mask,
440  uint32 low_mask,
441  uint32 max_buckets,
442  int workMem,
443  SortCoordinate coordinate,
444  int sortopt)
445 {
446  Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
447  sortopt);
449  MemoryContext oldcontext;
451 
452  oldcontext = MemoryContextSwitchTo(base->maincontext);
454 
455 #ifdef TRACE_SORT
456  if (trace_sort)
457  elog(LOG,
458  "begin index sort: high_mask = 0x%x, low_mask = 0x%x, "
459  "max_buckets = 0x%x, workMem = %d, randomAccess = %c",
460  high_mask,
461  low_mask,
462  max_buckets,
463  workMem,
464  sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
465 #endif
466 
467  base->nKeys = 1; /* Only one sort column, the hash code */
468 
472  base->writetup = writetup_index;
473  base->readtup = readtup_index;
474  base->haveDatum1 = true;
475  base->arg = arg;
476 
477  arg->index.heapRel = heapRel;
478  arg->index.indexRel = indexRel;
479 
480  arg->high_mask = high_mask;
481  arg->low_mask = low_mask;
482  arg->max_buckets = max_buckets;
483 
484  MemoryContextSwitchTo(oldcontext);
485 
486  return state;
487 }
488 
/*
 * tuplesort_begin_index_gist -- set up a Tuplesortstate for sorting
 * IndexTuples for a GiST index build.  One SortSupport entry per key
 * column, prepared via each opclass's sortsupport function; uniqueness
 * enforcement is disabled.
 * NOTE(review): extraction lost original lines 489-490 (return type, name
 * and first parameter), 498/500 ("base"/"arg" declarations), 504, 513
 * (nKeys assignment) and 515-517 (callback assignments) -- confirm
 * against upstream.
 */
491  Relation indexRel,
492  int workMem,
493  SortCoordinate coordinate,
494  int sortopt)
495 {
496  Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
497  sortopt);
499  MemoryContext oldcontext;
501  int i;
502 
503  oldcontext = MemoryContextSwitchTo(base->maincontext);
505 
506 #ifdef TRACE_SORT
507  if (trace_sort)
508  elog(LOG,
509  "begin index sort: workMem = %d, randomAccess = %c",
510  workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
511 #endif
512 
514 
518  base->writetup = writetup_index;
519  base->readtup = readtup_index;
520  base->haveDatum1 = true;
521  base->arg = arg;
522 
523  arg->index.heapRel = heapRel;
524  arg->index.indexRel = indexRel;
525  arg->enforceUnique = false;
526  arg->uniqueNullsNotDistinct = false;
527 
528  /* Prepare SortSupport data for each column */
529  base->sortKeys = (SortSupport) palloc0(base->nKeys *
530  sizeof(SortSupportData));
531 
532  for (i = 0; i < base->nKeys; i++)
533  {
534  SortSupport sortKey = base->sortKeys + i;
535 
536  sortKey->ssup_cxt = CurrentMemoryContext;
537  sortKey->ssup_collation = indexRel->rd_indcollation[i];
538  sortKey->ssup_nulls_first = false;
539  sortKey->ssup_attno = i + 1;
540  /* Convey if abbreviation optimization is applicable in principle */
541  sortKey->abbreviate = (i == 0 && base->haveDatum1);
542 
543  Assert(sortKey->ssup_attno != 0);
544 
545  /* Look for a sort support function */
546  PrepareSortSupportFromGistIndexRel(indexRel, sortKey);
547  }
548 
549  MemoryContextSwitchTo(oldcontext);
550 
551  return state;
552 }
553 
/*
 * tuplesort_begin_index_brin -- set up a Tuplesortstate for sorting BRIN
 * tuples.  The single sort key is the block number, so no SortSupport
 * array and no arg struct are needed (base->arg is NULL).
 * NOTE(review): extraction lost original lines 553-555 (return type, name
 * and the workMem parameter), 561 ("base" declaration), and 573-575
 * (removeabbrev/comparetup/writetup callback assignments) -- confirm
 * against upstream.
 */
556  SortCoordinate coordinate,
557  int sortopt)
558 {
559  Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
560  sortopt);
562 
563 #ifdef TRACE_SORT
564  if (trace_sort)
565  elog(LOG,
566  "begin index sort: workMem = %d, randomAccess = %c",
567  workMem,
568  sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
569 #endif
570 
571  base->nKeys = 1; /* Only one sort column, the block number */
572 
576  base->readtup = readtup_index_brin;
577  base->haveDatum1 = true;
578  base->arg = NULL;
579 
580  return state;
581 }
582 
/*
 * tuplesort_begin_datum -- set up a Tuplesortstate for sorting bare Datums
 * of a single type.  Looks up typlen/typbyval to decide how Datums are
 * copied; abbreviation is enabled only for by-reference types (a datum
 * sort keeps just one copy of each value, so the original cannot be
 * re-fetched from a tuple).
 * NOTE(review): extraction lost original lines 583 (return type), 590-591
 * ("base"/"arg" declarations), 597, 615-617 (callback assignments) and
 * 633 (sortKeys->ssup_cxt assignment) -- confirm against upstream.
 */
584 tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
585  bool nullsFirstFlag, int workMem,
586  SortCoordinate coordinate, int sortopt)
587 {
588  Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
589  sortopt);
592  MemoryContext oldcontext;
593  int16 typlen;
594  bool typbyval;
595 
596  oldcontext = MemoryContextSwitchTo(base->maincontext);
598 
599 #ifdef TRACE_SORT
600  if (trace_sort)
601  elog(LOG,
602  "begin datum sort: workMem = %d, randomAccess = %c",
603  workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
604 #endif
605 
606  base->nKeys = 1; /* always a one-column sort */
607 
608  TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
609  false, /* no unique check */
610  1,
611  workMem,
612  sortopt & TUPLESORT_RANDOMACCESS,
613  PARALLEL_SORT(coordinate));
614 
618  base->writetup = writetup_datum;
619  base->readtup = readtup_datum;
620  base->haveDatum1 = true;
621  base->arg = arg;
622 
623  arg->datumType = datumType;
624 
625  /* lookup necessary attributes of the datum type */
626  get_typlenbyval(datumType, &typlen, &typbyval);
627  arg->datumTypeLen = typlen;
628  base->tuples = !typbyval;
629 
630  /* Prepare SortSupport data */
631  base->sortKeys = (SortSupport) palloc0(sizeof(SortSupportData));
632 
634  base->sortKeys->ssup_collation = sortCollation;
635  base->sortKeys->ssup_nulls_first = nullsFirstFlag;
636 
637  /*
638  * Abbreviation is possible here only for by-reference types. In theory,
639  * a pass-by-value datatype could have an abbreviated form that is cheaper
640  * to compare. In a tuple sort, we could support that, because we can
641  * always extract the original datum from the tuple as needed. Here, we
642  * can't, because a datum sort only stores a single copy of the datum; the
643  * "tuple" field of each SortTuple is NULL.
644  */
645  base->sortKeys->abbreviate = !typbyval;
646 
647  PrepareSortSupportFromOrderingOp(sortOperator, base->sortKeys);
648 
649  /*
650  * The "onlyKey" optimization cannot be used with abbreviated keys, since
651  * tie-breaker comparisons may be required. Typically, the optimization
652  * is only of value to pass-by-value types anyway, whereas abbreviated
653  * keys are typically only of value to pass-by-reference types.
654  */
655  if (!base->sortKeys->abbrev_converter)
656  base->onlyKey = base->sortKeys;
657 
658  MemoryContextSwitchTo(oldcontext);
659 
660  return state;
661 }
662 
663 /*
664  * Accept one tuple while collecting input data for sort.
665  *
666  * Note that the input data is always copied; the caller need not save it.
667  *
668  * NOTE(review): extraction lost original lines 669 (function name and
669  * parameters), 671-672 ("base"/"oldcontext" declarations) and 689 (the
670  * call that hands stup to the common put path) -- confirm against
671  * upstream.
672  */
668 void
670 {
673  TupleDesc tupDesc = (TupleDesc) base->arg;
674  SortTuple stup;
675  MinimalTuple tuple;
676  HeapTupleData htup;
677 
678  /* copy the tuple into sort storage */
679  tuple = ExecCopySlotMinimalTuple(slot);
680  stup.tuple = (void *) tuple;
681  /* set up first-column key value */
682  htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
683  htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
684  stup.datum1 = heap_getattr(&htup,
685  base->sortKeys[0].ssup_attno,
686  tupDesc,
687  &stup.isnull1);
688 
690  base->sortKeys->abbrev_converter &&
691  !stup.isnull1);
692 
693  MemoryContextSwitchTo(oldcontext);
694 }
695 
696 /*
697  * Accept one tuple while collecting input data for sort.
698  *
699  * Note that the input data is always copied; the caller need not save it.
700  *
701  * NOTE(review): extraction lost original lines 702 (function name and
702  * parameters), 705-707 ("base"/"arg"/"oldcontext" declarations) and 725
703  * (the call into the common put path) -- confirm against upstream.
704  */
701 void
703 {
704  SortTuple stup;
708 
709  /* copy the tuple into sort storage */
710  tup = heap_copytuple(tup);
711  stup.tuple = (void *) tup;
712 
713  /*
714  * set up first-column key value, and potentially abbreviate, if it's a
715  * simple column
716  */
717  if (base->haveDatum1)
718  {
719  stup.datum1 = heap_getattr(tup,
720  arg->indexInfo->ii_IndexAttrNumbers[0],
721  arg->tupDesc,
722  &stup.isnull1);
723  }
724 
726  base->haveDatum1 &&
727  base->sortKeys->abbrev_converter &&
728  !stup.isnull1);
729 
730  MemoryContextSwitchTo(oldcontext);
731 }
732 
733 /*
734  * Collect one index tuple while collecting input data for sort, building
735  * it from caller-supplied values.
736  *
737  * NOTE(review): extraction lost original lines 738 (function name and
738  * first parameters), 744-745 ("base"/"arg" declarations), 747 (the
739  * index_form_tuple call producing stup.tuple) and 757 (the call into the
740  * common put path) -- confirm against upstream.
741  */
737 void
739  ItemPointer self, const Datum *values,
740  const bool *isnull)
741 {
742  SortTuple stup;
743  IndexTuple tuple;
746 
748  isnull, base->tuplecontext);
749  tuple = ((IndexTuple) stup.tuple);
750  tuple->t_tid = *self;
751  /* set up first-column key value */
752  stup.datum1 = index_getattr(tuple,
753  1,
754  RelationGetDescr(arg->indexRel),
755  &stup.isnull1);
756 
758  base->sortKeys &&
759  base->sortKeys->abbrev_converter &&
760  !stup.isnull1);
761 }
762 
763 /*
764  * Collect one BRIN tuple while collecting input data for sort.
765  *
766  * The tuple is copied into a palloc'd BrinSortTuple (length prefix plus
767  * tuple body); datum1 is the tuple's block number, never NULL.
768  *
769  * NOTE(review): extraction lost original lines 767 (function name and
770  * parameters), 771-772 ("base"/"oldcontext" declarations) and 784 (the
771  * call into the common put path) -- confirm against upstream.
772  */
766 void
768 {
769  SortTuple stup;
770  BrinSortTuple *bstup;
773 
774  /* allocate space for the whole BRIN sort tuple */
775  bstup = palloc(BRINSORTTUPLE_SIZE(size));
776 
777  bstup->tuplen = size;
778  memcpy(&bstup->tuple, tuple, size);
779 
780  stup.tuple = bstup;
781  stup.datum1 = tuple->bt_blkno;
782  stup.isnull1 = false;
783 
785  base->sortKeys &&
786  base->sortKeys->abbrev_converter &&
787  !stup.isnull1);
788 
789  MemoryContextSwitchTo(oldcontext);
790 }
791 
792 /*
793  * Accept one Datum while collecting input data for sort.
794  *
795  * If the Datum is pass-by-ref type, the value will be copied.
796  *
797  * NOTE(review): extraction lost original lines 798 (function name and
798  * parameters), 800-802 ("base"/"arg"/"oldcontext" declarations) and 834
799  * (the call into the common put path) -- confirm against upstream.
800  */
797 void
799 {
803  SortTuple stup;
804 
805  /*
806  * Pass-by-value types or null values are just stored directly in
807  * stup.datum1 (and stup.tuple is not used and set to NULL).
808  *
809  * Non-null pass-by-reference values need to be copied into memory we
810  * control, and possibly abbreviated. The copied value is pointed to by
811  * stup.tuple and is treated as the canonical copy (e.g. to return via
812  * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
813  * abbreviated value if abbreviation is happening, otherwise it's
814  * identical to stup.tuple.
815  */
816 
817  if (isNull || !base->tuples)
818  {
819  /*
820  * Set datum1 to zeroed representation for NULLs (to be consistent,
821  * and to support cheap inequality tests for NULL abbreviated keys).
822  */
823  stup.datum1 = !isNull ? val : (Datum) 0;
824  stup.isnull1 = isNull;
825  stup.tuple = NULL; /* no separate storage */
826  }
827  else
828  {
829  stup.isnull1 = false;
830  stup.datum1 = datumCopy(val, false, arg->datumTypeLen);
831  stup.tuple = DatumGetPointer(stup.datum1);
832  }
833 
835  base->tuples &&
836  base->sortKeys->abbrev_converter && !isNull);
837 
838  MemoryContextSwitchTo(oldcontext);
839 }
840 
841 /*
842  * Fetch the next tuple in either forward or back direction.
843  * If successful, put tuple in slot and return true; else, clear the slot
844  * and return false.
845  *
846  * Caller may optionally be passed back abbreviated value (on true return
847  * value) when abbreviation was used, which can be used to cheaply avoid
848  * equality checks that might otherwise be required. Caller can safely make a
849  * determination of "non-equal tuple" based on simple binary inequality. A
850  * NULL value in leading attribute will set abbreviated value to zeroed
851  * representation, which caller may rely on in abbreviated inequality check.
852  *
853  * If copy is true, the slot receives a tuple that's been copied into the
854  * caller's memory context, so that it will stay valid regardless of future
855  * manipulations of the tuplesort's state (up to and including deleting the
856  * tuplesort). If copy is false, the slot will just receive a pointer to a
857  * tuple held within the tuplesort, which is more efficient, but only safe for
858  * callers that are prepared to have any subsequent manipulation of the
859  * tuplesort's state invalidate slot contents.
860  *
861  * NOTE(review): extraction lost original lines 865 (the "base"
862  * declaration) and 881 (the statement executed when copy is true, before
863  * storing the tuple) -- confirm against upstream.
864  */
861 bool
862 tuplesort_gettupleslot(Tuplesortstate *state, bool forward, bool copy,
863  TupleTableSlot *slot, Datum *abbrev)
864 {
866  MemoryContext oldcontext = MemoryContextSwitchTo(base->sortcontext);
867  SortTuple stup;
868 
869  if (!tuplesort_gettuple_common(state, forward, &stup))
870  stup.tuple = NULL;
871 
872  MemoryContextSwitchTo(oldcontext);
873 
874  if (stup.tuple)
875  {
876  /* Record abbreviated key for caller */
877  if (base->sortKeys->abbrev_converter && abbrev)
878  *abbrev = stup.datum1;
879 
880  if (copy)
882 
883  ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, copy);
884  return true;
885  }
886  else
887  {
888  ExecClearTuple(slot);
889  return false;
890  }
891 }
892 
893 /*
894  * Fetch the next tuple in either forward or back direction.
895  * Returns NULL if no more tuples. Returned tuple belongs to tuplesort memory
896  * context, and must not be freed by caller. Caller may not rely on tuple
897  * remaining valid after any further manipulation of tuplesort.
898  *
899  * NOTE(review): extraction lost original lines 900 (function name and
900  * parameters) and 902 (the "base" declaration) -- confirm against
901  * upstream.
902  */
899 HeapTuple
901 {
903  MemoryContext oldcontext = MemoryContextSwitchTo(base->sortcontext);
904  SortTuple stup;
905 
906  if (!tuplesort_gettuple_common(state, forward, &stup))
907  stup.tuple = NULL;
908 
909  MemoryContextSwitchTo(oldcontext);
910 
911  return stup.tuple;
912 }
913 
914 /*
915  * Fetch the next index tuple in either forward or back direction.
916  * Returns NULL if no more tuples. Returned tuple belongs to tuplesort memory
917  * context, and must not be freed by caller. Caller may not rely on tuple
918  * remaining valid after any further manipulation of tuplesort.
919  *
920  * NOTE(review): extraction lost original lines 920-921 (return type,
921  * function name and parameters) and 923 (the "base" declaration) --
922  * confirm against upstream.
923  */
922 {
924  MemoryContext oldcontext = MemoryContextSwitchTo(base->sortcontext);
925  SortTuple stup;
926 
927  if (!tuplesort_gettuple_common(state, forward, &stup))
928  stup.tuple = NULL;
929 
930  MemoryContextSwitchTo(oldcontext);
931 
932  return (IndexTuple) stup.tuple;
933 }
934 
935 /*
936  * Fetch the next BRIN tuple in either forward or back direction.
937  * Returns NULL if no more tuples. Returned tuple belongs to tuplesort memory
938  * context, and must not be freed by caller. Caller may not rely on tuple
939  * remaining valid after any further manipulation of tuplesort.
940  *
941  * On success, also reports the tuple's length through *len (taken from
942  * the BrinSortTuple length prefix).
943  *
944  * NOTE(review): extraction lost original lines 942 (function name and
945  * parameters) and 944 (the "base" declaration) -- confirm against
946  * upstream.
947  */
941 BrinTuple *
943 {
945  MemoryContext oldcontext = MemoryContextSwitchTo(base->sortcontext);
946  SortTuple stup;
947  BrinSortTuple *btup;
948 
949  if (!tuplesort_gettuple_common(state, forward, &stup))
950  stup.tuple = NULL;
951 
952  MemoryContextSwitchTo(oldcontext);
953 
954  if (!stup.tuple)
955  return NULL;
956 
957  btup = (BrinSortTuple *) stup.tuple;
958 
959  *len = btup->tuplen;
960 
961  return &btup->tuple;
962 }
963 
964 /*
965  * Fetch the next Datum in either forward or back direction.
966  * Returns false if no more datums.
967  *
968  * If the Datum is pass-by-ref type, the returned value is freshly palloc'd
969  * in caller's context, and is now owned by the caller (this differs from
970  * similar routines for other types of tuplesorts).
971  *
972  * Caller may optionally be passed back abbreviated value (on true return
973  * value) when abbreviation was used, which can be used to cheaply avoid
974  * equality checks that might otherwise be required. Caller can safely make a
975  * determination of "non-equal tuple" based on simple binary inequality. A
976  * NULL value will have a zeroed abbreviated value representation, which caller
977  * may rely on in abbreviated inequality check.
978  *
979  * For byref Datums, if copy is true, *val is set to a copy of the Datum
980  * copied into the caller's memory context, so that it will stay valid
981  * regardless of future manipulations of the tuplesort's state (up to and
982  * including deleting the tuplesort). If copy is false, *val will just be
983  * set to a pointer to the Datum held within the tuplesort, which is more
984  * efficient, but only safe for callers that are prepared to have any
985  * subsequent manipulation of the tuplesort's state invalidate slot contents.
986  * For byval Datums, the value of the 'copy' parameter has no effect.
987  *
988  * NOTE(review): extraction lost original lines 993/995 (the "base" and
989  * "arg" declarations used below) -- confirm against upstream.
990  */
989 bool
990 tuplesort_getdatum(Tuplesortstate *state, bool forward, bool copy,
991  Datum *val, bool *isNull, Datum *abbrev)
992 {
994  MemoryContext oldcontext = MemoryContextSwitchTo(base->sortcontext);
996  SortTuple stup;
997 
998  if (!tuplesort_gettuple_common(state, forward, &stup))
999  {
1000  MemoryContextSwitchTo(oldcontext);
1001  return false;
1002  }
1003 
1004  /* Ensure we copy into caller's memory context */
1005  MemoryContextSwitchTo(oldcontext);
1006 
1007  /* Record abbreviated key for caller */
1008  if (base->sortKeys->abbrev_converter && abbrev)
1009  *abbrev = stup.datum1;
1010 
1011  if (stup.isnull1 || !base->tuples)
1012  {
1013  *val = stup.datum1;
1014  *isNull = stup.isnull1;
1015  }
1016  else
1017  {
1018  /* use stup.tuple because stup.datum1 may be an abbreviation */
1019  if (copy)
1020  *val = datumCopy(PointerGetDatum(stup.tuple), false,
1021  arg->datumTypeLen);
1022  else
1023  *val = PointerGetDatum(stup.tuple);
1024  *isNull = false;
1025  }
1026 
1027  return true;
1028 }
1029 
1030 
1031 /*
1032  * Routines specialized for HeapTuple (actually MinimalTuple) case
1033  */
1034 
/*
 * removeabbrev_heap -- replace each stored abbreviated key with the real
 * leading-column value, re-fetched from the stored minimal tuple (used
 * when tuplesort abandons abbreviation).
 * NOTE(review): extraction lost original lines 1036 (function name and
 * parameters), 1039 ("base" declaration) and the continuations at
 * 1046/1048 (the MINIMAL_TUPLE_OFFSET operands) -- confirm against
 * upstream.
 */
1035 static void
1037 {
1038  int i;
1040 
1041  for (i = 0; i < count; i++)
1042  {
1043  HeapTupleData htup;
1044 
1045  htup.t_len = ((MinimalTuple) stups[i].tuple)->t_len +
1047  htup.t_data = (HeapTupleHeader) ((char *) stups[i].tuple -
1049  stups[i].datum1 = heap_getattr(&htup,
1050  base->sortKeys[0].ssup_attno,
1051  (TupleDesc) base->arg,
1052  &stups[i].isnull1);
1053  }
1054 }
1055 
/*
 * comparetup_heap -- qsort comparator for the heap case: compares the
 * cached leading-key datum1 values first, then falls back to
 * comparetup_heap_tiebreak for the remaining keys.
 * NOTE(review): extraction lost original lines 1057 (parameter list) and
 * 1059 (the "base" declaration) -- confirm against upstream.
 */
1056 static int
1058 {
1060  SortSupport sortKey = base->sortKeys;
1061  int32 compare;
1062 
1063 
1064  /* Compare the leading sort key */
1065  compare = ApplySortComparator(a->datum1, a->isnull1,
1066  b->datum1, b->isnull1,
1067  sortKey);
1068  if (compare != 0)
1069  return compare;
1070 
1071  /* Compare additional sort keys */
1072  return comparetup_heap_tiebreak(a, b, state);
1073 }
1074 
/*
 * comparetup_heap_tiebreak -- full comparison for the heap case.  When an
 * abbreviation converter is in use, first re-compares the leading key
 * with the authoritative (non-abbreviated) comparator; then compares the
 * remaining sort keys fetched directly from the minimal tuples.
 * NOTE(review): extraction lost original lines 1076 (parameter list) and
 * 1078 (the "base" declaration) -- confirm against upstream.
 */
1075 static int
1077 {
1079  SortSupport sortKey = base->sortKeys;
1080  HeapTupleData ltup;
1081  HeapTupleData rtup;
1082  TupleDesc tupDesc;
1083  int nkey;
1084  int32 compare;
1085  AttrNumber attno;
1086  Datum datum1,
1087  datum2;
1088  bool isnull1,
1089  isnull2;
1090 
1091  ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
1092  ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
1093  rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
1094  rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
1095  tupDesc = (TupleDesc) base->arg;
1096 
1097  if (sortKey->abbrev_converter)
1098  {
1099  attno = sortKey->ssup_attno;
1100 
1101  datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
1102  datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
1103 
1104  compare = ApplySortAbbrevFullComparator(datum1, isnull1,
1105  datum2, isnull2,
1106  sortKey);
1107  if (compare != 0)
1108  return compare;
1109  }
1110 
1111  sortKey++;
1112  for (nkey = 1; nkey < base->nKeys; nkey++, sortKey++)
1113  {
1114  attno = sortKey->ssup_attno;
1115 
1116  datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
1117  datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
1118 
1119  compare = ApplySortComparator(datum1, isnull1,
1120  datum2, isnull2,
1121  sortKey);
1122  if (compare != 0)
1123  return compare;
1124  }
1125 
1126  return 0;
1127 }
1128 
/*
 * writetup_heap -- write one minimal tuple to a logical tape: a length
 * word, the tuple body (header prefix stripped), and, for random access,
 * a trailing copy of the length word so the tape can be read backwards.
 * NOTE(review): extraction lost original lines 1130 (parameter list) and
 * 1132 (the "base" declaration) -- confirm against upstream.
 */
1129 static void
1131 {
1133  MinimalTuple tuple = (MinimalTuple) stup->tuple;
1134 
1135  /* the part of the MinimalTuple we'll write: */
1136  char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
1137  unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
1138 
1139  /* total on-disk footprint: */
1140  unsigned int tuplen = tupbodylen + sizeof(int);
1141 
1142  LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1143  LogicalTapeWrite(tape, tupbody, tupbodylen);
1144  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1145  LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1146 }
1147 
/*
 * readtup_heap -- inverse of writetup_heap: reconstruct a minimal tuple
 * from tape, skip the trailing length word in random-access mode, and
 * re-derive the cached leading-key datum1/isnull1 for the SortTuple.
 * NOTE(review): extraction lost original lines 1149 (function name and
 * first parameters), 1154 (the allocation of "tuple") and 1156 (the
 * "base" declaration) -- confirm against upstream.
 */
1148 static void
1150  LogicalTape *tape, unsigned int len)
1151 {
1152  unsigned int tupbodylen = len - sizeof(int);
1153  unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
1155  char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
1157  HeapTupleData htup;
1158 
1159  /* read in the tuple proper */
1160  tuple->t_len = tuplen;
1161  LogicalTapeReadExact(tape, tupbody, tupbodylen);
1162  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1163  LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1164  stup->tuple = (void *) tuple;
1165  /* set up first-column key value */
1166  htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
1167  htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
1168  stup->datum1 = heap_getattr(&htup,
1169  base->sortKeys[0].ssup_attno,
1170  (TupleDesc) base->arg,
1171  &stup->isnull1);
1172 }
1173 
1174 /*
1175  * Routines specialized for the CLUSTER case (HeapTuple data, with
1176  * comparisons per a btree index definition)
1177  */
1178 
1179 static void
1181 {
1182  int i;
1185 
1186  for (i = 0; i < count; i++)
1187  {
1188  HeapTuple tup;
1189 
1190  tup = (HeapTuple) stups[i].tuple;
1191  stups[i].datum1 = heap_getattr(tup,
1192  arg->indexInfo->ii_IndexAttrNumbers[0],
1193  arg->tupDesc,
1194  &stups[i].isnull1);
1195  }
1196 }
1197 
1198 static int
1201 {
1203  SortSupport sortKey = base->sortKeys;
1204  int32 compare;
1205 
1206  /* Compare the leading sort key, if it's simple */
1207  if (base->haveDatum1)
1208  {
1209  compare = ApplySortComparator(a->datum1, a->isnull1,
1210  b->datum1, b->isnull1,
1211  sortKey);
1212  if (compare != 0)
1213  return compare;
1214  }
1215 
1217 }
1218 
1219 static int
1222 {
1225  SortSupport sortKey = base->sortKeys;
1226  HeapTuple ltup;
1227  HeapTuple rtup;
1228  TupleDesc tupDesc;
1229  int nkey;
1230  int32 compare = 0;
1231  Datum datum1,
1232  datum2;
1233  bool isnull1,
1234  isnull2;
1235 
1236  ltup = (HeapTuple) a->tuple;
1237  rtup = (HeapTuple) b->tuple;
1238  tupDesc = arg->tupDesc;
1239 
1240  /* Compare the leading sort key, if it's simple */
1241  if (base->haveDatum1)
1242  {
1243  if (sortKey->abbrev_converter)
1244  {
1245  AttrNumber leading = arg->indexInfo->ii_IndexAttrNumbers[0];
1246 
1247  datum1 = heap_getattr(ltup, leading, tupDesc, &isnull1);
1248  datum2 = heap_getattr(rtup, leading, tupDesc, &isnull2);
1249 
1250  compare = ApplySortAbbrevFullComparator(datum1, isnull1,
1251  datum2, isnull2,
1252  sortKey);
1253  }
1254  if (compare != 0 || base->nKeys == 1)
1255  return compare;
1256  /* Compare additional columns the hard way */
1257  sortKey++;
1258  nkey = 1;
1259  }
1260  else
1261  {
1262  /* Must compare all keys the hard way */
1263  nkey = 0;
1264  }
1265 
1266  if (arg->indexInfo->ii_Expressions == NULL)
1267  {
1268  /* If not expression index, just compare the proper heap attrs */
1269 
1270  for (; nkey < base->nKeys; nkey++, sortKey++)
1271  {
1272  AttrNumber attno = arg->indexInfo->ii_IndexAttrNumbers[nkey];
1273 
1274  datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
1275  datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
1276 
1277  compare = ApplySortComparator(datum1, isnull1,
1278  datum2, isnull2,
1279  sortKey);
1280  if (compare != 0)
1281  return compare;
1282  }
1283  }
1284  else
1285  {
1286  /*
1287  * In the expression index case, compute the whole index tuple and
1288  * then compare values. It would perhaps be faster to compute only as
1289  * many columns as we need to compare, but that would require
1290  * duplicating all the logic in FormIndexDatum.
1291  */
1292  Datum l_index_values[INDEX_MAX_KEYS];
1293  bool l_index_isnull[INDEX_MAX_KEYS];
1294  Datum r_index_values[INDEX_MAX_KEYS];
1295  bool r_index_isnull[INDEX_MAX_KEYS];
1296  TupleTableSlot *ecxt_scantuple;
1297 
1298  /* Reset context each time to prevent memory leakage */
1299  ResetPerTupleExprContext(arg->estate);
1300 
1301  ecxt_scantuple = GetPerTupleExprContext(arg->estate)->ecxt_scantuple;
1302 
1303  ExecStoreHeapTuple(ltup, ecxt_scantuple, false);
1304  FormIndexDatum(arg->indexInfo, ecxt_scantuple, arg->estate,
1305  l_index_values, l_index_isnull);
1306 
1307  ExecStoreHeapTuple(rtup, ecxt_scantuple, false);
1308  FormIndexDatum(arg->indexInfo, ecxt_scantuple, arg->estate,
1309  r_index_values, r_index_isnull);
1310 
1311  for (; nkey < base->nKeys; nkey++, sortKey++)
1312  {
1313  compare = ApplySortComparator(l_index_values[nkey],
1314  l_index_isnull[nkey],
1315  r_index_values[nkey],
1316  r_index_isnull[nkey],
1317  sortKey);
1318  if (compare != 0)
1319  return compare;
1320  }
1321  }
1322 
1323  return 0;
1324 }
1325 
1326 static void
1328 {
1330  HeapTuple tuple = (HeapTuple) stup->tuple;
1331  unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int);
1332 
1333  /* We need to store t_self, but not other fields of HeapTupleData */
1334  LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1335  LogicalTapeWrite(tape, &tuple->t_self, sizeof(ItemPointerData));
1336  LogicalTapeWrite(tape, tuple->t_data, tuple->t_len);
1337  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1338  LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1339 }
1340 
1341 static void
1343  LogicalTape *tape, unsigned int tuplen)
1344 {
1347  unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
1349  t_len + HEAPTUPLESIZE);
1350 
1351  /* Reconstruct the HeapTupleData header */
1352  tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
1353  tuple->t_len = t_len;
1354  LogicalTapeReadExact(tape, &tuple->t_self, sizeof(ItemPointerData));
1355  /* We don't currently bother to reconstruct t_tableOid */
1356  tuple->t_tableOid = InvalidOid;
1357  /* Read in the tuple body */
1358  LogicalTapeReadExact(tape, tuple->t_data, tuple->t_len);
1359  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1360  LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1361  stup->tuple = (void *) tuple;
1362  /* set up first-column key value, if it's a simple column */
1363  if (base->haveDatum1)
1364  stup->datum1 = heap_getattr(tuple,
1365  arg->indexInfo->ii_IndexAttrNumbers[0],
1366  arg->tupDesc,
1367  &stup->isnull1);
1368 }
1369 
1370 static void
1372 {
1375 
1376  /* Free any execution state created for CLUSTER case */
1377  if (arg->estate != NULL)
1378  {
1379  ExprContext *econtext = GetPerTupleExprContext(arg->estate);
1380 
1382  FreeExecutorState(arg->estate);
1383  }
1384 }
1385 
1386 /*
1387  * Routines specialized for IndexTuple case
1388  *
1389  * The btree and hash cases require separate comparison functions, but the
1390  * IndexTuple representation is the same so the copy/write/read support
1391  * functions can be shared.
1392  */
1393 
1394 static void
1396 {
1399  int i;
1400 
1401  for (i = 0; i < count; i++)
1402  {
1403  IndexTuple tuple;
1404 
1405  tuple = stups[i].tuple;
1406  stups[i].datum1 = index_getattr(tuple,
1407  1,
1408  RelationGetDescr(arg->indexRel),
1409  &stups[i].isnull1);
1410  }
1411 }
1412 
1413 static int
1416 {
1417  /*
1418  * This is similar to comparetup_heap(), but expects index tuples. There
1419  * is also special handling for enforcing uniqueness, and special
1420  * treatment for equal keys at the end.
1421  */
1423  SortSupport sortKey = base->sortKeys;
1424  int32 compare;
1425 
1426  /* Compare the leading sort key */
1427  compare = ApplySortComparator(a->datum1, a->isnull1,
1428  b->datum1, b->isnull1,
1429  sortKey);
1430  if (compare != 0)
1431  return compare;
1432 
1433  /* Compare additional sort keys */
1435 }
1436 
1437 static int
1440 {
1443  SortSupport sortKey = base->sortKeys;
1444  IndexTuple tuple1;
1445  IndexTuple tuple2;
1446  int keysz;
1447  TupleDesc tupDes;
1448  bool equal_hasnull = false;
1449  int nkey;
1450  int32 compare;
1451  Datum datum1,
1452  datum2;
1453  bool isnull1,
1454  isnull2;
1455 
1456  tuple1 = (IndexTuple) a->tuple;
1457  tuple2 = (IndexTuple) b->tuple;
1458  keysz = base->nKeys;
1459  tupDes = RelationGetDescr(arg->index.indexRel);
1460 
1461  if (sortKey->abbrev_converter)
1462  {
1463  datum1 = index_getattr(tuple1, 1, tupDes, &isnull1);
1464  datum2 = index_getattr(tuple2, 1, tupDes, &isnull2);
1465 
1466  compare = ApplySortAbbrevFullComparator(datum1, isnull1,
1467  datum2, isnull2,
1468  sortKey);
1469  if (compare != 0)
1470  return compare;
1471  }
1472 
1473  /* they are equal, so we only need to examine one null flag */
1474  if (a->isnull1)
1475  equal_hasnull = true;
1476 
1477  sortKey++;
1478  for (nkey = 2; nkey <= keysz; nkey++, sortKey++)
1479  {
1480  datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
1481  datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);
1482 
1483  compare = ApplySortComparator(datum1, isnull1,
1484  datum2, isnull2,
1485  sortKey);
1486  if (compare != 0)
1487  return compare; /* done when we find unequal attributes */
1488 
1489  /* they are equal, so we only need to examine one null flag */
1490  if (isnull1)
1491  equal_hasnull = true;
1492  }
1493 
1494  /*
1495  * If btree has asked us to enforce uniqueness, complain if two equal
1496  * tuples are detected (unless there was at least one NULL field and NULLS
1497  * NOT DISTINCT was not set).
1498  *
1499  * It is sufficient to make the test here, because if two tuples are equal
1500  * they *must* get compared at some stage of the sort --- otherwise the
1501  * sort algorithm wouldn't have checked whether one must appear before the
1502  * other.
1503  */
1504  if (arg->enforceUnique && !(!arg->uniqueNullsNotDistinct && equal_hasnull))
1505  {
1507  bool isnull[INDEX_MAX_KEYS];
1508  char *key_desc;
1509 
1510  /*
1511  * Some rather brain-dead implementations of qsort (such as the one in
1512  * QNX 4) will sometimes call the comparison routine to compare a
1513  * value to itself, but we always use our own implementation, which
1514  * does not.
1515  */
1516  Assert(tuple1 != tuple2);
1517 
1518  index_deform_tuple(tuple1, tupDes, values, isnull);
1519 
1520  key_desc = BuildIndexValueDescription(arg->index.indexRel, values, isnull);
1521 
1522  ereport(ERROR,
1523  (errcode(ERRCODE_UNIQUE_VIOLATION),
1524  errmsg("could not create unique index \"%s\"",
1525  RelationGetRelationName(arg->index.indexRel)),
1526  key_desc ? errdetail("Key %s is duplicated.", key_desc) :
1527  errdetail("Duplicate keys exist."),
1528  errtableconstraint(arg->index.heapRel,
1529  RelationGetRelationName(arg->index.indexRel))));
1530  }
1531 
1532  /*
1533  * If key values are equal, we sort on ItemPointer. This is required for
1534  * btree indexes, since heap TID is treated as an implicit last key
1535  * attribute in order to ensure that all keys in the index are physically
1536  * unique.
1537  */
1538  {
1539  BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
1540  BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
1541 
1542  if (blk1 != blk2)
1543  return (blk1 < blk2) ? -1 : 1;
1544  }
1545  {
1546  OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
1547  OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
1548 
1549  if (pos1 != pos2)
1550  return (pos1 < pos2) ? -1 : 1;
1551  }
1552 
1553  /* ItemPointer values should never be equal */
1554  Assert(false);
1555 
1556  return 0;
1557 }
1558 
1559 static int
1562 {
1563  Bucket bucket1;
1564  Bucket bucket2;
1565  uint32 hash1;
1566  uint32 hash2;
1567  IndexTuple tuple1;
1568  IndexTuple tuple2;
1571 
1572  /*
1573  * Fetch hash keys and mask off bits we don't want to sort by, so that the
1574  * initial sort is just on the bucket number. We know that the first
1575  * column of the index tuple is the hash key.
1576  */
1577  Assert(!a->isnull1);
1578  bucket1 = _hash_hashkey2bucket(DatumGetUInt32(a->datum1),
1579  arg->max_buckets, arg->high_mask,
1580  arg->low_mask);
1581  Assert(!b->isnull1);
1582  bucket2 = _hash_hashkey2bucket(DatumGetUInt32(b->datum1),
1583  arg->max_buckets, arg->high_mask,
1584  arg->low_mask);
1585  if (bucket1 > bucket2)
1586  return 1;
1587  else if (bucket1 < bucket2)
1588  return -1;
1589 
1590  /*
1591  * If bucket values are equal, sort by hash values. This allows us to
1592  * insert directly onto bucket/overflow pages, where the index tuples are
1593  * stored in hash order to allow fast binary search within each page.
1594  */
1595  hash1 = DatumGetUInt32(a->datum1);
1596  hash2 = DatumGetUInt32(b->datum1);
1597  if (hash1 > hash2)
1598  return 1;
1599  else if (hash1 < hash2)
1600  return -1;
1601 
1602  /*
1603  * If hash values are equal, we sort on ItemPointer. This does not affect
1604  * validity of the finished index, but it may be useful to have index
1605  * scans in physical order.
1606  */
1607  tuple1 = (IndexTuple) a->tuple;
1608  tuple2 = (IndexTuple) b->tuple;
1609 
1610  {
1611  BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
1612  BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
1613 
1614  if (blk1 != blk2)
1615  return (blk1 < blk2) ? -1 : 1;
1616  }
1617  {
1620 
1621  if (pos1 != pos2)
1622  return (pos1 < pos2) ? -1 : 1;
1623  }
1624 
1625  /* ItemPointer values should never be equal */
1626  Assert(false);
1627 
1628  return 0;
1629 }
1630 
1631 /*
1632  * Sorting for hash indexes only uses one sort key, so this shouldn't ever be
1633  * called. It's only here for consistency.
1634  */
1635 static int
1638 {
1639  Assert(false);
1640 
1641  return 0;
1642 }
1643 
1644 static void
1646 {
1648  IndexTuple tuple = (IndexTuple) stup->tuple;
1649  unsigned int tuplen;
1650 
1651  tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
1652  LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1653  LogicalTapeWrite(tape, tuple, IndexTupleSize(tuple));
1654  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1655  LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1656 }
1657 
1658 static void
1660  LogicalTape *tape, unsigned int len)
1661 {
1664  unsigned int tuplen = len - sizeof(unsigned int);
1666 
1667  LogicalTapeReadExact(tape, tuple, tuplen);
1668  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1669  LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1670  stup->tuple = (void *) tuple;
1671  /* set up first-column key value */
1672  stup->datum1 = index_getattr(tuple,
1673  1,
1674  RelationGetDescr(arg->indexRel),
1675  &stup->isnull1);
1676 }
1677 
1678 /*
1679  * Routines specialized for BrinTuple case
1680  */
1681 
1682 static void
1684 {
1685  int i;
1686 
1687  for (i = 0; i < count; i++)
1688  {
1689  BrinSortTuple *tuple;
1690 
1691  tuple = stups[i].tuple;
1692  stups[i].datum1 = tuple->tuple.bt_blkno;
1693  }
1694 }
1695 
1696 static int
1699 {
1700  Assert(TuplesortstateGetPublic(state)->haveDatum1);
1701 
1702  if (DatumGetUInt32(a->datum1) > DatumGetUInt32(b->datum1))
1703  return 1;
1704 
1705  if (DatumGetUInt32(a->datum1) < DatumGetUInt32(b->datum1))
1706  return -1;
1707 
1708  /* silence compilers */
1709  return 0;
1710 }
1711 
1712 static void
1714 {
1716  BrinSortTuple *tuple = (BrinSortTuple *) stup->tuple;
1717  unsigned int tuplen = tuple->tuplen;
1718 
1719  tuplen = tuplen + sizeof(tuplen);
1720  LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1721  LogicalTapeWrite(tape, &tuple->tuple, tuple->tuplen);
1722  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1723  LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1724 }
1725 
1726 static void
1728  LogicalTape *tape, unsigned int len)
1729 {
1730  BrinSortTuple *tuple;
1732  unsigned int tuplen = len - sizeof(unsigned int);
1733 
1734  /*
1735  * Allocate space for the BRIN sort tuple, which is BrinTuple with an
1736  * extra length field.
1737  */
1739  BRINSORTTUPLE_SIZE(tuplen));
1740 
1741  tuple->tuplen = tuplen;
1742 
1743  LogicalTapeReadExact(tape, &tuple->tuple, tuplen);
1744  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1745  LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1746  stup->tuple = (void *) tuple;
1747 
1748  /* set up first-column key value, which is block number */
1749  stup->datum1 = tuple->tuple.bt_blkno;
1750 }
1751 
1752 /*
1753  * Routines specialized for DatumTuple case
1754  */
1755 
1756 static void
1758 {
1759  int i;
1760 
1761  for (i = 0; i < count; i++)
1762  stups[i].datum1 = PointerGetDatum(stups[i].tuple);
1763 }
1764 
1765 static int
1767 {
1769  int compare;
1770 
1771  compare = ApplySortComparator(a->datum1, a->isnull1,
1772  b->datum1, b->isnull1,
1773  base->sortKeys);
1774  if (compare != 0)
1775  return compare;
1776 
1777  return comparetup_datum_tiebreak(a, b, state);
1778 }
1779 
1780 static int
1782 {
1784  int32 compare = 0;
1785 
1786  /* if we have abbreviations, then "tuple" has the original value */
1787  if (base->sortKeys->abbrev_converter)
1789  PointerGetDatum(b->tuple), b->isnull1,
1790  base->sortKeys);
1791 
1792  return compare;
1793 }
1794 
1795 static void
1797 {
1800  void *waddr;
1801  unsigned int tuplen;
1802  unsigned int writtenlen;
1803 
1804  if (stup->isnull1)
1805  {
1806  waddr = NULL;
1807  tuplen = 0;
1808  }
1809  else if (!base->tuples)
1810  {
1811  waddr = &stup->datum1;
1812  tuplen = sizeof(Datum);
1813  }
1814  else
1815  {
1816  waddr = stup->tuple;
1817  tuplen = datumGetSize(PointerGetDatum(stup->tuple), false, arg->datumTypeLen);
1818  Assert(tuplen != 0);
1819  }
1820 
1821  writtenlen = tuplen + sizeof(unsigned int);
1822 
1823  LogicalTapeWrite(tape, &writtenlen, sizeof(writtenlen));
1824  LogicalTapeWrite(tape, waddr, tuplen);
1825  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1826  LogicalTapeWrite(tape, &writtenlen, sizeof(writtenlen));
1827 }
1828 
1829 static void
1831  LogicalTape *tape, unsigned int len)
1832 {
1834  unsigned int tuplen = len - sizeof(unsigned int);
1835 
1836  if (tuplen == 0)
1837  {
1838  /* it's NULL */
1839  stup->datum1 = (Datum) 0;
1840  stup->isnull1 = true;
1841  stup->tuple = NULL;
1842  }
1843  else if (!base->tuples)
1844  {
1845  Assert(tuplen == sizeof(Datum));
1846  LogicalTapeReadExact(tape, &stup->datum1, tuplen);
1847  stup->isnull1 = false;
1848  stup->tuple = NULL;
1849  }
1850  else
1851  {
1852  void *raddr = tuplesort_readtup_alloc(state, tuplen);
1853 
1854  LogicalTapeReadExact(tape, raddr, tuplen);
1855  stup->datum1 = PointerGetDatum(raddr);
1856  stup->isnull1 = false;
1857  stup->tuple = raddr;
1858  }
1859 
1860  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1861  LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1862 }
int16 AttrNumber
Definition: attnum.h:21
uint32 BlockNumber
Definition: block.h:31
static Datum values[MAXATTR]
Definition: bootstrap.c:156
unsigned int uint32
Definition: c.h:495
signed short int16
Definition: c.h:482
signed int int32
Definition: c.h:483
size_t Size
Definition: c.h:594
Datum datumCopy(Datum value, bool typByVal, int typLen)
Definition: datum.c:132
Size datumGetSize(Datum value, bool typByVal, int typLen)
Definition: datum.c:65
int errdetail(const char *fmt,...)
Definition: elog.c:1208
int errcode(int sqlerrcode)
Definition: elog.c:860
int errmsg(const char *fmt,...)
Definition: elog.c:1075
#define LOG
Definition: elog.h:31
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1253
TupleTableSlot * ExecStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree)
Definition: execTuples.c:1445
TupleTableSlot * ExecStoreHeapTuple(HeapTuple tuple, TupleTableSlot *slot, bool shouldFree)
Definition: execTuples.c:1351
const TupleTableSlotOps TTSOpsHeapTuple
Definition: execTuples.c:84
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1237
EState * CreateExecutorState(void)
Definition: execUtils.c:93
void FreeExecutorState(EState *estate)
Definition: execUtils.c:194
#define ResetPerTupleExprContext(estate)
Definition: executor.h:558
#define GetPerTupleExprContext(estate)
Definition: executor.h:549
char * BuildIndexValueDescription(Relation indexRelation, const Datum *values, const bool *isnull)
Definition: genam.c:177
static int compare(const void *arg1, const void *arg2)
Definition: geqo_pool.c:145
uint32 Bucket
Definition: hash.h:35
Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashutil.c:126
HeapTuple heap_copytuple(HeapTuple tuple)
Definition: heaptuple.c:777
MinimalTuple heap_copy_minimal_tuple(MinimalTuple mtup)
Definition: heaptuple.c:1536
#define HEAPTUPLESIZE
Definition: htup.h:73
HeapTupleData * HeapTuple
Definition: htup.h:71
MinimalTupleData * MinimalTuple
Definition: htup.h:27
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define MINIMAL_TUPLE_OFFSET
Definition: htup_details.h:617
static Datum heap_getattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: htup_details.h:792
#define MINIMAL_TUPLE_DATA_OFFSET
Definition: htup_details.h:621
void FormIndexDatum(IndexInfo *indexInfo, TupleTableSlot *slot, EState *estate, Datum *values, bool *isnull)
Definition: index.c:2736
IndexInfo * BuildIndexInfo(Relation index)
Definition: index.c:2438
void index_deform_tuple(IndexTuple tup, TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: indextuple.c:456
IndexTuple index_form_tuple_context(TupleDesc tupleDescriptor, const Datum *values, const bool *isnull, MemoryContext context)
Definition: indextuple.c:65
long val
Definition: informix.c:664
int b
Definition: isn.c:70
int a
Definition: isn.c:69
int i
Definition: isn.c:73
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
IndexTupleData * IndexTuple
Definition: itup.h:53
#define IndexTupleSize(itup)
Definition: itup.h:70
static Datum index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: itup.h:117
Assert(fmt[strlen(fmt) - 1] !='\n')
void LogicalTapeWrite(LogicalTape *lt, const void *ptr, size_t size)
Definition: logtape.c:761
void get_typlenbyval(Oid typid, int16 *typlen, bool *typbyval)
Definition: lsyscache.c:2206
void pfree(void *pointer)
Definition: mcxt.c:1431
void * palloc0(Size size)
Definition: mcxt.c:1232
MemoryContext CurrentMemoryContext
Definition: mcxt.c:135
void * palloc(Size size)
Definition: mcxt.c:1201
#define SK_BT_NULLS_FIRST
Definition: nbtree.h:1087
#define SK_BT_DESC
Definition: nbtree.h:1086
BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup)
Definition: nbtutils.c:82
uint16 OffsetNumber
Definition: off.h:24
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
void * arg
#define INDEX_MAX_KEYS
const void size_t len
static uint32 DatumGetUInt32(Datum X)
Definition: postgres.h:222
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
uintptr_t Datum
Definition: postgres.h:64
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:312
#define InvalidOid
Definition: postgres_ext.h:36
unsigned int Oid
Definition: postgres_ext.h:31
#define RelationGetDescr(relation)
Definition: rel.h:530
#define RelationGetNumberOfAttributes(relation)
Definition: rel.h:510
#define RelationGetRelationName(relation)
Definition: rel.h:538
#define IndexRelationGetNumberOfKeyAttributes(relation)
Definition: rel.h:523
int errtableconstraint(Relation rel, const char *conname)
Definition: relcache.c:5981
void PrepareSortSupportFromGistIndexRel(Relation indexRel, SortSupport ssup)
Definition: sortsupport.c:189
void PrepareSortSupportFromOrderingOp(Oid orderingOp, SortSupport ssup)
Definition: sortsupport.c:135
void PrepareSortSupportFromIndexRel(Relation indexRel, int16 strategy, SortSupport ssup)
Definition: sortsupport.c:162
static int ApplySortAbbrevFullComparator(Datum datum1, bool isNull1, Datum datum2, bool isNull2, SortSupport ssup)
Definition: sortsupport.h:341
struct SortSupportData * SortSupport
Definition: sortsupport.h:58
static int ApplySortComparator(Datum datum1, bool isNull1, Datum datum2, bool isNull2, SortSupport ssup)
Definition: sortsupport.h:200
#define BTGreaterStrategyNumber
Definition: stratnum.h:33
#define BTLessStrategyNumber
Definition: stratnum.h:29
ScanKeyData scankeys[INDEX_MAX_KEYS]
Definition: nbtree.h:793
BlockNumber bt_blkno
Definition: brin_tuple.h:66
TupleTableSlot * ecxt_scantuple
Definition: execnodes.h:255
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
ItemPointerData t_tid
Definition: itup.h:37
Oid * rd_indcollation
Definition: rel.h:216
Form_pg_class rd_rel
Definition: rel.h:111
int sk_flags
Definition: skey.h:66
Oid sk_collation
Definition: skey.h:70
AttrNumber sk_attno
Definition: skey.h:67
AttrNumber ssup_attno
Definition: sortsupport.h:81
bool ssup_nulls_first
Definition: sortsupport.h:75
Datum(* abbrev_converter)(Datum original, SortSupport ssup)
Definition: sortsupport.h:172
MemoryContext ssup_cxt
Definition: sortsupport.h:66
bool isnull1
Definition: tuplesort.h:141
void * tuple
Definition: tuplesort.h:139
Datum datum1
Definition: tuplesort.h:140
TuplesortIndexArg index
TuplesortIndexArg index
SortSupport onlyKey
Definition: tuplesort.h:235
MemoryContext maincontext
Definition: tuplesort.h:208
void(* writetup)(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
Definition: tuplesort.h:184
void(* removeabbrev)(Tuplesortstate *state, SortTuple *stups, int count)
Definition: tuplesort.h:177
void(* freestate)(Tuplesortstate *state)
Definition: tuplesort.h:202
MemoryContext tuplecontext
Definition: tuplesort.h:211
MemoryContext sortcontext
Definition: tuplesort.h:210
void(* readtup)(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
Definition: tuplesort.h:193
SortTupleComparator comparetup
Definition: tuplesort.h:164
SortSupport sortKeys
Definition: tuplesort.h:225
SortTupleComparator comparetup_tiebreak
Definition: tuplesort.h:171
Definition: regguts.h:323
struct TupleDescData * TupleDesc
Definition: tupdesc.h:89
Tuplesortstate * tuplesort_begin_common(int workMem, SortCoordinate coordinate, int sortopt)
Definition: tuplesort.c:643
void tuplesort_puttuple_common(Tuplesortstate *state, SortTuple *tuple, bool useAbbrev)
Definition: tuplesort.c:1187
bool tuplesort_gettuple_common(Tuplesortstate *state, bool forward, SortTuple *stup)
Definition: tuplesort.c:1493
bool trace_sort
Definition: tuplesort.c:127
void * tuplesort_readtup_alloc(Tuplesortstate *state, Size tuplen)
Definition: tuplesort.c:2918
#define PARALLEL_SORT(coordinate)
Definition: tuplesort.h:245
#define TUPLESORT_RANDOMACCESS
Definition: tuplesort.h:96
#define LogicalTapeReadExact(tape, ptr, len)
Definition: tuplesort.h:252
#define TuplesortstateGetPublic(state)
Definition: tuplesort.h:249
IndexTuple tuplesort_getindextuple(Tuplesortstate *state, bool forward)
static void writetup_index_brin(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
HeapTuple tuplesort_getheaptuple(Tuplesortstate *state, bool forward)
static void removeabbrev_datum(Tuplesortstate *state, SortTuple *stups, int count)
static int comparetup_index_btree_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
void tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
Tuplesortstate * tuplesort_begin_index_hash(Relation heapRel, Relation indexRel, uint32 high_mask, uint32 low_mask, uint32 max_buckets, int workMem, SortCoordinate coordinate, int sortopt)
static int comparetup_index_btree(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
void tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel, ItemPointer self, const Datum *values, const bool *isnull)
static void readtup_index(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static int comparetup_cluster_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
void tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
static int comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
Tuplesortstate * tuplesort_begin_index_gist(Relation heapRel, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
Tuplesortstate * tuplesort_begin_index_btree(Relation heapRel, Relation indexRel, bool enforceUnique, bool uniqueNullsNotDistinct, int workMem, SortCoordinate coordinate, int sortopt)
static void removeabbrev_index(Tuplesortstate *state, SortTuple *stups, int count)
static int comparetup_datum_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
Tuplesortstate * tuplesort_begin_index_brin(int workMem, SortCoordinate coordinate, int sortopt)
static void readtup_datum(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
#define INDEX_SORT
static void readtup_heap(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static void readtup_cluster(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int tuplen)
static void writetup_datum(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
struct BrinSortTuple BrinSortTuple
Tuplesortstate * tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation, bool nullsFirstFlag, int workMem, SortCoordinate coordinate, int sortopt)
#define CLUSTER_SORT
static int comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
bool tuplesort_gettupleslot(Tuplesortstate *state, bool forward, bool copy, TupleTableSlot *slot, Datum *abbrev)
static void removeabbrev_index_brin(Tuplesortstate *state, SortTuple *stups, int count)
#define BRINSORTTUPLE_SIZE(len)
#define DATUM_SORT
static int comparetup_index_hash(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void readtup_index_brin(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
Tuplesortstate * tuplesort_begin_cluster(TupleDesc tupDesc, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
void tuplesort_putbrintuple(Tuplesortstate *state, BrinTuple *tuple, Size size)
static void writetup_heap(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void writetup_index(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static int comparetup_heap_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void freestate_cluster(Tuplesortstate *state)
static void removeabbrev_heap(Tuplesortstate *state, SortTuple *stups, int count)
void tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
static int comparetup_cluster(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_index_brin(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_index_hash_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
bool tuplesort_getdatum(Tuplesortstate *state, bool forward, bool copy, Datum *val, bool *isNull, Datum *abbrev)
BrinTuple * tuplesort_getbrintuple(Tuplesortstate *state, Size *len, bool forward)
static void writetup_cluster(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void removeabbrev_cluster(Tuplesortstate *state, SortTuple *stups, int count)
#define HEAP_SORT
Tuplesortstate * tuplesort_begin_heap(TupleDesc tupDesc, int nkeys, AttrNumber *attNums, Oid *sortOperators, Oid *sortCollations, bool *nullsFirstFlags, int workMem, SortCoordinate coordinate, int sortopt)
static MinimalTuple ExecCopySlotMinimalTuple(TupleTableSlot *slot)
Definition: tuptable.h:471
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:433