1/*-------------------------------------------------------------------------
2 *
3 * tuplesortvariants.c
4 * Implementation of tuple sorting variants.
5 *
6 * This module handles the sorting of heap tuples, index tuples, or single
7 * Datums. The implementation is based on the generalized tuple sorting
8 * facility given in tuplesort.c. Support for other kinds of sortable objects
9 * could easily be added here, in another module, or even in an extension.
10 *
11 *
12 * Copyright (c) 2022-2025, PostgreSQL Global Development Group
13 *
14 * IDENTIFICATION
15 * src/backend/utils/sort/tuplesortvariants.c
16 *
17 *-------------------------------------------------------------------------
18 */
19
20#include "postgres.h"
21
22#include "access/brin_tuple.h"
23#include "access/gin_tuple.h"
24#include "access/hash.h"
25#include "access/htup_details.h"
26#include "access/nbtree.h"
27#include "catalog/index.h"
29#include "executor/executor.h"
30#include "pg_trace.h"
31#include "utils/datum.h"
32#include "utils/guc.h"
33#include "utils/lsyscache.h"
34#include "utils/tuplesort.h"
35
36
37/* sort-type codes for sort__start probes */
38#define HEAP_SORT 0
39#define INDEX_SORT 1
40#define DATUM_SORT 2
41#define CLUSTER_SORT 3
42
44 int count);
46 int count);
48 int count);
50 int count);
52 int count);
54 int count);
55static int comparetup_heap(const SortTuple *a, const SortTuple *b,
57static int comparetup_heap_tiebreak(const SortTuple *a, const SortTuple *b,
60 SortTuple *stup);
61static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
62 LogicalTape *tape, unsigned int len);
63static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
65static int comparetup_cluster_tiebreak(const SortTuple *a, const SortTuple *b,
68 SortTuple *stup);
70 LogicalTape *tape, unsigned int tuplen);
71static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
73static int comparetup_index_btree_tiebreak(const SortTuple *a, const SortTuple *b,
75static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
77static int comparetup_index_hash_tiebreak(const SortTuple *a, const SortTuple *b,
79static int comparetup_index_brin(const SortTuple *a, const SortTuple *b,
81static int comparetup_index_gin(const SortTuple *a, const SortTuple *b,
84 SortTuple *stup);
86 LogicalTape *tape, unsigned int len);
88 SortTuple *stup);
90 LogicalTape *tape, unsigned int len);
92 SortTuple *stup);
94 LogicalTape *tape, unsigned int len);
95static int comparetup_datum(const SortTuple *a, const SortTuple *b,
97static int comparetup_datum_tiebreak(const SortTuple *a, const SortTuple *b,
100 SortTuple *stup);
101static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
102 LogicalTape *tape, unsigned int len);
104
105/*
106 * Data structure pointed to by "TuplesortPublic.arg" for the CLUSTER case.
107 * Set by tuplesort_begin_cluster.
108 */
109typedef struct
110{
112
113 IndexInfo *indexInfo; /* info about index being used for reference */
114 EState *estate; /* for evaluating index expressions */
116
117/*
118 * Data structure pointed to by "TuplesortPublic.arg" for the IndexTuple case.
119 * Set by tuplesort_begin_index_xxx and used only by the IndexTuple routines.
120 */
121typedef struct
122{
123 Relation heapRel; /* table the index is being built on */
124 Relation indexRel; /* index being built */
126
127/*
128 * Data structure pointed to by "TuplesortPublic.arg" for the index_btree subcase.
129 */
130typedef struct
131{
133
134 bool enforceUnique; /* complain if we find duplicate tuples */
135 bool uniqueNullsNotDistinct; /* unique constraint null treatment */
137
138/*
139 * Data structure pointed to by "TuplesortPublic.arg" for the index_hash subcase.
140 */
141typedef struct
142{
144
145 uint32 high_mask; /* masks for sortable part of hash code */
149
150/*
151 * Data structure pointed to by "TuplesortPublic.arg" for the Datum case.
152 * Set by tuplesort_begin_datum and used only by the DatumTuple routines.
153 */
154typedef struct
155{
156 /* the datatype OID of the Datums to be sorted */
158 /* we need typlen in order to know how to copy the Datums. */
161
162/*
163 * The size of a BrinTuple is hard to compute from the tuple alone, so we track
164 * the length together with the tuple referenced by the SortTuple. That's what
165 * BrinSortTuple is for - it's essentially a BrinTuple prefixed by its length.
166 */
167typedef struct BrinSortTuple
168{
172
173/* Size of the BrinSortTuple, given length of the BrinTuple. */
174#define BRINSORTTUPLE_SIZE(len) (offsetof(BrinSortTuple, tuple) + (len))
175
176
179 int nkeys, AttrNumber *attNums,
180 Oid *sortOperators, Oid *sortCollations,
181 bool *nullsFirstFlags,
182 int workMem, SortCoordinate coordinate, int sortopt)
183{
184 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
185 sortopt);
187 MemoryContext oldcontext;
188 int i;
189
190 oldcontext = MemoryContextSwitchTo(base->maincontext);
191
192 Assert(nkeys > 0);
193
194 if (trace_sort)
195 elog(LOG,
196 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
197 nkeys, workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
198
199 base->nKeys = nkeys;
200
201 TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
202 false, /* no unique check */
203 nkeys,
204 workMem,
205 sortopt & TUPLESORT_RANDOMACCESS,
206 PARALLEL_SORT(coordinate));
207
211 base->writetup = writetup_heap;
212 base->readtup = readtup_heap;
213 base->haveDatum1 = true;
214 base->arg = tupDesc; /* assume we need not copy tupDesc */
215
216 /* Prepare SortSupport data for each column */
217 base->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
218
219 for (i = 0; i < nkeys; i++)
220 {
221 SortSupport sortKey = base->sortKeys + i;
222
223 Assert(attNums[i] != 0);
224 Assert(sortOperators[i] != 0);
225
227 sortKey->ssup_collation = sortCollations[i];
228 sortKey->ssup_nulls_first = nullsFirstFlags[i];
229 sortKey->ssup_attno = attNums[i];
230 /* Convey if abbreviation optimization is applicable in principle */
231 sortKey->abbreviate = (i == 0 && base->haveDatum1);
232
233 PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
234 }
235
236 /*
237 * The "onlyKey" optimization cannot be used with abbreviated keys, since
238 * tie-breaker comparisons may be required. Typically, the optimization
239 * is only of value to pass-by-value types anyway, whereas abbreviated
240 * keys are typically only of value to pass-by-reference types.
241 */
242 if (nkeys == 1 && !base->sortKeys->abbrev_converter)
243 base->onlyKey = base->sortKeys;
244
245 MemoryContextSwitchTo(oldcontext);
246
247 return state;
248}
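
/*
 * A minimal sketch of how a caller assembles the parallel per-key arrays
 * expected by tuplesort_begin_heap() above: one entry per sort key, with all
 * arrays indexed identically.  The column numbers, Int4LessOperator (from
 * catalog/pg_operator_d.h), the literal workMem, and the example guard macro
 * are illustrative assumptions, not part of this file's API.
 */
#ifdef TUPLESORT_VARIANTS_EXAMPLE
static Tuplesortstate *
example_begin_two_key_heap_sort(TupleDesc tupDesc)
{
	AttrNumber	attNums[2] = {1, 2};	/* sort by columns 1, then 2 */
	Oid			sortOperators[2] = {Int4LessOperator, Int4LessOperator};
	Oid			sortCollations[2] = {InvalidOid, InvalidOid};
	bool		nullsFirstFlags[2] = {false, false};

	return tuplesort_begin_heap(tupDesc, 2, attNums,
								sortOperators, sortCollations,
								nullsFirstFlags,
								1024 /* workMem, in KB */ ,
								NULL, TUPLESORT_NONE);
}
#endif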
249
252 Relation indexRel,
253 int workMem,
254 SortCoordinate coordinate, int sortopt)
255{
256 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
257 sortopt);
259 BTScanInsert indexScanKey;
260 MemoryContext oldcontext;
262 int i;
263
264 Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
265
266 oldcontext = MemoryContextSwitchTo(base->maincontext);
268
269 if (trace_sort)
270 elog(LOG,
271 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
273 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
274
276
277 TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
278 false, /* no unique check */
279 base->nKeys,
280 workMem,
281 sortopt & TUPLESORT_RANDOMACCESS,
282 PARALLEL_SORT(coordinate));
283
288 base->readtup = readtup_cluster;
290 base->arg = arg;
291
292 arg->indexInfo = BuildIndexInfo(indexRel);
293
294 /*
295 * If we don't have a simple leading attribute, we don't currently
296 * initialize datum1, so disable optimizations that require it.
297 */
298 if (arg->indexInfo->ii_IndexAttrNumbers[0] == 0)
299 base->haveDatum1 = false;
300 else
301 base->haveDatum1 = true;
302
303 arg->tupDesc = tupDesc; /* assume we need not copy tupDesc */
304
305 indexScanKey = _bt_mkscankey(indexRel, NULL);
306
307 if (arg->indexInfo->ii_Expressions != NULL)
308 {
309 TupleTableSlot *slot;
310 ExprContext *econtext;
311
312 /*
313 * We will need to use FormIndexDatum to evaluate the index
314 * expressions. To do that, we need an EState, as well as a
315 * TupleTableSlot to put the table tuples into. The econtext's
316 * scantuple has to point to that slot, too.
317 */
318 arg->estate = CreateExecutorState();
320 econtext = GetPerTupleExprContext(arg->estate);
321 econtext->ecxt_scantuple = slot;
322 }
323
324 /* Prepare SortSupport data for each column */
325 base->sortKeys = (SortSupport) palloc0(base->nKeys *
326 sizeof(SortSupportData));
327
328 for (i = 0; i < base->nKeys; i++)
329 {
330 SortSupport sortKey = base->sortKeys + i;
331 ScanKey scanKey = indexScanKey->scankeys + i;
332 bool reverse;
333
335 sortKey->ssup_collation = scanKey->sk_collation;
336 sortKey->ssup_nulls_first =
337 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
338 sortKey->ssup_attno = scanKey->sk_attno;
339 /* Convey if abbreviation optimization is applicable in principle */
340 sortKey->abbreviate = (i == 0 && base->haveDatum1);
341
342 Assert(sortKey->ssup_attno != 0);
343
344 reverse = (scanKey->sk_flags & SK_BT_DESC) != 0;
345
346 PrepareSortSupportFromIndexRel(indexRel, reverse, sortKey);
347 }
348
349 pfree(indexScanKey);
350
351 MemoryContextSwitchTo(oldcontext);
352
353 return state;
354}
355
358 Relation indexRel,
359 bool enforceUnique,
360 bool uniqueNullsNotDistinct,
361 int workMem,
362 SortCoordinate coordinate,
363 int sortopt)
364{
365 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
366 sortopt);
368 BTScanInsert indexScanKey;
370 MemoryContext oldcontext;
371 int i;
372
373 oldcontext = MemoryContextSwitchTo(base->maincontext);
375
376 if (trace_sort)
377 elog(LOG,
378 "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
379 enforceUnique ? 't' : 'f',
380 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
381
383
384 TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
385 enforceUnique,
386 base->nKeys,
387 workMem,
388 sortopt & TUPLESORT_RANDOMACCESS,
389 PARALLEL_SORT(coordinate));
390
394 base->writetup = writetup_index;
395 base->readtup = readtup_index;
396 base->haveDatum1 = true;
397 base->arg = arg;
398
399 arg->index.heapRel = heapRel;
400 arg->index.indexRel = indexRel;
401 arg->enforceUnique = enforceUnique;
402 arg->uniqueNullsNotDistinct = uniqueNullsNotDistinct;
403
404 indexScanKey = _bt_mkscankey(indexRel, NULL);
405
406 /* Prepare SortSupport data for each column */
407 base->sortKeys = (SortSupport) palloc0(base->nKeys *
408 sizeof(SortSupportData));
409
410 for (i = 0; i < base->nKeys; i++)
411 {
412 SortSupport sortKey = base->sortKeys + i;
413 ScanKey scanKey = indexScanKey->scankeys + i;
414 bool reverse;
415
417 sortKey->ssup_collation = scanKey->sk_collation;
418 sortKey->ssup_nulls_first =
419 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
420 sortKey->ssup_attno = scanKey->sk_attno;
421 /* Convey if abbreviation optimization is applicable in principle */
422 sortKey->abbreviate = (i == 0 && base->haveDatum1);
423
424 Assert(sortKey->ssup_attno != 0);
425
426 reverse = (scanKey->sk_flags & SK_BT_DESC) != 0;
427
428 PrepareSortSupportFromIndexRel(indexRel, reverse, sortKey);
429 }
430
431 pfree(indexScanKey);
432
433 MemoryContextSwitchTo(oldcontext);
434
435 return state;
436}
437
440 Relation indexRel,
441 uint32 high_mask,
442 uint32 low_mask,
443 uint32 max_buckets,
444 int workMem,
445 SortCoordinate coordinate,
446 int sortopt)
447{
448 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
449 sortopt);
451 MemoryContext oldcontext;
453
454 oldcontext = MemoryContextSwitchTo(base->maincontext);
456
457 if (trace_sort)
458 elog(LOG,
459 "begin index sort: high_mask = 0x%x, low_mask = 0x%x, "
460 "max_buckets = 0x%x, workMem = %d, randomAccess = %c",
461 high_mask,
462 low_mask,
463 max_buckets,
464 workMem,
465 sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
466
467 base->nKeys = 1; /* Only one sort column, the hash code */
468
472 base->writetup = writetup_index;
473 base->readtup = readtup_index;
474 base->haveDatum1 = true;
475 base->arg = arg;
476
477 arg->index.heapRel = heapRel;
478 arg->index.indexRel = indexRel;
479
480 arg->high_mask = high_mask;
481 arg->low_mask = low_mask;
482 arg->max_buckets = max_buckets;
483
484 MemoryContextSwitchTo(oldcontext);
485
486 return state;
487}
488
491 Relation indexRel,
492 int workMem,
493 SortCoordinate coordinate,
494 int sortopt)
495{
496 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
497 sortopt);
499 MemoryContext oldcontext;
501 int i;
502
503 oldcontext = MemoryContextSwitchTo(base->maincontext);
505
506 if (trace_sort)
507 elog(LOG,
508 "begin index sort: workMem = %d, randomAccess = %c",
509 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
510
512
516 base->writetup = writetup_index;
517 base->readtup = readtup_index;
518 base->haveDatum1 = true;
519 base->arg = arg;
520
521 arg->index.heapRel = heapRel;
522 arg->index.indexRel = indexRel;
523 arg->enforceUnique = false;
524 arg->uniqueNullsNotDistinct = false;
525
526 /* Prepare SortSupport data for each column */
527 base->sortKeys = (SortSupport) palloc0(base->nKeys *
528 sizeof(SortSupportData));
529
530 for (i = 0; i < base->nKeys; i++)
531 {
532 SortSupport sortKey = base->sortKeys + i;
533
535 sortKey->ssup_collation = indexRel->rd_indcollation[i];
536 sortKey->ssup_nulls_first = false;
537 sortKey->ssup_attno = i + 1;
538 /* Convey if abbreviation optimization is applicable in principle */
539 sortKey->abbreviate = (i == 0 && base->haveDatum1);
540
541 Assert(sortKey->ssup_attno != 0);
542
543 /* Look for a sort support function */
544 PrepareSortSupportFromGistIndexRel(indexRel, sortKey);
545 }
546
547 MemoryContextSwitchTo(oldcontext);
548
549 return state;
550}
551
554 SortCoordinate coordinate,
555 int sortopt)
556{
557 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
558 sortopt);
560
561 if (trace_sort)
562 elog(LOG,
563 "begin index sort: workMem = %d, randomAccess = %c",
564 workMem,
565 sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
566
567 base->nKeys = 1; /* Only one sort column, the block number */
568
573 base->haveDatum1 = true;
574 base->arg = NULL;
575
576 return state;
577}
578
581 Relation indexRel,
582 int workMem, SortCoordinate coordinate,
583 int sortopt)
584{
585 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
586 sortopt);
588 MemoryContext oldcontext;
589 int i;
590 TupleDesc desc = RelationGetDescr(indexRel);
591
592 oldcontext = MemoryContextSwitchTo(base->maincontext);
593
594#ifdef TRACE_SORT
595 if (trace_sort)
596 elog(LOG,
597 "begin index sort: workMem = %d, randomAccess = %c",
598 workMem,
599 sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
600#endif
601
602 /*
603 * Multi-column GIN indexes expand the row into a separate index entry for
604 * each attribute, and that's what we write into the tuplesort. But we still
605 * need to initialize sortsupport for all the attributes.
606 */
608
609 /* Prepare SortSupport data for each column */
610 base->sortKeys = (SortSupport) palloc0(base->nKeys *
611 sizeof(SortSupportData));
612
613 for (i = 0; i < base->nKeys; i++)
614 {
615 SortSupport sortKey = base->sortKeys + i;
616 Form_pg_attribute att = TupleDescAttr(desc, i);
617 TypeCacheEntry *typentry;
618
620 sortKey->ssup_collation = indexRel->rd_indcollation[i];
621 sortKey->ssup_nulls_first = false;
622 sortKey->ssup_attno = i + 1;
623 sortKey->abbreviate = false;
624
625 Assert(sortKey->ssup_attno != 0);
626
627 if (!OidIsValid(sortKey->ssup_collation))
628 sortKey->ssup_collation = DEFAULT_COLLATION_OID;
629
630 /*
631 * Look up an ordering operator for the index key data type, and then the
632 * sort support function.
633 */
634 typentry = lookup_type_cache(att->atttypid, TYPECACHE_LT_OPR);
635 PrepareSortSupportFromOrderingOp(typentry->lt_opr, sortKey);
636 }
637
642 base->haveDatum1 = false;
643 base->arg = NULL;
644
645 MemoryContextSwitchTo(oldcontext);
646
647 return state;
648}
649
651tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
652 bool nullsFirstFlag, int workMem,
653 SortCoordinate coordinate, int sortopt)
654{
655 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
656 sortopt);
659 MemoryContext oldcontext;
660 int16 typlen;
661 bool typbyval;
662
663 oldcontext = MemoryContextSwitchTo(base->maincontext);
665
666 if (trace_sort)
667 elog(LOG,
668 "begin datum sort: workMem = %d, randomAccess = %c",
669 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
670
671 base->nKeys = 1; /* always a one-column sort */
672
673 TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
674 false, /* no unique check */
675 1,
676 workMem,
677 sortopt & TUPLESORT_RANDOMACCESS,
678 PARALLEL_SORT(coordinate));
679
683 base->writetup = writetup_datum;
684 base->readtup = readtup_datum;
685 base->haveDatum1 = true;
686 base->arg = arg;
687
688 arg->datumType = datumType;
689
690 /* lookup necessary attributes of the datum type */
691 get_typlenbyval(datumType, &typlen, &typbyval);
692 arg->datumTypeLen = typlen;
693 base->tuples = !typbyval;
694
695 /* Prepare SortSupport data */
696 base->sortKeys = (SortSupport) palloc0(sizeof(SortSupportData));
697
699 base->sortKeys->ssup_collation = sortCollation;
700 base->sortKeys->ssup_nulls_first = nullsFirstFlag;
701
702 /*
703 * Abbreviation is possible here only for by-reference types. In theory,
704 * a pass-by-value datatype could have an abbreviated form that is cheaper
705 * to compare. In a tuple sort, we could support that, because we can
706 * always extract the original datum from the tuple as needed. Here, we
707 * can't, because a datum sort only stores a single copy of the datum; the
708 * "tuple" field of each SortTuple is NULL.
709 */
710 base->sortKeys->abbreviate = !typbyval;
711
712 PrepareSortSupportFromOrderingOp(sortOperator, base->sortKeys);
713
714 /*
715 * The "onlyKey" optimization cannot be used with abbreviated keys, since
716 * tie-breaker comparisons may be required. Typically, the optimization
717 * is only of value to pass-by-value types anyway, whereas abbreviated
718 * keys are typically only of value to pass-by-reference types.
719 */
720 if (!base->sortKeys->abbrev_converter)
721 base->onlyKey = base->sortKeys;
722
723 MemoryContextSwitchTo(oldcontext);
724
725 return state;
726}
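
/*
 * A minimal sketch of driving the Datum-sort variant set up above, end to
 * end, for pass-by-value int4 values.  INT4OID and Int4LessOperator come
 * from the catalog _d headers; the literal workMem, the DEBUG1 logging, and
 * the example guard macro are illustrative assumptions.
 */
#ifdef TUPLESORT_VARIANTS_EXAMPLE
static void
example_sort_int4_datums(const Datum *values, int nvalues)
{
	Tuplesortstate *sortstate;
	Datum		value;
	bool		isnull;

	/* one-column sort on int4 using its btree "<" operator */
	sortstate = tuplesort_begin_datum(INT4OID, Int4LessOperator,
									  InvalidOid, false,
									  1024 /* workMem, in KB */ ,
									  NULL, TUPLESORT_NONE);

	/* feed the input; pass-by-value Datums are stored directly in datum1 */
	for (int i = 0; i < nvalues; i++)
		tuplesort_putdatum(sortstate, values[i], false);

	tuplesort_performsort(sortstate);

	/* read back in ascending order; "copy" is irrelevant for by-value types */
	while (tuplesort_getdatum(sortstate, true, false, &value, &isnull, NULL))
		elog(DEBUG1, "sorted value: %d", DatumGetInt32(value));

	tuplesort_end(sortstate);
}
#endif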
727
728/*
729 * Accept one tuple while collecting input data for sort.
730 *
731 * Note that the input data is always copied; the caller need not save it.
732 */
733void
735{
738 TupleDesc tupDesc = (TupleDesc) base->arg;
739 SortTuple stup;
740 MinimalTuple tuple;
741 HeapTupleData htup;
742 Size tuplen;
743
744 /* copy the tuple into sort storage */
745 tuple = ExecCopySlotMinimalTuple(slot);
746 stup.tuple = tuple;
747 /* set up first-column key value */
748 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
749 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
750 stup.datum1 = heap_getattr(&htup,
751 base->sortKeys[0].ssup_attno,
752 tupDesc,
753 &stup.isnull1);
754
755 /* GetMemoryChunkSpace is not supported for bump contexts */
757 tuplen = MAXALIGN(tuple->t_len);
758 else
759 tuplen = GetMemoryChunkSpace(tuple);
760
762 base->sortKeys->abbrev_converter &&
763 !stup.isnull1, tuplen);
764
765 MemoryContextSwitchTo(oldcontext);
766}
767
768/*
769 * Accept one tuple while collecting input data for sort.
770 *
771 * Note that the input data is always copied; the caller need not save it.
772 */
773void
775{
776 SortTuple stup;
780 Size tuplen;
781
782 /* copy the tuple into sort storage */
783 tup = heap_copytuple(tup);
784 stup.tuple = tup;
785
786 /*
787 * set up first-column key value, and potentially abbreviate, if it's a
788 * simple column
789 */
790 if (base->haveDatum1)
791 {
792 stup.datum1 = heap_getattr(tup,
793 arg->indexInfo->ii_IndexAttrNumbers[0],
794 arg->tupDesc,
795 &stup.isnull1);
796 }
797
798 /* GetMemoryChunkSpace is not supported for bump contexts */
800 tuplen = MAXALIGN(HEAPTUPLESIZE + tup->t_len);
801 else
802 tuplen = GetMemoryChunkSpace(tup);
803
805 base->haveDatum1 &&
806 base->sortKeys->abbrev_converter &&
807 !stup.isnull1, tuplen);
808
809 MemoryContextSwitchTo(oldcontext);
810}
811
812/*
813 * Collect one index tuple while collecting input data for sort, building
814 * it from caller-supplied values.
815 */
816void
818 ItemPointer self, const Datum *values,
819 const bool *isnull)
820{
821 SortTuple stup;
822 IndexTuple tuple;
825 Size tuplen;
826
828 isnull, base->tuplecontext);
829 tuple = ((IndexTuple) stup.tuple);
830 tuple->t_tid = *self;
831 /* set up first-column key value */
832 stup.datum1 = index_getattr(tuple,
833 1,
834 RelationGetDescr(arg->indexRel),
835 &stup.isnull1);
836
837 /* GetMemoryChunkSpace is not supported for bump contexts */
839 tuplen = MAXALIGN(tuple->t_info & INDEX_SIZE_MASK);
840 else
841 tuplen = GetMemoryChunkSpace(tuple);
842
844 base->sortKeys &&
845 base->sortKeys->abbrev_converter &&
846 !stup.isnull1, tuplen);
847}
848
849/*
850 * Collect one BRIN tuple while collecting input data for sort.
851 */
852void
854{
855 SortTuple stup;
856 BrinSortTuple *bstup;
859 Size tuplen;
860
861 /* allocate space for the whole BRIN sort tuple */
862 bstup = palloc(BRINSORTTUPLE_SIZE(size));
863
864 bstup->tuplen = size;
865 memcpy(&bstup->tuple, tuple, size);
866
867 stup.tuple = bstup;
868 stup.datum1 = tuple->bt_blkno;
869 stup.isnull1 = false;
870
871 /* GetMemoryChunkSpace is not supported for bump contexts */
873 tuplen = MAXALIGN(BRINSORTTUPLE_SIZE(size));
874 else
875 tuplen = GetMemoryChunkSpace(bstup);
876
878 base->sortKeys &&
879 base->sortKeys->abbrev_converter &&
880 !stup.isnull1, tuplen);
881
882 MemoryContextSwitchTo(oldcontext);
883}
884
885void
887{
888 SortTuple stup;
889 GinTuple *ctup;
892 Size tuplen;
893
894 /* copy the GinTuple into the right memory context */
895 ctup = palloc(size);
896 memcpy(ctup, tuple, size);
897
898 stup.tuple = ctup;
899 stup.datum1 = (Datum) 0;
900 stup.isnull1 = false;
901
902 /* GetMemoryChunkSpace is not supported for bump contexts */
904 tuplen = MAXALIGN(size);
905 else
906 tuplen = GetMemoryChunkSpace(ctup);
907
909 base->sortKeys &&
910 base->sortKeys->abbrev_converter &&
911 !stup.isnull1, tuplen);
912
913 MemoryContextSwitchTo(oldcontext);
914}
915
916/*
917 * Accept one Datum while collecting input data for sort.
918 *
919 * If the Datum is of a pass-by-reference type, the value will be copied.
920 */
921void
923{
927 SortTuple stup;
928
929 /*
930 * Pass-by-value types or null values are just stored directly in
931 * stup.datum1 (and stup.tuple is not used and set to NULL).
932 *
933 * Non-null pass-by-reference values need to be copied into memory we
934 * control, and possibly abbreviated. The copied value is pointed to by
935 * stup.tuple and is treated as the canonical copy (e.g. to return via
936 * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
937 * abbreviated value if abbreviation is happening, otherwise it's
938 * identical to stup.tuple.
939 */
940
941 if (isNull || !base->tuples)
942 {
943 /*
944 * Set datum1 to zeroed representation for NULLs (to be consistent,
945 * and to support cheap inequality tests for NULL abbreviated keys).
946 */
947 stup.datum1 = !isNull ? val : (Datum) 0;
948 stup.isnull1 = isNull;
949 stup.tuple = NULL; /* no separate storage */
950 }
951 else
952 {
953 stup.isnull1 = false;
954 stup.datum1 = datumCopy(val, false, arg->datumTypeLen);
955 stup.tuple = DatumGetPointer(stup.datum1);
956 }
957
959 base->tuples &&
960 base->sortKeys->abbrev_converter && !isNull, 0);
961
962 MemoryContextSwitchTo(oldcontext);
963}
964
965/*
966 * Fetch the next tuple in either forward or back direction.
967 * If successful, put tuple in slot and return true; else, clear the slot
968 * and return false.
969 *
970 * Caller may optionally be passed back abbreviated value (on true return
971 * value) when abbreviation was used, which can be used to cheaply avoid
972 * equality checks that might otherwise be required. Caller can safely make a
973 * determination of "non-equal tuple" based on simple binary inequality. A
974 * NULL value in leading attribute will set abbreviated value to zeroed
975 * representation, which caller may rely on in abbreviated inequality check.
976 *
977 * If copy is true, the slot receives a tuple that's been copied into the
978 * caller's memory context, so that it will stay valid regardless of future
979 * manipulations of the tuplesort's state (up to and including deleting the
980 * tuplesort). If copy is false, the slot will just receive a pointer to a
981 * tuple held within the tuplesort, which is more efficient, but only safe for
982 * callers that are prepared to have any subsequent manipulation of the
983 * tuplesort's state invalidate slot contents.
984 */
985bool
987 TupleTableSlot *slot, Datum *abbrev)
988{
991 SortTuple stup;
992
993 if (!tuplesort_gettuple_common(state, forward, &stup))
994 stup.tuple = NULL;
995
996 MemoryContextSwitchTo(oldcontext);
997
998 if (stup.tuple)
999 {
1000 /* Record abbreviated key for caller */
1001 if (base->sortKeys->abbrev_converter && abbrev)
1002 *abbrev = stup.datum1;
1003
1004 if (copy)
1006
1007 ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, copy);
1008 return true;
1009 }
1010 else
1011 {
1012 ExecClearTuple(slot);
1013 return false;
1014 }
1015}
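
/*
 * A minimal sketch (with an assumed, already-filled sort state and slot) of
 * draining a heap-tuple sort through tuplesort_gettupleslot().  With
 * copy = false the slot points into the tuplesort's own memory and stays
 * valid only until the next operation on the sort; pass copy = true when the
 * tuple must survive longer.  The example guard macro is illustrative.
 */
#ifdef TUPLESORT_VARIANTS_EXAMPLE
static void
example_drain_heap_sort(Tuplesortstate *sortstate, TupleTableSlot *slot)
{
	tuplesort_performsort(sortstate);

	/* forward scan, no copy: consume each slot before advancing the sort */
	while (tuplesort_gettupleslot(sortstate, true, false, slot, NULL))
	{
		/* ... process the slot contents here ... */
	}

	tuplesort_end(sortstate);
}
#endif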
1016
1017/*
1018 * Fetch the next tuple in either forward or back direction.
1019 * Returns NULL if no more tuples. Returned tuple belongs to tuplesort memory
1020 * context, and must not be freed by caller. Caller may not rely on tuple
1021 * remaining valid after any further manipulation of tuplesort.
1022 */
1025{
1028 SortTuple stup;
1029
1030 if (!tuplesort_gettuple_common(state, forward, &stup))
1031 stup.tuple = NULL;
1032
1033 MemoryContextSwitchTo(oldcontext);
1034
1035 return stup.tuple;
1036}
1037
1038/*
1039 * Fetch the next index tuple in either forward or back direction.
1040 * Returns NULL if no more tuples. Returned tuple belongs to tuplesort memory
1041 * context, and must not be freed by caller. Caller may not rely on tuple
1042 * remaining valid after any further manipulation of tuplesort.
1043 */
1046{
1049 SortTuple stup;
1050
1051 if (!tuplesort_gettuple_common(state, forward, &stup))
1052 stup.tuple = NULL;
1053
1054 MemoryContextSwitchTo(oldcontext);
1055
1056 return (IndexTuple) stup.tuple;
1057}
1058
1059/*
1060 * Fetch the next BRIN tuple in either forward or back direction.
1061 * Returns NULL if no more tuples. Returned tuple belongs to tuplesort memory
1062 * context, and must not be freed by caller. Caller may not rely on tuple
1063 * remaining valid after any further manipulation of tuplesort.
1064 */
1065BrinTuple *
1067{
1070 SortTuple stup;
1071 BrinSortTuple *btup;
1072
1073 if (!tuplesort_gettuple_common(state, forward, &stup))
1074 stup.tuple = NULL;
1075
1076 MemoryContextSwitchTo(oldcontext);
1077
1078 if (!stup.tuple)
1079 return NULL;
1080
1081 btup = (BrinSortTuple *) stup.tuple;
1082
1083 *len = btup->tuplen;
1084
1085 return &btup->tuple;
1086}
1087
1088GinTuple *
1090{
1093 SortTuple stup;
1094 GinTuple *tup;
1095
1096 if (!tuplesort_gettuple_common(state, forward, &stup))
1097 stup.tuple = NULL;
1098
1099 MemoryContextSwitchTo(oldcontext);
1100
1101 if (!stup.tuple)
1102 return false;
1103
1104 tup = (GinTuple *) stup.tuple;
1105
1106 *len = tup->tuplen;
1107
1108 return tup;
1109}
1110
1111/*
1112 * Fetch the next Datum in either forward or back direction.
1113 * Returns false if no more datums.
1114 *
1115 * If the Datum is pass-by-ref type, the returned value is freshly palloc'd
1116 * in caller's context, and is now owned by the caller (this differs from
1117 * similar routines for other types of tuplesorts).
1118 *
1119 * Caller may optionally be passed back abbreviated value (on true return
1120 * value) when abbreviation was used, which can be used to cheaply avoid
1121 * equality checks that might otherwise be required. Caller can safely make a
1122 * determination of "non-equal tuple" based on simple binary inequality. A
1123 * NULL value will have a zeroed abbreviated value representation, which caller
1124 * may rely on in abbreviated inequality check.
1125 *
1126 * For byref Datums, if copy is true, *val is set to a copy of the Datum
1127 * copied into the caller's memory context, so that it will stay valid
1128 * regardless of future manipulations of the tuplesort's state (up to and
1129 * including deleting the tuplesort). If copy is false, *val will just be
1130 * set to a pointer to the Datum held within the tuplesort, which is more
1131 * efficient, but only safe for callers that are prepared to have any
1132 * subsequent manipulation of the tuplesort's state invalidate slot contents.
1133 * For byval Datums, the value of the 'copy' parameter has no effect.
1134 *
1135 */
1136bool
1137tuplesort_getdatum(Tuplesortstate *state, bool forward, bool copy,
1138 Datum *val, bool *isNull, Datum *abbrev)
1139{
1143 SortTuple stup;
1144
1145 if (!tuplesort_gettuple_common(state, forward, &stup))
1146 {
1147 MemoryContextSwitchTo(oldcontext);
1148 return false;
1149 }
1150
1151 /* Ensure we copy into caller's memory context */
1152 MemoryContextSwitchTo(oldcontext);
1153
1154 /* Record abbreviated key for caller */
1155 if (base->sortKeys->abbrev_converter && abbrev)
1156 *abbrev = stup.datum1;
1157
1158 if (stup.isnull1 || !base->tuples)
1159 {
1160 *val = stup.datum1;
1161 *isNull = stup.isnull1;
1162 }
1163 else
1164 {
1165 /* use stup.tuple because stup.datum1 may be an abbreviation */
1166 if (copy)
1167 *val = datumCopy(PointerGetDatum(stup.tuple), false,
1168 arg->datumTypeLen);
1169 else
1170 *val = PointerGetDatum(stup.tuple);
1171 *isNull = false;
1172 }
1173
1174 return true;
1175}
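
/*
 * A minimal sketch (with an assumed, already-sorted by-reference Datum sort)
 * of the ownership rules documented above: with copy = true each returned
 * value is palloc'd in the caller's memory context and the caller is
 * responsible for freeing it; with copy = false the pointer remains owned by
 * the tuplesort.  The example guard macro is illustrative.
 */
#ifdef TUPLESORT_VARIANTS_EXAMPLE
static void
example_drain_byref_datum_sort(Tuplesortstate *sortstate)
{
	Datum		value;
	bool		isnull;

	while (tuplesort_getdatum(sortstate, true, true, &value, &isnull, NULL))
	{
		if (!isnull)
			pfree(DatumGetPointer(value));	/* caller owns the copy */
	}

	tuplesort_end(sortstate);
}
#endif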
1176
1177
1178/*
1179 * Routines specialized for HeapTuple (actually MinimalTuple) case
1180 */
1181
1182static void
1184{
1185 int i;
1187
1188 for (i = 0; i < count; i++)
1189 {
1190 HeapTupleData htup;
1191
1192 htup.t_len = ((MinimalTuple) stups[i].tuple)->t_len +
1194 htup.t_data = (HeapTupleHeader) ((char *) stups[i].tuple -
1196 stups[i].datum1 = heap_getattr(&htup,
1197 base->sortKeys[0].ssup_attno,
1198 (TupleDesc) base->arg,
1199 &stups[i].isnull1);
1200 }
1201}
1202
1203static int
1205{
1207 SortSupport sortKey = base->sortKeys;
1208 int32 compare;
1209
1210
1211 /* Compare the leading sort key */
1212 compare = ApplySortComparator(a->datum1, a->isnull1,
1213 b->datum1, b->isnull1,
1214 sortKey);
1215 if (compare != 0)
1216 return compare;
1217
1218 /* Compare additional sort keys */
1220}
1221
1222static int
1224{
1226 SortSupport sortKey = base->sortKeys;
1227 HeapTupleData ltup;
1228 HeapTupleData rtup;
1229 TupleDesc tupDesc;
1230 int nkey;
1231 int32 compare;
1232 AttrNumber attno;
1233 Datum datum1,
1234 datum2;
1235 bool isnull1,
1236 isnull2;
1237
1238 ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
1239 ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
1240 rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
1241 rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
1242 tupDesc = (TupleDesc) base->arg;
1243
1244 if (sortKey->abbrev_converter)
1245 {
1246 attno = sortKey->ssup_attno;
1247
1248 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
1249 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
1250
1251 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
1252 datum2, isnull2,
1253 sortKey);
1254 if (compare != 0)
1255 return compare;
1256 }
1257
1258 sortKey++;
1259 for (nkey = 1; nkey < base->nKeys; nkey++, sortKey++)
1260 {
1261 attno = sortKey->ssup_attno;
1262
1263 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
1264 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
1265
1266 compare = ApplySortComparator(datum1, isnull1,
1267 datum2, isnull2,
1268 sortKey);
1269 if (compare != 0)
1270 return compare;
1271 }
1272
1273 return 0;
1274}
1275
1276static void
1278{
1280 MinimalTuple tuple = (MinimalTuple) stup->tuple;
1281
1282 /* the part of the MinimalTuple we'll write: */
1283 char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
1284 unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
1285
1286 /* total on-disk footprint: */
1287 unsigned int tuplen = tupbodylen + sizeof(int);
1288
1289 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1290 LogicalTapeWrite(tape, tupbody, tupbodylen);
1291 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1292 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1293}
1294
1295static void
1297 LogicalTape *tape, unsigned int len)
1298{
1299 unsigned int tupbodylen = len - sizeof(int);
1300 unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
1302 char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
1304 HeapTupleData htup;
1305
1306 /* read in the tuple proper */
1307 tuple->t_len = tuplen;
1308 LogicalTapeReadExact(tape, tupbody, tupbodylen);
1309 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1310 LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1311 stup->tuple = tuple;
1312 /* set up first-column key value */
1313 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
1314 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
1315 stup->datum1 = heap_getattr(&htup,
1316 base->sortKeys[0].ssup_attno,
1317 (TupleDesc) base->arg,
1318 &stup->isnull1);
1319}
1320
1321/*
1322 * Routines specialized for the CLUSTER case (HeapTuple data, with
1323 * comparisons per a btree index definition)
1324 */
1325
1326static void
1328{
1329 int i;
1332
1333 for (i = 0; i < count; i++)
1334 {
1335 HeapTuple tup;
1336
1337 tup = (HeapTuple) stups[i].tuple;
1338 stups[i].datum1 = heap_getattr(tup,
1339 arg->indexInfo->ii_IndexAttrNumbers[0],
1340 arg->tupDesc,
1341 &stups[i].isnull1);
1342 }
1343}
1344
1345static int
1348{
1350 SortSupport sortKey = base->sortKeys;
1351 int32 compare;
1352
1353 /* Compare the leading sort key, if it's simple */
1354 if (base->haveDatum1)
1355 {
1356 compare = ApplySortComparator(a->datum1, a->isnull1,
1357 b->datum1, b->isnull1,
1358 sortKey);
1359 if (compare != 0)
1360 return compare;
1361 }
1362
1364}
1365
1366static int
1369{
1372 SortSupport sortKey = base->sortKeys;
1373 HeapTuple ltup;
1374 HeapTuple rtup;
1375 TupleDesc tupDesc;
1376 int nkey;
1377 int32 compare = 0;
1378 Datum datum1,
1379 datum2;
1380 bool isnull1,
1381 isnull2;
1382
1383 ltup = (HeapTuple) a->tuple;
1384 rtup = (HeapTuple) b->tuple;
1385 tupDesc = arg->tupDesc;
1386
1387 /* Compare the leading sort key, if it's simple */
1388 if (base->haveDatum1)
1389 {
1390 if (sortKey->abbrev_converter)
1391 {
1392 AttrNumber leading = arg->indexInfo->ii_IndexAttrNumbers[0];
1393
1394 datum1 = heap_getattr(ltup, leading, tupDesc, &isnull1);
1395 datum2 = heap_getattr(rtup, leading, tupDesc, &isnull2);
1396
1397 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
1398 datum2, isnull2,
1399 sortKey);
1400 }
1401 if (compare != 0 || base->nKeys == 1)
1402 return compare;
1403 /* Compare additional columns the hard way */
1404 sortKey++;
1405 nkey = 1;
1406 }
1407 else
1408 {
1409 /* Must compare all keys the hard way */
1410 nkey = 0;
1411 }
1412
1413 if (arg->indexInfo->ii_Expressions == NULL)
1414 {
1415 /* If not expression index, just compare the proper heap attrs */
1416
1417 for (; nkey < base->nKeys; nkey++, sortKey++)
1418 {
1419 AttrNumber attno = arg->indexInfo->ii_IndexAttrNumbers[nkey];
1420
1421 datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
1422 datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
1423
1424 compare = ApplySortComparator(datum1, isnull1,
1425 datum2, isnull2,
1426 sortKey);
1427 if (compare != 0)
1428 return compare;
1429 }
1430 }
1431 else
1432 {
1433 /*
1434 * In the expression index case, compute the whole index tuple and
1435 * then compare values. It would perhaps be faster to compute only as
1436 * many columns as we need to compare, but that would require
1437 * duplicating all the logic in FormIndexDatum.
1438 */
1439 Datum l_index_values[INDEX_MAX_KEYS];
1440 bool l_index_isnull[INDEX_MAX_KEYS];
1441 Datum r_index_values[INDEX_MAX_KEYS];
1442 bool r_index_isnull[INDEX_MAX_KEYS];
1443 TupleTableSlot *ecxt_scantuple;
1444
1445 /* Reset context each time to prevent memory leakage */
1447
1448 ecxt_scantuple = GetPerTupleExprContext(arg->estate)->ecxt_scantuple;
1449
1450 ExecStoreHeapTuple(ltup, ecxt_scantuple, false);
1451 FormIndexDatum(arg->indexInfo, ecxt_scantuple, arg->estate,
1452 l_index_values, l_index_isnull);
1453
1454 ExecStoreHeapTuple(rtup, ecxt_scantuple, false);
1455 FormIndexDatum(arg->indexInfo, ecxt_scantuple, arg->estate,
1456 r_index_values, r_index_isnull);
1457
1458 for (; nkey < base->nKeys; nkey++, sortKey++)
1459 {
1460 compare = ApplySortComparator(l_index_values[nkey],
1461 l_index_isnull[nkey],
1462 r_index_values[nkey],
1463 r_index_isnull[nkey],
1464 sortKey);
1465 if (compare != 0)
1466 return compare;
1467 }
1468 }
1469
1470 return 0;
1471}
1472
1473static void
1475{
1477 HeapTuple tuple = (HeapTuple) stup->tuple;
1478 unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int);
1479
1480 /* We need to store t_self, but not other fields of HeapTupleData */
1481 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1482 LogicalTapeWrite(tape, &tuple->t_self, sizeof(ItemPointerData));
1483 LogicalTapeWrite(tape, tuple->t_data, tuple->t_len);
1484 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1485 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1486}
1487
1488static void
1490 LogicalTape *tape, unsigned int tuplen)
1491{
1494 unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
1496 t_len + HEAPTUPLESIZE);
1497
1498 /* Reconstruct the HeapTupleData header */
1499 tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
1500 tuple->t_len = t_len;
1501 LogicalTapeReadExact(tape, &tuple->t_self, sizeof(ItemPointerData));
1502 /* We don't currently bother to reconstruct t_tableOid */
1503 tuple->t_tableOid = InvalidOid;
1504 /* Read in the tuple body */
1505 LogicalTapeReadExact(tape, tuple->t_data, tuple->t_len);
1506 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1507 LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1508 stup->tuple = tuple;
1509 /* set up first-column key value, if it's a simple column */
1510 if (base->haveDatum1)
1511 stup->datum1 = heap_getattr(tuple,
1512 arg->indexInfo->ii_IndexAttrNumbers[0],
1513 arg->tupDesc,
1514 &stup->isnull1);
1515}
1516
1517static void
1519{
1522
1523 /* Free any execution state created for CLUSTER case */
1524 if (arg->estate != NULL)
1525 {
1526 ExprContext *econtext = GetPerTupleExprContext(arg->estate);
1527
1529 FreeExecutorState(arg->estate);
1530 }
1531}
1532
1533/*
1534 * Routines specialized for IndexTuple case
1535 *
1536 * The btree and hash cases require separate comparison functions, but the
1537 * IndexTuple representation is the same so the copy/write/read support
1538 * functions can be shared.
1539 */
1540
1541static void
1543{
1546 int i;
1547
1548 for (i = 0; i < count; i++)
1549 {
1550 IndexTuple tuple;
1551
1552 tuple = stups[i].tuple;
1553 stups[i].datum1 = index_getattr(tuple,
1554 1,
1555 RelationGetDescr(arg->indexRel),
1556 &stups[i].isnull1);
1557 }
1558}
1559
1560static int
1563{
1564 /*
1565 * This is similar to comparetup_heap(), but expects index tuples. There
1566 * is also special handling for enforcing uniqueness, and special
1567 * treatment for equal keys at the end.
1568 */
1570 SortSupport sortKey = base->sortKeys;
1571 int32 compare;
1572
1573 /* Compare the leading sort key */
1574 compare = ApplySortComparator(a->datum1, a->isnull1,
1575 b->datum1, b->isnull1,
1576 sortKey);
1577 if (compare != 0)
1578 return compare;
1579
1580 /* Compare additional sort keys */
1582}
1583
1584static int
1587{
1590 SortSupport sortKey = base->sortKeys;
1591 IndexTuple tuple1;
1592 IndexTuple tuple2;
1593 int keysz;
1594 TupleDesc tupDes;
1595 bool equal_hasnull = false;
1596 int nkey;
1597 int32 compare;
1598 Datum datum1,
1599 datum2;
1600 bool isnull1,
1601 isnull2;
1602
1603 tuple1 = (IndexTuple) a->tuple;
1604 tuple2 = (IndexTuple) b->tuple;
1605 keysz = base->nKeys;
1606 tupDes = RelationGetDescr(arg->index.indexRel);
1607
1608 if (sortKey->abbrev_converter)
1609 {
1610 datum1 = index_getattr(tuple1, 1, tupDes, &isnull1);
1611 datum2 = index_getattr(tuple2, 1, tupDes, &isnull2);
1612
1613 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
1614 datum2, isnull2,
1615 sortKey);
1616 if (compare != 0)
1617 return compare;
1618 }
1619
1620 /* they are equal, so we only need to examine one null flag */
1621 if (a->isnull1)
1622 equal_hasnull = true;
1623
1624 sortKey++;
1625 for (nkey = 2; nkey <= keysz; nkey++, sortKey++)
1626 {
1627 datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
1628 datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);
1629
1630 compare = ApplySortComparator(datum1, isnull1,
1631 datum2, isnull2,
1632 sortKey);
1633 if (compare != 0)
1634 return compare; /* done when we find unequal attributes */
1635
1636 /* they are equal, so we only need to examine one null flag */
1637 if (isnull1)
1638 equal_hasnull = true;
1639 }
1640
1641 /*
1642 * If btree has asked us to enforce uniqueness, complain if two equal
1643 * tuples are detected (unless there was at least one NULL field and NULLS
1644 * NOT DISTINCT was not set).
1645 *
1646 * It is sufficient to make the test here, because if two tuples are equal
1647 * they *must* get compared at some stage of the sort --- otherwise the
1648 * sort algorithm wouldn't have checked whether one must appear before the
1649 * other.
1650 */
1651 if (arg->enforceUnique && !(!arg->uniqueNullsNotDistinct && equal_hasnull))
1652 {
1654 bool isnull[INDEX_MAX_KEYS];
1655 char *key_desc;
1656
1657 /*
1658 * Some rather brain-dead implementations of qsort (such as the one in
1659 * QNX 4) will sometimes call the comparison routine to compare a
1660 * value to itself, but we always use our own implementation, which
1661 * does not.
1662 */
1663 Assert(tuple1 != tuple2);
1664
1665 index_deform_tuple(tuple1, tupDes, values, isnull);
1666
1667 key_desc = BuildIndexValueDescription(arg->index.indexRel, values, isnull);
1668
1669 ereport(ERROR,
1670 (errcode(ERRCODE_UNIQUE_VIOLATION),
1671 errmsg("could not create unique index \"%s\"",
1672 RelationGetRelationName(arg->index.indexRel)),
1673 key_desc ? errdetail("Key %s is duplicated.", key_desc) :
1674 errdetail("Duplicate keys exist."),
1675 errtableconstraint(arg->index.heapRel,
1676 RelationGetRelationName(arg->index.indexRel))));
1677 }
1678
1679 /*
1680 * If key values are equal, we sort on ItemPointer. This is required for
1681 * btree indexes, since heap TID is treated as an implicit last key
1682 * attribute in order to ensure that all keys in the index are physically
1683 * unique.
1684 */
1685 {
1686 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
1687 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
1688
1689 if (blk1 != blk2)
1690 return (blk1 < blk2) ? -1 : 1;
1691 }
1692 {
1693 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
1694 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
1695
1696 if (pos1 != pos2)
1697 return (pos1 < pos2) ? -1 : 1;
1698 }
1699
1700 /* ItemPointer values should never be equal */
1701 Assert(false);
1702
1703 return 0;
1704}
1705
1706static int
1709{
1710 Bucket bucket1;
1711 Bucket bucket2;
1712 uint32 hash1;
1713 uint32 hash2;
1714 IndexTuple tuple1;
1715 IndexTuple tuple2;
1718
1719 /*
1720 * Fetch hash keys and mask off bits we don't want to sort by, so that the
1721 * initial sort is just on the bucket number. We know that the first
1722 * column of the index tuple is the hash key.
1723 */
1724 Assert(!a->isnull1);
1725 bucket1 = _hash_hashkey2bucket(DatumGetUInt32(a->datum1),
1726 arg->max_buckets, arg->high_mask,
1727 arg->low_mask);
1728 Assert(!b->isnull1);
1729 bucket2 = _hash_hashkey2bucket(DatumGetUInt32(b->datum1),
1730 arg->max_buckets, arg->high_mask,
1731 arg->low_mask);
1732 if (bucket1 > bucket2)
1733 return 1;
1734 else if (bucket1 < bucket2)
1735 return -1;
1736
1737 /*
1738 * If bucket values are equal, sort by hash values. This allows us to
1739 * insert directly onto bucket/overflow pages, where the index tuples are
1740 * stored in hash order to allow fast binary search within each page.
1741 */
1742 hash1 = DatumGetUInt32(a->datum1);
1743 hash2 = DatumGetUInt32(b->datum1);
1744 if (hash1 > hash2)
1745 return 1;
1746 else if (hash1 < hash2)
1747 return -1;
1748
1749 /*
1750 * If hash values are equal, we sort on ItemPointer. This does not affect
1751 * validity of the finished index, but it may be useful to have index
1752 * scans in physical order.
1753 */
1754 tuple1 = (IndexTuple) a->tuple;
1755 tuple2 = (IndexTuple) b->tuple;
1756
1757 {
1760
1761 if (blk1 != blk2)
1762 return (blk1 < blk2) ? -1 : 1;
1763 }
1764 {
1767
1768 if (pos1 != pos2)
1769 return (pos1 < pos2) ? -1 : 1;
1770 }
1771
1772 /* ItemPointer values should never be equal */
1773 Assert(false);
1774
1775 return 0;
1776}
1777
1778/*
1779 * Sorting for hash indexes only uses one sort key, so this shouldn't ever be
1780 * called. It's only here for consistency.
1781 */
1782static int
1785{
1786 Assert(false);
1787
1788 return 0;
1789}
1790
1791static void
1793{
1795 IndexTuple tuple = (IndexTuple) stup->tuple;
1796 unsigned int tuplen;
1797
1798 tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
1799 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1800 LogicalTapeWrite(tape, tuple, IndexTupleSize(tuple));
1801 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1802 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1803}
1804
1805static void
1807 LogicalTape *tape, unsigned int len)
1808{
1811 unsigned int tuplen = len - sizeof(unsigned int);
1813
1814 LogicalTapeReadExact(tape, tuple, tuplen);
1815 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1816 LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1817 stup->tuple = tuple;
1818 /* set up first-column key value */
1819 stup->datum1 = index_getattr(tuple,
1820 1,
1821 RelationGetDescr(arg->indexRel),
1822 &stup->isnull1);
1823}
1824
1825/*
1826 * Routines specialized for BrinTuple case
1827 */
1828
1829static void
1831{
1832 int i;
1833
1834 for (i = 0; i < count; i++)
1835 {
1836 BrinSortTuple *tuple;
1837
1838 tuple = stups[i].tuple;
1839 stups[i].datum1 = tuple->tuple.bt_blkno;
1840 }
1841}
1842
1843static int
1846{
1847 Assert(TuplesortstateGetPublic(state)->haveDatum1);
1848
1849 if (DatumGetUInt32(a->datum1) > DatumGetUInt32(b->datum1))
1850 return 1;
1851
1852 if (DatumGetUInt32(a->datum1) < DatumGetUInt32(b->datum1))
1853 return -1;
1854
1855 /* silence compilers */
1856 return 0;
1857}
1858
1859static void
1861{
1863 BrinSortTuple *tuple = (BrinSortTuple *) stup->tuple;
1864 unsigned int tuplen = tuple->tuplen;
1865
1866 tuplen = tuplen + sizeof(tuplen);
1867 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1868 LogicalTapeWrite(tape, &tuple->tuple, tuple->tuplen);
1869 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1870 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1871}
1872
1873static void
1875 LogicalTape *tape, unsigned int len)
1876{
1877 BrinSortTuple *tuple;
1879 unsigned int tuplen = len - sizeof(unsigned int);
1880
1881 /*
1882 * Allocate space for the BRIN sort tuple, which is a BrinTuple with an
1883 * extra length field.
1884 */
1886 BRINSORTTUPLE_SIZE(tuplen));
1887
1888 tuple->tuplen = tuplen;
1889
1890 LogicalTapeReadExact(tape, &tuple->tuple, tuplen);
1891 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1892 LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1893 stup->tuple = tuple;
1894
1895 /* set up first-column key value, which is block number */
1896 stup->datum1 = tuple->tuple.bt_blkno;
1897}
1898
1899/*
1900 * Routines specialized for GIN case
1901 */
1902
1903static void
1905{
1906 Assert(false);
1907 elog(ERROR, "removeabbrev_index_gin not implemented");
1908}
1909
1910static int
1913{
1915
1916 Assert(!TuplesortstateGetPublic(state)->haveDatum1);
1917
1918 return _gin_compare_tuples((GinTuple *) a->tuple,
1919 (GinTuple *) b->tuple,
1920 base->sortKeys);
1921}
1922
1923static void
1925{
1927 GinTuple *tuple = (GinTuple *) stup->tuple;
1928 unsigned int tuplen = tuple->tuplen;
1929
1930 tuplen = tuplen + sizeof(tuplen);
1931 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1932 LogicalTapeWrite(tape, tuple, tuple->tuplen);
1933 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1934 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1935}
1936
1937static void
1939 LogicalTape *tape, unsigned int len)
1940{
1941 GinTuple *tuple;
1943 unsigned int tuplen = len - sizeof(unsigned int);
1944
1945 /*
1946 * Allocate space for the GIN sort tuple, which already has the proper
1947 * length included in the header.
1948 */
1949 tuple = (GinTuple *) tuplesort_readtup_alloc(state, tuplen);
1950
1951 tuple->tuplen = tuplen;
1952
1953 LogicalTapeReadExact(tape, tuple, tuplen);
1954 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1955 LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1956 stup->tuple = (void *) tuple;
1957
1958 /* no abbreviations (FIXME maybe use attrnum for this?) */
1959 stup->datum1 = (Datum) 0;
1960}
1961
1962/*
1963 * Routines specialized for DatumTuple case
1964 */
1965
1966static void
1968{
1969 int i;
1970
1971 for (i = 0; i < count; i++)
1972 stups[i].datum1 = PointerGetDatum(stups[i].tuple);
1973}
1974
1975static int
1977{
1979 int compare;
1980
1981 compare = ApplySortComparator(a->datum1, a->isnull1,
1982 b->datum1, b->isnull1,
1983 base->sortKeys);
1984 if (compare != 0)
1985 return compare;
1986
1988}
1989
1990static int
1992{
1994 int32 compare = 0;
1995
1996 /* if we have abbreviations, then "tuple" has the original value */
1997 if (base->sortKeys->abbrev_converter)
1999 PointerGetDatum(b->tuple), b->isnull1,
2000 base->sortKeys);
2001
2002 return compare;
2003}
2004
2005static void
2007{
2010 void *waddr;
2011 unsigned int tuplen;
2012 unsigned int writtenlen;
2013
2014 if (stup->isnull1)
2015 {
2016 waddr = NULL;
2017 tuplen = 0;
2018 }
2019 else if (!base->tuples)
2020 {
2021 waddr = &stup->datum1;
2022 tuplen = sizeof(Datum);
2023 }
2024 else
2025 {
2026 waddr = stup->tuple;
2027 tuplen = datumGetSize(PointerGetDatum(stup->tuple), false, arg->datumTypeLen);
2028 Assert(tuplen != 0);
2029 }
2030
2031 writtenlen = tuplen + sizeof(unsigned int);
2032
2033 LogicalTapeWrite(tape, &writtenlen, sizeof(writtenlen));
2034 LogicalTapeWrite(tape, waddr, tuplen);
2035 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
2036 LogicalTapeWrite(tape, &writtenlen, sizeof(writtenlen));
2037}
2038
2039static void
2041 LogicalTape *tape, unsigned int len)
2042{
2044 unsigned int tuplen = len - sizeof(unsigned int);
2045
2046 if (tuplen == 0)
2047 {
2048 /* it's NULL */
2049 stup->datum1 = (Datum) 0;
2050 stup->isnull1 = true;
2051 stup->tuple = NULL;
2052 }
2053 else if (!base->tuples)
2054 {
2055 Assert(tuplen == sizeof(Datum));
2056 LogicalTapeReadExact(tape, &stup->datum1, tuplen);
2057 stup->isnull1 = false;
2058 stup->tuple = NULL;
2059 }
2060 else
2061 {
2062 void *raddr = tuplesort_readtup_alloc(state, tuplen);
2063
2064 LogicalTapeReadExact(tape, raddr, tuplen);
2065 stup->datum1 = PointerGetDatum(raddr);
2066 stup->isnull1 = false;
2067 stup->tuple = raddr;
2068 }
2069
2070 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
2071 LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
2072}
int16 AttrNumber
Definition: attnum.h:21
uint32 BlockNumber
Definition: block.h:31
static Datum values[MAXATTR]
Definition: bootstrap.c:151
#define MAXALIGN(LEN)
Definition: c.h:782
int16_t int16
Definition: c.h:497
int32_t int32
Definition: c.h:498
uint32_t uint32
Definition: c.h:502
#define OidIsValid(objectId)
Definition: c.h:746
size_t Size
Definition: c.h:576
Datum datumCopy(Datum value, bool typByVal, int typLen)
Definition: datum.c:132
Size datumGetSize(Datum value, bool typByVal, int typLen)
Definition: datum.c:65
int errdetail(const char *fmt,...)
Definition: elog.c:1203
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define LOG
Definition: elog.h:31
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
#define ereport(elevel,...)
Definition: elog.h:149
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1425
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1441
TupleTableSlot * ExecStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree)
Definition: execTuples.c:1633
const TupleTableSlotOps TTSOpsHeapTuple
Definition: execTuples.c:85
TupleTableSlot * ExecStoreHeapTuple(HeapTuple tuple, TupleTableSlot *slot, bool shouldFree)
Definition: execTuples.c:1539
void FreeExecutorState(EState *estate)
Definition: execUtils.c:193
EState * CreateExecutorState(void)
Definition: execUtils.c:88
#define ResetPerTupleExprContext(estate)
Definition: executor.h:646
#define GetPerTupleExprContext(estate)
Definition: executor.h:637
char * BuildIndexValueDescription(Relation indexRelation, const Datum *values, const bool *isnull)
Definition: genam.c:178
static int compare(const void *arg1, const void *arg2)
Definition: geqo_pool.c:145
int _gin_compare_tuples(GinTuple *a, GinTuple *b, SortSupport ssup)
Definition: gininsert.c:2390
uint32 Bucket
Definition: hash.h:35
Assert(PointerIsAligned(start, uint64))
for(;;)
Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashutil.c:125
HeapTuple heap_copytuple(HeapTuple tuple)
Definition: heaptuple.c:778
MinimalTuple heap_copy_minimal_tuple(MinimalTuple mtup)
Definition: heaptuple.c:1536
#define HEAPTUPLESIZE
Definition: htup.h:73
HeapTupleData * HeapTuple
Definition: htup.h:71
MinimalTupleData * MinimalTuple
Definition: htup.h:27
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define MINIMAL_TUPLE_OFFSET
Definition: htup_details.h:669
static Datum heap_getattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: htup_details.h:903
#define MINIMAL_TUPLE_DATA_OFFSET
Definition: htup_details.h:673
IndexInfo * BuildIndexInfo(Relation index)
Definition: index.c:2428
void FormIndexDatum(IndexInfo *indexInfo, TupleTableSlot *slot, EState *estate, Datum *values, bool *isnull)
Definition: index.c:2730
void index_deform_tuple(IndexTuple tup, TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: indextuple.c:456
IndexTuple index_form_tuple_context(TupleDesc tupleDescriptor, const Datum *values, const bool *isnull, MemoryContext context)
Definition: indextuple.c:65
long val
Definition: informix.c:689
int b
Definition: isn.c:71
int a
Definition: isn.c:70
int i
Definition: isn.c:74
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:78
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
IndexTupleData * IndexTuple
Definition: itup.h:53
static Datum index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: itup.h:131
static Size IndexTupleSize(const IndexTupleData *itup)
Definition: itup.h:71
#define INDEX_SIZE_MASK
Definition: itup.h:65
void LogicalTapeWrite(LogicalTape *lt, const void *ptr, size_t size)
Definition: logtape.c:761
void get_typlenbyval(Oid typid, int16 *typlen, bool *typbyval)
Definition: lsyscache.c:2334
void pfree(void *pointer)
Definition: mcxt.c:1524
Size GetMemoryChunkSpace(void *pointer)
Definition: mcxt.c:721
void * palloc0(Size size)
Definition: mcxt.c:1347
void * palloc(Size size)
Definition: mcxt.c:1317
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
#define SK_BT_NULLS_FIRST
Definition: nbtree.h:1123
#define SK_BT_DESC
Definition: nbtree.h:1122
BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup)
Definition: nbtutils.c:80
uint16 OffsetNumber
Definition: off.h:24
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:200
void * arg
#define INDEX_MAX_KEYS
const void size_t len
static uint32 DatumGetUInt32(Datum X)
Definition: postgres.h:227
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:327
uintptr_t Datum
Definition: postgres.h:69
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:317
#define InvalidOid
Definition: postgres_ext.h:37
unsigned int Oid
Definition: postgres_ext.h:32
#define RelationGetDescr(relation)
Definition: rel.h:538
#define RelationGetNumberOfAttributes(relation)
Definition: rel.h:518
#define RelationGetRelationName(relation)
Definition: rel.h:546
#define IndexRelationGetNumberOfKeyAttributes(relation)
Definition: rel.h:531
int errtableconstraint(Relation rel, const char *conname)
Definition: relcache.c:6031
void PrepareSortSupportFromGistIndexRel(Relation indexRel, SortSupport ssup)
Definition: sortsupport.c:185
void PrepareSortSupportFromIndexRel(Relation indexRel, bool reverse, SortSupport ssup)
Definition: sortsupport.c:161
void PrepareSortSupportFromOrderingOp(Oid orderingOp, SortSupport ssup)
Definition: sortsupport.c:134
static int ApplySortAbbrevFullComparator(Datum datum1, bool isNull1, Datum datum2, bool isNull2, SortSupport ssup)
Definition: sortsupport.h:341
struct SortSupportData * SortSupport
Definition: sortsupport.h:58
static int ApplySortComparator(Datum datum1, bool isNull1, Datum datum2, bool isNull2, SortSupport ssup)
Definition: sortsupport.h:200
ScanKeyData scankeys[INDEX_MAX_KEYS]
Definition: nbtree.h:798
BlockNumber bt_blkno
Definition: brin_tuple.h:66
TupleTableSlot * ecxt_scantuple
Definition: execnodes.h:268
int tuplen
Definition: gin_tuple.h:22
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
ItemPointerData t_tid
Definition: itup.h:37
unsigned short t_info
Definition: itup.h:49
Oid * rd_indcollation
Definition: rel.h:217
Form_pg_class rd_rel
Definition: rel.h:111
int sk_flags
Definition: skey.h:66
Oid sk_collation
Definition: skey.h:70
AttrNumber sk_attno
Definition: skey.h:67
AttrNumber ssup_attno
Definition: sortsupport.h:81
bool ssup_nulls_first
Definition: sortsupport.h:75
Datum(* abbrev_converter)(Datum original, SortSupport ssup)
Definition: sortsupport.h:172
MemoryContext ssup_cxt
Definition: sortsupport.h:66
bool isnull1
Definition: tuplesort.h:152
void * tuple
Definition: tuplesort.h:150
Datum datum1
Definition: tuplesort.h:151
TuplesortIndexArg index
SortSupport onlyKey
Definition: tuplesort.h:246
MemoryContext maincontext
Definition: tuplesort.h:219
void(* writetup)(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
Definition: tuplesort.h:195
void(* removeabbrev)(Tuplesortstate *state, SortTuple *stups, int count)
Definition: tuplesort.h:188
void(* freestate)(Tuplesortstate *state)
Definition: tuplesort.h:213
MemoryContext tuplecontext
Definition: tuplesort.h:222
MemoryContext sortcontext
Definition: tuplesort.h:221
void(* readtup)(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
Definition: tuplesort.h:204
SortTupleComparator comparetup
Definition: tuplesort.h:175
SortSupport sortKeys
Definition: tuplesort.h:236
SortTupleComparator comparetup_tiebreak
Definition: tuplesort.h:182
static FormData_pg_attribute * TupleDescAttr(TupleDesc tupdesc, int i)
Definition: tupdesc.h:154
struct TupleDescData * TupleDesc
Definition: tupdesc.h:139
Tuplesortstate * tuplesort_begin_common(int workMem, SortCoordinate coordinate, int sortopt)
Definition: tuplesort.c:642
void tuplesort_puttuple_common(Tuplesortstate *state, SortTuple *tuple, bool useAbbrev, Size tuplen)
Definition: tuplesort.c:1169
bool tuplesort_gettuple_common(Tuplesortstate *state, bool forward, SortTuple *stup)
Definition: tuplesort.c:1470
bool trace_sort
Definition: tuplesort.c:124
void * tuplesort_readtup_alloc(Tuplesortstate *state, Size tuplen)
Definition: tuplesort.c:2883
#define TupleSortUseBumpTupleCxt(opt)
Definition: tuplesort.h:109
#define PARALLEL_SORT(coordinate)
Definition: tuplesort.h:256
#define TUPLESORT_RANDOMACCESS
Definition: tuplesort.h:97
#define LogicalTapeReadExact(tape, ptr, len)
Definition: tuplesort.h:263
#define TuplesortstateGetPublic(state)
Definition: tuplesort.h:260
IndexTuple tuplesort_getindextuple(Tuplesortstate *state, bool forward)
static void writetup_index_brin(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
Tuplesortstate * tuplesort_begin_index_gin(Relation heapRel, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
HeapTuple tuplesort_getheaptuple(Tuplesortstate *state, bool forward)
static void removeabbrev_datum(Tuplesortstate *state, SortTuple *stups, int count)
static int comparetup_index_btree_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
void tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
static int comparetup_index_btree(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
GinTuple * tuplesort_getgintuple(Tuplesortstate *state, Size *len, bool forward)
void tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel, ItemPointer self, const Datum *values, const bool *isnull)
static void readtup_index(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static int comparetup_cluster_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void writetup_index_gin(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
void tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
static int comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
Tuplesortstate * tuplesort_begin_index_brin(int workMem, SortCoordinate coordinate, int sortopt)
Tuplesortstate * tuplesort_begin_heap(TupleDesc tupDesc, int nkeys, AttrNumber *attNums, Oid *sortOperators, Oid *sortCollations, bool *nullsFirstFlags, int workMem, SortCoordinate coordinate, int sortopt)
Tuplesortstate * tuplesort_begin_cluster(TupleDesc tupDesc, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
BrinTuple * tuplesort_getbrintuple(Tuplesortstate *state, Size *len, bool forward)
static void removeabbrev_index(Tuplesortstate *state, SortTuple *stups, int count)
Tuplesortstate * tuplesort_begin_index_btree(Relation heapRel, Relation indexRel, bool enforceUnique, bool uniqueNullsNotDistinct, int workMem, SortCoordinate coordinate, int sortopt)
static int comparetup_datum_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void readtup_datum(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
#define INDEX_SORT
static void readtup_heap(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static void readtup_cluster(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int tuplen)
Tuplesortstate * tuplesort_begin_index_gist(Relation heapRel, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
static void writetup_datum(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
struct BrinSortTuple BrinSortTuple
#define CLUSTER_SORT
static int comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
bool tuplesort_gettupleslot(Tuplesortstate *state, bool forward, bool copy, TupleTableSlot *slot, Datum *abbrev)
static void removeabbrev_index_brin(Tuplesortstate *state, SortTuple *stups, int count)
#define BRINSORTTUPLE_SIZE(len)
#define DATUM_SORT
void tuplesort_putgintuple(Tuplesortstate *state, GinTuple *tuple, Size size)
static int comparetup_index_hash(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void readtup_index_brin(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
Tuplesortstate * tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation, bool nullsFirstFlag, int workMem, SortCoordinate coordinate, int sortopt)
void tuplesort_putbrintuple(Tuplesortstate *state, BrinTuple *tuple, Size size)
static void writetup_heap(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void writetup_index(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void readtup_index_gin(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static void removeabbrev_index_gin(Tuplesortstate *state, SortTuple *stups, int count)
Tuplesortstate * tuplesort_begin_index_hash(Relation heapRel, Relation indexRel, uint32 high_mask, uint32 low_mask, uint32 max_buckets, int workMem, SortCoordinate coordinate, int sortopt)
static int comparetup_heap_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void freestate_cluster(Tuplesortstate *state)
static void removeabbrev_heap(Tuplesortstate *state, SortTuple *stups, int count)
void tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
static int comparetup_cluster(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_index_brin(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_index_hash_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
bool tuplesort_getdatum(Tuplesortstate *state, bool forward, bool copy, Datum *val, bool *isNull, Datum *abbrev)
static void writetup_cluster(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void removeabbrev_cluster(Tuplesortstate *state, SortTuple *stups, int count)
#define HEAP_SORT
static int comparetup_index_gin(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static MinimalTuple ExecCopySlotMinimalTuple(TupleTableSlot *slot)
Definition: tuptable.h:492
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:454
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition: typcache.c:386
#define TYPECACHE_LT_OPR
Definition: typcache.h:138
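
The entries above are only the cross-reference index for this file. As a quick orientation, the sketch below shows how the single-Datum variant indexed here (tuplesort_begin_datum, tuplesort_putdatum, tuplesort_getdatum) is typically driven. It is a minimal illustration, not code from this file: tuplesort_performsort() and tuplesort_end() are declared in utils/tuplesort.h and implemented in tuplesort.c rather than in this listing, and the int4 example values are arbitrary.

/*
 * Minimal sketch: sort 1000 int4 Datums with the tuplesort Datum API.
 * Assumes a normal backend environment (GUC work_mem, syscache access).
 */
#include "postgres.h"

#include "catalog/pg_type_d.h"   /* INT4OID */
#include "miscadmin.h"           /* work_mem */
#include "utils/tuplesort.h"
#include "utils/typcache.h"      /* lookup_type_cache, TYPECACHE_LT_OPR */

static void
sort_some_int4s(void)
{
	TypeCacheEntry *typentry;
	Tuplesortstate *sortstate;
	Datum		val;
	bool		isnull;

	/* Look up int4's "<" operator rather than hard-coding its OID. */
	typentry = lookup_type_cache(INT4OID, TYPECACHE_LT_OPR);

	sortstate = tuplesort_begin_datum(INT4OID,
									  typentry->lt_opr,
									  InvalidOid,	/* default collation */
									  false,		/* nulls sort last */
									  work_mem,
									  NULL,			/* no parallel coordination */
									  TUPLESORT_NONE);

	/* Feed the values in descending order ... */
	for (int i = 0; i < 1000; i++)
		tuplesort_putdatum(sortstate, Int32GetDatum(1000 - i), false);

	tuplesort_performsort(sortstate);

	/* ... and read them back ascending: 1, 2, ..., 1000. */
	while (tuplesort_getdatum(sortstate, true /* forward */, true /* copy */,
							  &val, &isnull, NULL))
	{
		/* consume DatumGetInt32(val) here */
	}

	tuplesort_end(sortstate);
}

The heap-tuple variant (tuplesort_begin_heap / tuplesort_puttupleslot / tuplesort_gettupleslot) follows the same begin, put, performsort, get, end shape, with a TupleTableSlot in place of the bare Datum.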