tuplesortvariants.c
1 /*-------------------------------------------------------------------------
2  *
3  * tuplesortvariants.c
4  * Implementation of tuple sorting variants.
5  *
6  * This module handles the sorting of heap tuples, index tuples, or single
7  * Datums. The implementation is based on the generalized tuple sorting
8  * facility given in tuplesort.c. Support for other kinds of sortable objects
9  * could easily be added here, in another module, or even in an extension.
10  *
11  *
12  * Copyright (c) 2022-2023, PostgreSQL Global Development Group
13  *
14  * IDENTIFICATION
15  * src/backend/utils/sort/tuplesortvariants.c
16  *
17  *-------------------------------------------------------------------------
18  */
19 
20 #include "postgres.h"
21 
22 #include "access/hash.h"
23 #include "access/htup_details.h"
24 #include "access/nbtree.h"
25 #include "catalog/index.h"
26 #include "executor/executor.h"
27 #include "pg_trace.h"
28 #include "utils/datum.h"
29 #include "utils/lsyscache.h"
30 #include "utils/guc.h"
31 #include "utils/tuplesort.h"
32 
33 
34 /* sort-type codes for sort__start probes */
35 #define HEAP_SORT 0
36 #define INDEX_SORT 1
37 #define DATUM_SORT 2
38 #define CLUSTER_SORT 3
39 
40 static void removeabbrev_heap(Tuplesortstate *state, SortTuple *stups,
41  int count);
42 static void removeabbrev_cluster(Tuplesortstate *state, SortTuple *stups,
43  int count);
44 static void removeabbrev_index(Tuplesortstate *state, SortTuple *stups,
45  int count);
46 static void removeabbrev_datum(Tuplesortstate *state, SortTuple *stups,
47  int count);
48 static int comparetup_heap(const SortTuple *a, const SortTuple *b,
49  Tuplesortstate *state);
50 static void writetup_heap(Tuplesortstate *state, LogicalTape *tape,
51  SortTuple *stup);
52 static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
53  LogicalTape *tape, unsigned int len);
54 static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
55  Tuplesortstate *state);
56 static void writetup_cluster(Tuplesortstate *state, LogicalTape *tape,
57  SortTuple *stup);
58 static void readtup_cluster(Tuplesortstate *state, SortTuple *stup,
59  LogicalTape *tape, unsigned int tuplen);
60 static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
61  Tuplesortstate *state);
62 static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
63  Tuplesortstate *state);
64 static void writetup_index(Tuplesortstate *state, LogicalTape *tape,
65  SortTuple *stup);
66 static void readtup_index(Tuplesortstate *state, SortTuple *stup,
67  LogicalTape *tape, unsigned int len);
68 static int comparetup_datum(const SortTuple *a, const SortTuple *b,
69  Tuplesortstate *state);
70 static void writetup_datum(Tuplesortstate *state, LogicalTape *tape,
71  SortTuple *stup);
72 static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
73  LogicalTape *tape, unsigned int len);
74 static void freestate_cluster(Tuplesortstate *state);
75 
76 /*
77  * Data structure pointed to by "TuplesortPublic.arg" for the CLUSTER case. Set
78  * by tuplesort_begin_cluster.
79  */
80 typedef struct
81 {
82  TupleDesc tupDesc;
83 
84  IndexInfo *indexInfo; /* info about index being used for reference */
85  EState *estate; /* for evaluating index expressions */
86 } TuplesortClusterArg;
87 
88 /*
89  * Data structure pointed to by "TuplesortPublic.arg" for the IndexTuple case.
90  * Set by tuplesort_begin_index_xxx and used only by the IndexTuple routines.
91  */
92 typedef struct
93 {
94  Relation heapRel; /* table the index is being built on */
95  Relation indexRel; /* index being built */
96 } TuplesortIndexArg;
97 
98 /*
99  * Data structure pointed to by "TuplesortPublic.arg" for the index_btree subcase.
100  */
101 typedef struct
102 {
103  TuplesortIndexArg index;
104 
105  bool enforceUnique; /* complain if we find duplicate tuples */
106  bool uniqueNullsNotDistinct; /* unique constraint null treatment */
107 } TuplesortIndexBTreeArg;
108 
109 /*
110  * Data structure pointed to by "TuplesortPublic.arg" for the index_hash subcase.
111  */
112 typedef struct
113 {
114  TuplesortIndexArg index;
115 
116  uint32 high_mask; /* masks for sortable part of hash code */
117  uint32 low_mask;
118  uint32 max_buckets;
119 } TuplesortIndexHashArg;
120 
121 /*
122  * Data structure pointed to by "TuplesortPublic.arg" for the Datum case.
123  * Set by tuplesort_begin_datum and used only by the DatumTuple routines.
124  */
125 typedef struct
126 {
127  /* the datatype oid of Datum's to be sorted */
128  Oid datumType;
129  /* we need typelen in order to know how to copy the Datums. */
130  int datumTypeLen;
131 } TuplesortDatumArg;
132 
133 Tuplesortstate *
134 tuplesort_begin_heap(TupleDesc tupDesc,
135  int nkeys, AttrNumber *attNums,
136  Oid *sortOperators, Oid *sortCollations,
137  bool *nullsFirstFlags,
138  int workMem, SortCoordinate coordinate, int sortopt)
139 {
140  Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
141  sortopt);
142  TuplesortPublic *base = TuplesortstateGetPublic(state);
143  MemoryContext oldcontext;
144  int i;
145 
146  oldcontext = MemoryContextSwitchTo(base->maincontext);
147 
148  Assert(nkeys > 0);
149 
150 #ifdef TRACE_SORT
151  if (trace_sort)
152  elog(LOG,
153  "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
154  nkeys, workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
155 #endif
156 
157  base->nKeys = nkeys;
158 
159  TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
160  false, /* no unique check */
161  nkeys,
162  workMem,
163  sortopt & TUPLESORT_RANDOMACCESS,
164  PARALLEL_SORT(coordinate));
165 
166  base->removeabbrev = removeabbrev_heap;
167  base->comparetup = comparetup_heap;
168  base->writetup = writetup_heap;
169  base->readtup = readtup_heap;
170  base->haveDatum1 = true;
171  base->arg = tupDesc; /* assume we need not copy tupDesc */
172 
173  /* Prepare SortSupport data for each column */
174  base->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
175 
176  for (i = 0; i < nkeys; i++)
177  {
178  SortSupport sortKey = base->sortKeys + i;
179 
180  Assert(attNums[i] != 0);
181  Assert(sortOperators[i] != 0);
182 
183  sortKey->ssup_cxt = CurrentMemoryContext;
184  sortKey->ssup_collation = sortCollations[i];
185  sortKey->ssup_nulls_first = nullsFirstFlags[i];
186  sortKey->ssup_attno = attNums[i];
187  /* Convey if abbreviation optimization is applicable in principle */
188  sortKey->abbreviate = (i == 0 && base->haveDatum1);
189 
190  PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
191  }
192 
193  /*
194  * The "onlyKey" optimization cannot be used with abbreviated keys, since
195  * tie-breaker comparisons may be required. Typically, the optimization
196  * is only of value to pass-by-value types anyway, whereas abbreviated
197  * keys are typically only of value to pass-by-reference types.
198  */
199  if (nkeys == 1 && !base->sortKeys->abbrev_converter)
200  base->onlyKey = base->sortKeys;
201 
202  MemoryContextSwitchTo(oldcontext);
203 
204  return state;
205 }
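For orientation, a minimal caller-side sketch of the heap-sort path set up above (illustrative only; fetch_next_slot() and the key arrays are hypothetical placeholders supplied by the caller, not part of this file):

    Tuplesortstate *ts;

    ts = tuplesort_begin_heap(tupdesc, nkeys, attNums, sortOperators,
                              collations, nullsFirst,
                              work_mem, NULL, TUPLESORT_RANDOMACCESS);

    while (fetch_next_slot(inslot))          /* hypothetical input loop */
        tuplesort_puttupleslot(ts, inslot);  /* input is always copied */

    tuplesort_performsort(ts);

    while (tuplesort_gettupleslot(ts, true, false, outslot, NULL))
    {
        /* consume outslot; with copy = false its contents remain valid
         * only until the next operation on ts */
    }

    tuplesort_end(ts);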
206 
207 Tuplesortstate *
208 tuplesort_begin_cluster(TupleDesc tupDesc,
209  Relation indexRel,
210  int workMem,
211  SortCoordinate coordinate, int sortopt)
212 {
213  Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
214  sortopt);
215  TuplesortPublic *base = TuplesortstateGetPublic(state);
216  BTScanInsert indexScanKey;
217  MemoryContext oldcontext;
218  TuplesortClusterArg *arg;
219  int i;
220 
221  Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
222 
223  oldcontext = MemoryContextSwitchTo(base->maincontext);
224  arg = (TuplesortClusterArg *) palloc0(sizeof(TuplesortClusterArg));
225 
226 #ifdef TRACE_SORT
227  if (trace_sort)
228  elog(LOG,
229  "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
230  RelationGetNumberOfAttributes(indexRel),
231  workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
232 #endif
233 
234  base->nKeys = RelationGetNumberOfAttributes(indexRel);
235 
236  TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
237  false, /* no unique check */
238  base->nKeys,
239  workMem,
240  sortopt & TUPLESORT_RANDOMACCESS,
241  PARALLEL_SORT(coordinate));
242 
243  base->removeabbrev = removeabbrev_cluster;
244  base->comparetup = comparetup_cluster;
245  base->writetup = writetup_cluster;
246  base->readtup = readtup_cluster;
247  base->freestate = freestate_cluster;
248  base->arg = arg;
249 
250  arg->indexInfo = BuildIndexInfo(indexRel);
251 
252  /*
253  * If we don't have a simple leading attribute, we don't currently
254  * initialize datum1, so disable optimizations that require it.
255  */
256  if (arg->indexInfo->ii_IndexAttrNumbers[0] == 0)
257  base->haveDatum1 = false;
258  else
259  base->haveDatum1 = true;
260 
261  arg->tupDesc = tupDesc; /* assume we need not copy tupDesc */
262 
263  indexScanKey = _bt_mkscankey(indexRel, NULL);
264 
265  if (arg->indexInfo->ii_Expressions != NULL)
266  {
267  TupleTableSlot *slot;
268  ExprContext *econtext;
269 
270  /*
271  * We will need to use FormIndexDatum to evaluate the index
272  * expressions. To do that, we need an EState, as well as a
273  * TupleTableSlot to put the table tuples into. The econtext's
274  * scantuple has to point to that slot, too.
275  */
276  arg->estate = CreateExecutorState();
277  slot = MakeSingleTupleTableSlot(tupDesc, &TTSOpsHeapTuple);
278  econtext = GetPerTupleExprContext(arg->estate);
279  econtext->ecxt_scantuple = slot;
280  }
281 
282  /* Prepare SortSupport data for each column */
283  base->sortKeys = (SortSupport) palloc0(base->nKeys *
284  sizeof(SortSupportData));
285 
286  for (i = 0; i < base->nKeys; i++)
287  {
288  SortSupport sortKey = base->sortKeys + i;
289  ScanKey scanKey = indexScanKey->scankeys + i;
290  int16 strategy;
291 
292  sortKey->ssup_cxt = CurrentMemoryContext;
293  sortKey->ssup_collation = scanKey->sk_collation;
294  sortKey->ssup_nulls_first =
295  (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
296  sortKey->ssup_attno = scanKey->sk_attno;
297  /* Convey if abbreviation optimization is applicable in principle */
298  sortKey->abbreviate = (i == 0 && base->haveDatum1);
299 
300  Assert(sortKey->ssup_attno != 0);
301 
302  strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
303  BTGreaterStrategyNumber : BTLessStrategyNumber;
304 
305  PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
306  }
307 
308  pfree(indexScanKey);
309 
310  MemoryContextSwitchTo(oldcontext);
311 
312  return state;
313 }
314 
315 Tuplesortstate *
316 tuplesort_begin_index_btree(Relation heapRel,
317  Relation indexRel,
318  bool enforceUnique,
319  bool uniqueNullsNotDistinct,
320  int workMem,
321  SortCoordinate coordinate,
322  int sortopt)
323 {
324  Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
325  sortopt);
326  TuplesortPublic *base = TuplesortstateGetPublic(state);
327  BTScanInsert indexScanKey;
328  TuplesortIndexBTreeArg *arg;
329  MemoryContext oldcontext;
330  int i;
331 
332  oldcontext = MemoryContextSwitchTo(base->maincontext);
333  arg = (TuplesortIndexBTreeArg *) palloc(sizeof(TuplesortIndexBTreeArg));
334 
335 #ifdef TRACE_SORT
336  if (trace_sort)
337  elog(LOG,
338  "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
339  enforceUnique ? 't' : 'f',
340  workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
341 #endif
342 
343  base->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel);
344 
345  TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
346  enforceUnique,
347  base->nKeys,
348  workMem,
349  sortopt & TUPLESORT_RANDOMACCESS,
350  PARALLEL_SORT(coordinate));
351 
352  base->removeabbrev = removeabbrev_index;
353  base->comparetup = comparetup_index_btree;
354  base->writetup = writetup_index;
355  base->readtup = readtup_index;
356  base->haveDatum1 = true;
357  base->arg = arg;
358 
359  arg->index.heapRel = heapRel;
360  arg->index.indexRel = indexRel;
361  arg->enforceUnique = enforceUnique;
362  arg->uniqueNullsNotDistinct = uniqueNullsNotDistinct;
363 
364  indexScanKey = _bt_mkscankey(indexRel, NULL);
365 
366  /* Prepare SortSupport data for each column */
367  base->sortKeys = (SortSupport) palloc0(base->nKeys *
368  sizeof(SortSupportData));
369 
370  for (i = 0; i < base->nKeys; i++)
371  {
372  SortSupport sortKey = base->sortKeys + i;
373  ScanKey scanKey = indexScanKey->scankeys + i;
374  int16 strategy;
375 
376  sortKey->ssup_cxt = CurrentMemoryContext;
377  sortKey->ssup_collation = scanKey->sk_collation;
378  sortKey->ssup_nulls_first =
379  (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
380  sortKey->ssup_attno = scanKey->sk_attno;
381  /* Convey if abbreviation optimization is applicable in principle */
382  sortKey->abbreviate = (i == 0 && base->haveDatum1);
383 
384  Assert(sortKey->ssup_attno != 0);
385 
386  strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
387  BTGreaterStrategyNumber : BTLessStrategyNumber;
388 
389  PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
390  }
391 
392  pfree(indexScanKey);
393 
394  MemoryContextSwitchTo(oldcontext);
395 
396  return state;
397 }
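A rough sketch of how an index build can drive this sort (the real driver lives in nbtsort.c; scan_next_tuple(), tid, values and isnull below are hypothetical placeholders):

    Tuplesortstate *ts;
    IndexTuple  itup;

    ts = tuplesort_begin_index_btree(heapRel, indexRel,
                                     true,   /* enforceUnique */
                                     false,  /* uniqueNullsNotDistinct */
                                     maintenance_work_mem, NULL,
                                     TUPLESORT_NONE);

    while (scan_next_tuple(&tid, values, isnull))   /* hypothetical scan */
        tuplesort_putindextuplevalues(ts, indexRel, &tid, values, isnull);

    tuplesort_performsort(ts);

    while ((itup = tuplesort_getindextuple(ts, true)) != NULL)
    {
        /* itup belongs to the tuplesort; copy it if it must outlive ts */
    }

    tuplesort_end(ts);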
398 
399 Tuplesortstate *
400 tuplesort_begin_index_hash(Relation heapRel,
401  Relation indexRel,
402  uint32 high_mask,
403  uint32 low_mask,
404  uint32 max_buckets,
405  int workMem,
406  SortCoordinate coordinate,
407  int sortopt)
408 {
409  Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
410  sortopt);
411  TuplesortPublic *base = TuplesortstateGetPublic(state);
412  MemoryContext oldcontext;
413  TuplesortIndexHashArg *arg;
414 
415  oldcontext = MemoryContextSwitchTo(base->maincontext);
416  arg = (TuplesortIndexHashArg *) palloc(sizeof(TuplesortIndexHashArg));
417 
418 #ifdef TRACE_SORT
419  if (trace_sort)
420  elog(LOG,
421  "begin index sort: high_mask = 0x%x, low_mask = 0x%x, "
422  "max_buckets = 0x%x, workMem = %d, randomAccess = %c",
423  high_mask,
424  low_mask,
425  max_buckets,
426  workMem,
427  sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
428 #endif
429 
430  base->nKeys = 1; /* Only one sort column, the hash code */
431 
432  base->removeabbrev = removeabbrev_index;
433  base->comparetup = comparetup_index_hash;
434  base->writetup = writetup_index;
435  base->readtup = readtup_index;
436  base->haveDatum1 = true;
437  base->arg = arg;
438 
439  arg->index.heapRel = heapRel;
440  arg->index.indexRel = indexRel;
441 
442  arg->high_mask = high_mask;
443  arg->low_mask = low_mask;
444  arg->max_buckets = max_buckets;
445 
446  MemoryContextSwitchTo(oldcontext);
447 
448  return state;
449 }
450 
451 Tuplesortstate *
452 tuplesort_begin_index_gist(Relation heapRel,
453  Relation indexRel,
454  int workMem,
455  SortCoordinate coordinate,
456  int sortopt)
457 {
458  Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
459  sortopt);
460  TuplesortPublic *base = TuplesortstateGetPublic(state);
461  MemoryContext oldcontext;
462  TuplesortIndexBTreeArg *arg;
463  int i;
464 
465  oldcontext = MemoryContextSwitchTo(base->maincontext);
466  arg = (TuplesortIndexBTreeArg *) palloc(sizeof(TuplesortIndexBTreeArg));
467 
468 #ifdef TRACE_SORT
469  if (trace_sort)
470  elog(LOG,
471  "begin index sort: workMem = %d, randomAccess = %c",
472  workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
473 #endif
474 
475  base->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel);
476 
477  base->removeabbrev = removeabbrev_index;
478  base->comparetup = comparetup_index_btree;
479  base->writetup = writetup_index;
480  base->readtup = readtup_index;
481  base->haveDatum1 = true;
482  base->arg = arg;
483 
484  arg->index.heapRel = heapRel;
485  arg->index.indexRel = indexRel;
486  arg->enforceUnique = false;
487  arg->uniqueNullsNotDistinct = false;
488 
489  /* Prepare SortSupport data for each column */
490  base->sortKeys = (SortSupport) palloc0(base->nKeys *
491  sizeof(SortSupportData));
492 
493  for (i = 0; i < base->nKeys; i++)
494  {
495  SortSupport sortKey = base->sortKeys + i;
496 
497  sortKey->ssup_cxt = CurrentMemoryContext;
498  sortKey->ssup_collation = indexRel->rd_indcollation[i];
499  sortKey->ssup_nulls_first = false;
500  sortKey->ssup_attno = i + 1;
501  /* Convey if abbreviation optimization is applicable in principle */
502  sortKey->abbreviate = (i == 0 && base->haveDatum1);
503 
504  Assert(sortKey->ssup_attno != 0);
505 
506  /* Look for a sort support function */
507  PrepareSortSupportFromGistIndexRel(indexRel, sortKey);
508  }
509 
510  MemoryContextSwitchTo(oldcontext);
511 
512  return state;
513 }
514 
515 Tuplesortstate *
516 tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
517  bool nullsFirstFlag, int workMem,
518  SortCoordinate coordinate, int sortopt)
519 {
520  Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
521  sortopt);
522  TuplesortPublic *base = TuplesortstateGetPublic(state);
523  TuplesortDatumArg *arg;
524  MemoryContext oldcontext;
525  int16 typlen;
526  bool typbyval;
527 
528  oldcontext = MemoryContextSwitchTo(base->maincontext);
529  arg = (TuplesortDatumArg *) palloc(sizeof(TuplesortDatumArg));
530 
531 #ifdef TRACE_SORT
532  if (trace_sort)
533  elog(LOG,
534  "begin datum sort: workMem = %d, randomAccess = %c",
535  workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
536 #endif
537 
538  base->nKeys = 1; /* always a one-column sort */
539 
540  TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
541  false, /* no unique check */
542  1,
543  workMem,
544  sortopt & TUPLESORT_RANDOMACCESS,
545  PARALLEL_SORT(coordinate));
546 
547  base->removeabbrev = removeabbrev_datum;
548  base->comparetup = comparetup_datum;
549  base->writetup = writetup_datum;
550  base->readtup = readtup_datum;
551  base->haveDatum1 = true;
552  base->arg = arg;
553 
554  arg->datumType = datumType;
555 
556  /* lookup necessary attributes of the datum type */
557  get_typlenbyval(datumType, &typlen, &typbyval);
558  arg->datumTypeLen = typlen;
559  base->tuples = !typbyval;
560 
561  /* Prepare SortSupport data */
562  base->sortKeys = (SortSupport) palloc0(sizeof(SortSupportData));
563 
564  base->sortKeys->ssup_cxt = CurrentMemoryContext;
565  base->sortKeys->ssup_collation = sortCollation;
566  base->sortKeys->ssup_nulls_first = nullsFirstFlag;
567 
568  /*
569  * Abbreviation is possible here only for by-reference types. In theory,
570  * a pass-by-value datatype could have an abbreviated form that is cheaper
571  * to compare. In a tuple sort, we could support that, because we can
572  * always extract the original datum from the tuple as needed. Here, we
573  * can't, because a datum sort only stores a single copy of the datum; the
574  * "tuple" field of each SortTuple is NULL.
575  */
576  base->sortKeys->abbreviate = !typbyval;
577 
578  PrepareSortSupportFromOrderingOp(sortOperator, base->sortKeys);
579 
580  /*
581  * The "onlyKey" optimization cannot be used with abbreviated keys, since
582  * tie-breaker comparisons may be required. Typically, the optimization
583  * is only of value to pass-by-value types anyway, whereas abbreviated
584  * keys are typically only of value to pass-by-reference types.
585  */
586  if (!base->sortKeys->abbrev_converter)
587  base->onlyKey = base->sortKeys;
588 
589  MemoryContextSwitchTo(oldcontext);
590 
591  return state;
592 }
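A minimal sketch of the single-Datum path, assuming float8 input (vals[] and nvals are illustrative placeholders; Float8LessOperator is assumed to be the float8 "<" operator OID):

    Tuplesortstate *ts;
    Datum       d;
    bool        isnull;

    ts = tuplesort_begin_datum(FLOAT8OID, Float8LessOperator, InvalidOid,
                               false, work_mem, NULL, TUPLESORT_NONE);

    for (int i = 0; i < nvals; i++)
        tuplesort_putdatum(ts, Float8GetDatum(vals[i]), false);

    tuplesort_performsort(ts);

    while (tuplesort_getdatum(ts, true, true, &d, &isnull, NULL))
    {
        /* with copy = true, any pass-by-reference result is palloc'd in
         * the caller's memory context */
    }

    tuplesort_end(ts);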
593 
594 /*
595  * Accept one tuple while collecting input data for sort.
596  *
597  * Note that the input data is always copied; the caller need not save it.
598  */
599 void
600 tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
601 {
602  TuplesortPublic *base = TuplesortstateGetPublic(state);
603  MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
604  TupleDesc tupDesc = (TupleDesc) base->arg;
605  SortTuple stup;
606  MinimalTuple tuple;
607  HeapTupleData htup;
608 
609  /* copy the tuple into sort storage */
610  tuple = ExecCopySlotMinimalTuple(slot);
611  stup.tuple = (void *) tuple;
612  /* set up first-column key value */
613  htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
614  htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
615  stup.datum1 = heap_getattr(&htup,
616  base->sortKeys[0].ssup_attno,
617  tupDesc,
618  &stup.isnull1);
619 
620  tuplesort_puttuple_common(state, &stup,
621  base->sortKeys->abbrev_converter &&
622  !stup.isnull1);
623 
624  MemoryContextSwitchTo(oldcontext);
625 }
626 
627 /*
628  * Accept one tuple while collecting input data for sort.
629  *
630  * Note that the input data is always copied; the caller need not save it.
631  */
632 void
633 tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
634 {
635  SortTuple stup;
636  TuplesortPublic *base = TuplesortstateGetPublic(state);
637  TuplesortClusterArg *arg = (TuplesortClusterArg *) base->arg;
638  MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
639 
640  /* copy the tuple into sort storage */
641  tup = heap_copytuple(tup);
642  stup.tuple = (void *) tup;
643 
644  /*
645  * set up first-column key value, and potentially abbreviate, if it's a
646  * simple column
647  */
648  if (base->haveDatum1)
649  {
650  stup.datum1 = heap_getattr(tup,
651  arg->indexInfo->ii_IndexAttrNumbers[0],
652  arg->tupDesc,
653  &stup.isnull1);
654  }
655 
656  tuplesort_puttuple_common(state, &stup,
657  base->haveDatum1 &&
658  base->sortKeys->abbrev_converter &&
659  !stup.isnull1);
660 
661  MemoryContextSwitchTo(oldcontext);
662 }
663 
664 /*
665  * Collect one index tuple while collecting input data for sort, building
666  * it from caller-supplied values.
667  */
668 void
669 tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel,
670  ItemPointer self, Datum *values,
671  bool *isnull)
672 {
673  SortTuple stup;
674  IndexTuple tuple;
675  TuplesortPublic *base = TuplesortstateGetPublic(state);
676  TuplesortIndexArg *arg = (TuplesortIndexArg *) base->arg;
677 
678  stup.tuple = index_form_tuple_context(RelationGetDescr(rel), values,
679  isnull, base->tuplecontext);
680  tuple = ((IndexTuple) stup.tuple);
681  tuple->t_tid = *self;
682  /* set up first-column key value */
683  stup.datum1 = index_getattr(tuple,
684  1,
685  RelationGetDescr(arg->indexRel),
686  &stup.isnull1);
687 
688  tuplesort_puttuple_common(state, &stup,
689  base->sortKeys &&
690  base->sortKeys->abbrev_converter &&
691  !stup.isnull1);
692 }
693 
694 /*
695  * Accept one Datum while collecting input data for sort.
696  *
697  * If the Datum is pass-by-ref type, the value will be copied.
698  */
699 void
700 tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
701 {
702  TuplesortPublic *base = TuplesortstateGetPublic(state);
703  TuplesortDatumArg *arg = (TuplesortDatumArg *) base->arg;
704  MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
705  SortTuple stup;
706 
707  /*
708  * Pass-by-value types or null values are just stored directly in
709  * stup.datum1 (and stup.tuple is not used and set to NULL).
710  *
711  * Non-null pass-by-reference values need to be copied into memory we
712  * control, and possibly abbreviated. The copied value is pointed to by
713  * stup.tuple and is treated as the canonical copy (e.g. to return via
714  * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
715  * abbreviated value if abbreviation is happening, otherwise it's
716  * identical to stup.tuple.
717  */
718 
719  if (isNull || !base->tuples)
720  {
721  /*
722  * Set datum1 to zeroed representation for NULLs (to be consistent,
723  * and to support cheap inequality tests for NULL abbreviated keys).
724  */
725  stup.datum1 = !isNull ? val : (Datum) 0;
726  stup.isnull1 = isNull;
727  stup.tuple = NULL; /* no separate storage */
728  }
729  else
730  {
731  stup.isnull1 = false;
732  stup.datum1 = datumCopy(val, false, arg->datumTypeLen);
733  stup.tuple = DatumGetPointer(stup.datum1);
734  }
735 
736  tuplesort_puttuple_common(state, &stup,
737  base->tuples &&
738  base->sortKeys->abbrev_converter && !isNull);
739 
740  MemoryContextSwitchTo(oldcontext);
741 }
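The two storage shapes described in the comment above, with illustrative values:

    /*
     *   int4 42 (by-value) or NULL:  stup.datum1 = value or (Datum) 0,
     *                                stup.tuple  = NULL
     *   text 'abc' (by-reference):   stup.tuple  = palloc'd copy of the value,
     *                                stup.datum1 = abbreviated key if one is
     *                                              being generated, else the
     *                                              same pointer as stup.tuple
     */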
742 
743 /*
744  * Fetch the next tuple in either forward or back direction.
745  * If successful, put tuple in slot and return true; else, clear the slot
746  * and return false.
747  *
748  * Caller may optionally be passed back abbreviated value (on true return
749  * value) when abbreviation was used, which can be used to cheaply avoid
750  * equality checks that might otherwise be required. Caller can safely make a
751  * determination of "non-equal tuple" based on simple binary inequality. A
752  * NULL value in leading attribute will set abbreviated value to zeroed
753  * representation, which caller may rely on in abbreviated inequality check.
754  *
755  * If copy is true, the slot receives a tuple that's been copied into the
756  * caller's memory context, so that it will stay valid regardless of future
757  * manipulations of the tuplesort's state (up to and including deleting the
758  * tuplesort). If copy is false, the slot will just receive a pointer to a
759  * tuple held within the tuplesort, which is more efficient, but only safe for
760  * callers that are prepared to have any subsequent manipulation of the
761  * tuplesort's state invalidate slot contents.
762  */
763 bool
764 tuplesort_gettupleslot(Tuplesortstate *state, bool forward, bool copy,
765  TupleTableSlot *slot, Datum *abbrev)
766 {
767  TuplesortPublic *base = TuplesortstateGetPublic(state);
768  MemoryContext oldcontext = MemoryContextSwitchTo(base->sortcontext);
769  SortTuple stup;
770 
771  if (!tuplesort_gettuple_common(state, forward, &stup))
772  stup.tuple = NULL;
773 
774  MemoryContextSwitchTo(oldcontext);
775 
776  if (stup.tuple)
777  {
778  /* Record abbreviated key for caller */
779  if (base->sortKeys->abbrev_converter && abbrev)
780  *abbrev = stup.datum1;
781 
782  if (copy)
783  stup.tuple = heap_copy_minimal_tuple((MinimalTuple) stup.tuple);
784 
785  ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, copy);
786  return true;
787  }
788  else
789  {
790  ExecClearTuple(slot);
791  return false;
792  }
793 }
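Illustrative contrast of the two copy modes (hypothetical caller code):

    /* cheap: slot points into tuplesort-owned memory and is invalidated by
     * the next operation on ts */
    tuplesort_gettupleslot(ts, true, false, slot, NULL);

    /* safe: the tuple is first copied into the caller's memory context */
    tuplesort_gettupleslot(ts, true, true, slot, NULL);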
794 
795 /*
796  * Fetch the next tuple in either forward or back direction.
797  * Returns NULL if no more tuples. Returned tuple belongs to tuplesort memory
798  * context, and must not be freed by caller. Caller may not rely on tuple
799  * remaining valid after any further manipulation of tuplesort.
800  */
801 HeapTuple
802 tuplesort_getheaptuple(Tuplesortstate *state, bool forward)
803 {
804  TuplesortPublic *base = TuplesortstateGetPublic(state);
805  MemoryContext oldcontext = MemoryContextSwitchTo(base->sortcontext);
806  SortTuple stup;
807 
808  if (!tuplesort_gettuple_common(state, forward, &stup))
809  stup.tuple = NULL;
810 
811  MemoryContextSwitchTo(oldcontext);
812 
813  return stup.tuple;
814 }
815 
816 /*
817  * Fetch the next index tuple in either forward or back direction.
818  * Returns NULL if no more tuples. Returned tuple belongs to tuplesort memory
819  * context, and must not be freed by caller. Caller may not rely on tuple
820  * remaining valid after any further manipulation of tuplesort.
821  */
822 IndexTuple
823 tuplesort_getindextuple(Tuplesortstate *state, bool forward)
824 {
825  TuplesortPublic *base = TuplesortstateGetPublic(state);
826  MemoryContext oldcontext = MemoryContextSwitchTo(base->sortcontext);
827  SortTuple stup;
828 
829  if (!tuplesort_gettuple_common(state, forward, &stup))
830  stup.tuple = NULL;
831 
832  MemoryContextSwitchTo(oldcontext);
833 
834  return (IndexTuple) stup.tuple;
835 }
836 
837 /*
838  * Fetch the next Datum in either forward or back direction.
839  * Returns false if no more datums.
840  *
841  * If the Datum is pass-by-ref type, the returned value is freshly palloc'd
842  * in caller's context, and is now owned by the caller (this differs from
843  * similar routines for other types of tuplesorts).
844  *
845  * Caller may optionally be passed back abbreviated value (on true return
846  * value) when abbreviation was used, which can be used to cheaply avoid
847  * equality checks that might otherwise be required. Caller can safely make a
848  * determination of "non-equal tuple" based on simple binary inequality. A
849  * NULL value will have a zeroed abbreviated value representation, which caller
850  * may rely on in abbreviated inequality check.
851  *
852  * For byref Datums, if copy is true, *val is set to a copy of the Datum
853  * copied into the caller's memory context, so that it will stay valid
854  * regardless of future manipulations of the tuplesort's state (up to and
855  * including deleting the tuplesort). If copy is false, *val will just be
856  * set to a pointer to the Datum held within the tuplesort, which is more
857  * efficient, but only safe for callers that are prepared to have any
858  * subsequent manipulation of the tuplesort's state invalidate slot contents.
859  * For byval Datums, the value of the 'copy' parameter has no effect.
860 
861  */
862 bool
863 tuplesort_getdatum(Tuplesortstate *state, bool forward, bool copy,
864  Datum *val, bool *isNull, Datum *abbrev)
865 {
866  TuplesortPublic *base = TuplesortstateGetPublic(state);
867  MemoryContext oldcontext = MemoryContextSwitchTo(base->sortcontext);
868  TuplesortDatumArg *arg = (TuplesortDatumArg *) base->arg;
869  SortTuple stup;
870 
871  if (!tuplesort_gettuple_common(state, forward, &stup))
872  {
873  MemoryContextSwitchTo(oldcontext);
874  return false;
875  }
876 
877  /* Ensure we copy into caller's memory context */
878  MemoryContextSwitchTo(oldcontext);
879 
880  /* Record abbreviated key for caller */
881  if (base->sortKeys->abbrev_converter && abbrev)
882  *abbrev = stup.datum1;
883 
884  if (stup.isnull1 || !base->tuples)
885  {
886  *val = stup.datum1;
887  *isNull = stup.isnull1;
888  }
889  else
890  {
891  /* use stup.tuple because stup.datum1 may be an abbreviation */
892  if (copy)
893  *val = datumCopy(PointerGetDatum(stup.tuple), false,
894  arg->datumTypeLen);
895  else
896  *val = PointerGetDatum(stup.tuple);
897  *isNull = false;
898  }
899 
900  return true;
901 }
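For a pass-by-reference type, the copy flag decides who owns the returned value (hypothetical caller code):

    Datum   val;
    bool    isnull;

    if (tuplesort_getdatum(ts, true, true, &val, &isnull, NULL) && !isnull)
    {
        /* copy = true: val was palloc'd in our context; we may pfree it */
        pfree(DatumGetPointer(val));
    }

    if (tuplesort_getdatum(ts, true, false, &val, &isnull, NULL) && !isnull)
    {
        /* copy = false: val points into tuplesort memory; do not pfree it,
         * and do not use it after further manipulation of ts */
    }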
902 
903 
904 /*
905  * Routines specialized for HeapTuple (actually MinimalTuple) case
906  */
907 
908 static void
910 {
911  int i;
913 
914  for (i = 0; i < count; i++)
915  {
916  HeapTupleData htup;
917 
918  htup.t_len = ((MinimalTuple) stups[i].tuple)->t_len +
919  MINIMAL_TUPLE_OFFSET;
920  htup.t_data = (HeapTupleHeader) ((char *) stups[i].tuple -
921  MINIMAL_TUPLE_OFFSET);
922  stups[i].datum1 = heap_getattr(&htup,
923  base->sortKeys[0].ssup_attno,
924  (TupleDesc) base->arg,
925  &stups[i].isnull1);
926  }
927 }
928 
929 static int
931 {
933  SortSupport sortKey = base->sortKeys;
934  HeapTupleData ltup;
935  HeapTupleData rtup;
936  TupleDesc tupDesc;
937  int nkey;
938  int32 compare;
939  AttrNumber attno;
940  Datum datum1,
941  datum2;
942  bool isnull1,
943  isnull2;
944 
945 
946  /* Compare the leading sort key */
947  compare = ApplySortComparator(a->datum1, a->isnull1,
948  b->datum1, b->isnull1,
949  sortKey);
950  if (compare != 0)
951  return compare;
952 
953  /* Compare additional sort keys */
954  ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
955  ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
956  rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
957  rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
958  tupDesc = (TupleDesc) base->arg;
959 
960  if (sortKey->abbrev_converter)
961  {
962  attno = sortKey->ssup_attno;
963 
964  datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
965  datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
966 
967  compare = ApplySortAbbrevFullComparator(datum1, isnull1,
968  datum2, isnull2,
969  sortKey);
970  if (compare != 0)
971  return compare;
972  }
973 
974  sortKey++;
975  for (nkey = 1; nkey < base->nKeys; nkey++, sortKey++)
976  {
977  attno = sortKey->ssup_attno;
978 
979  datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
980  datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
981 
982  compare = ApplySortComparator(datum1, isnull1,
983  datum2, isnull2,
984  sortKey);
985  if (compare != 0)
986  return compare;
987  }
988 
989  return 0;
990 }
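The comparison pattern used above, in miniature and outside the backend: an order-preserving abbreviation is compared first, and only ties fall back to the authoritative comparator (standalone sketch, not PostgreSQL code):

    #include <stdint.h>
    #include <string.h>

    typedef struct
    {
        uint64_t    abbrev;     /* e.g. first 8 bytes, packed big-endian */
        const char *full;       /* authoritative value */
    } AbbrevKey;

    static int
    compare_abbrev_keys(const AbbrevKey *x, const AbbrevKey *y)
    {
        if (x->abbrev != y->abbrev)
            return (x->abbrev < y->abbrev) ? -1 : 1;

        /* abbreviations tie: consult the full values, as the call to
         * ApplySortAbbrevFullComparator() does above */
        return strcmp(x->full, y->full);
    }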
991 
992 static void
993 writetup_heap(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
994 {
995  TuplesortPublic *base = TuplesortstateGetPublic(state);
996  MinimalTuple tuple = (MinimalTuple) stup->tuple;
997 
998  /* the part of the MinimalTuple we'll write: */
999  char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
1000  unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
1001 
1002  /* total on-disk footprint: */
1003  unsigned int tuplen = tupbodylen + sizeof(int);
1004 
1005  LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1006  LogicalTapeWrite(tape, tupbody, tupbodylen);
1007  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1008  LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1009 }
1010 
1011 static void
1012 readtup_heap(Tuplesortstate *state, SortTuple *stup,
1013  LogicalTape *tape, unsigned int len)
1014 {
1015  unsigned int tupbodylen = len - sizeof(int);
1016  unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
1017  MinimalTuple tuple = (MinimalTuple) tuplesort_readtup_alloc(state, tuplen);
1018  char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
1019  TuplesortPublic *base = TuplesortstateGetPublic(state);
1020  HeapTupleData htup;
1021 
1022  /* read in the tuple proper */
1023  tuple->t_len = tuplen;
1024  LogicalTapeReadExact(tape, tupbody, tupbodylen);
1025  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1026  LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1027  stup->tuple = (void *) tuple;
1028  /* set up first-column key value */
1029  htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
1030  htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
1031  stup->datum1 = heap_getattr(&htup,
1032  base->sortKeys[0].ssup_attno,
1033  (TupleDesc) base->arg,
1034  &stup->isnull1);
1035 }
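The on-tape record framing shared by writetup_heap()/readtup_heap() and the other writetup/readtup pairs below, sketched:

    /*
     *   [unsigned int tuplen] [tuple body] [unsigned int tuplen]
     *                                       ^ written only under
     *                                         TUPLESORT_RANDOMACCESS
     *
     * The leading length word lets a forward read find the end of the
     * record; the optional trailing copy lets a backward read find its
     * beginning.
     */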
1036 
1037 /*
1038  * Routines specialized for the CLUSTER case (HeapTuple data, with
1039  * comparisons per a btree index definition)
1040  */
1041 
1042 static void
1043 removeabbrev_cluster(Tuplesortstate *state, SortTuple *stups, int count)
1044 {
1045  int i;
1046  TuplesortPublic *base = TuplesortstateGetPublic(state);
1047  TuplesortClusterArg *arg = (TuplesortClusterArg *) base->arg;
1048 
1049  for (i = 0; i < count; i++)
1050  {
1051  HeapTuple tup;
1052 
1053  tup = (HeapTuple) stups[i].tuple;
1054  stups[i].datum1 = heap_getattr(tup,
1055  arg->indexInfo->ii_IndexAttrNumbers[0],
1056  arg->tupDesc,
1057  &stups[i].isnull1);
1058  }
1059 }
1060 
1061 static int
1062 comparetup_cluster(const SortTuple *a, const SortTuple *b,
1063  Tuplesortstate *state)
1064 {
1065  TuplesortPublic *base = TuplesortstateGetPublic(state);
1066  TuplesortClusterArg *arg = (TuplesortClusterArg *) base->arg;
1067  SortSupport sortKey = base->sortKeys;
1068  HeapTuple ltup;
1069  HeapTuple rtup;
1070  TupleDesc tupDesc;
1071  int nkey;
1072  int32 compare;
1073  Datum datum1,
1074  datum2;
1075  bool isnull1,
1076  isnull2;
1077 
1078  /* Be prepared to compare additional sort keys */
1079  ltup = (HeapTuple) a->tuple;
1080  rtup = (HeapTuple) b->tuple;
1081  tupDesc = arg->tupDesc;
1082 
1083  /* Compare the leading sort key, if it's simple */
1084  if (base->haveDatum1)
1085  {
1086  compare = ApplySortComparator(a->datum1, a->isnull1,
1087  b->datum1, b->isnull1,
1088  sortKey);
1089  if (compare != 0)
1090  return compare;
1091 
1092  if (sortKey->abbrev_converter)
1093  {
1094  AttrNumber leading = arg->indexInfo->ii_IndexAttrNumbers[0];
1095 
1096  datum1 = heap_getattr(ltup, leading, tupDesc, &isnull1);
1097  datum2 = heap_getattr(rtup, leading, tupDesc, &isnull2);
1098 
1099  compare = ApplySortAbbrevFullComparator(datum1, isnull1,
1100  datum2, isnull2,
1101  sortKey);
1102  }
1103  if (compare != 0 || base->nKeys == 1)
1104  return compare;
1105  /* Compare additional columns the hard way */
1106  sortKey++;
1107  nkey = 1;
1108  }
1109  else
1110  {
1111  /* Must compare all keys the hard way */
1112  nkey = 0;
1113  }
1114 
1115  if (arg->indexInfo->ii_Expressions == NULL)
1116  {
1117  /* If not expression index, just compare the proper heap attrs */
1118 
1119  for (; nkey < base->nKeys; nkey++, sortKey++)
1120  {
1121  AttrNumber attno = arg->indexInfo->ii_IndexAttrNumbers[nkey];
1122 
1123  datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
1124  datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
1125 
1126  compare = ApplySortComparator(datum1, isnull1,
1127  datum2, isnull2,
1128  sortKey);
1129  if (compare != 0)
1130  return compare;
1131  }
1132  }
1133  else
1134  {
1135  /*
1136  * In the expression index case, compute the whole index tuple and
1137  * then compare values. It would perhaps be faster to compute only as
1138  * many columns as we need to compare, but that would require
1139  * duplicating all the logic in FormIndexDatum.
1140  */
1141  Datum l_index_values[INDEX_MAX_KEYS];
1142  bool l_index_isnull[INDEX_MAX_KEYS];
1143  Datum r_index_values[INDEX_MAX_KEYS];
1144  bool r_index_isnull[INDEX_MAX_KEYS];
1145  TupleTableSlot *ecxt_scantuple;
1146 
1147  /* Reset context each time to prevent memory leakage */
1148  ResetPerTupleExprContext(arg->estate);
1149 
1150  ecxt_scantuple = GetPerTupleExprContext(arg->estate)->ecxt_scantuple;
1151 
1152  ExecStoreHeapTuple(ltup, ecxt_scantuple, false);
1153  FormIndexDatum(arg->indexInfo, ecxt_scantuple, arg->estate,
1154  l_index_values, l_index_isnull);
1155 
1156  ExecStoreHeapTuple(rtup, ecxt_scantuple, false);
1157  FormIndexDatum(arg->indexInfo, ecxt_scantuple, arg->estate,
1158  r_index_values, r_index_isnull);
1159 
1160  for (; nkey < base->nKeys; nkey++, sortKey++)
1161  {
1162  compare = ApplySortComparator(l_index_values[nkey],
1163  l_index_isnull[nkey],
1164  r_index_values[nkey],
1165  r_index_isnull[nkey],
1166  sortKey);
1167  if (compare != 0)
1168  return compare;
1169  }
1170  }
1171 
1172  return 0;
1173 }
1174 
1175 static void
1176 writetup_cluster(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
1177 {
1178  TuplesortPublic *base = TuplesortstateGetPublic(state);
1179  HeapTuple tuple = (HeapTuple) stup->tuple;
1180  unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int);
1181 
1182  /* We need to store t_self, but not other fields of HeapTupleData */
1183  LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1184  LogicalTapeWrite(tape, &tuple->t_self, sizeof(ItemPointerData));
1185  LogicalTapeWrite(tape, tuple->t_data, tuple->t_len);
1186  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1187  LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1188 }
1189 
1190 static void
1191 readtup_cluster(Tuplesortstate *state, SortTuple *stup,
1192  LogicalTape *tape, unsigned int tuplen)
1193 {
1194  TuplesortPublic *base = TuplesortstateGetPublic(state);
1195  TuplesortClusterArg *arg = (TuplesortClusterArg *) base->arg;
1196  unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
1197  HeapTuple tuple = (HeapTuple) tuplesort_readtup_alloc(state,
1198  t_len + HEAPTUPLESIZE);
1199 
1200  /* Reconstruct the HeapTupleData header */
1201  tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
1202  tuple->t_len = t_len;
1203  LogicalTapeReadExact(tape, &tuple->t_self, sizeof(ItemPointerData));
1204  /* We don't currently bother to reconstruct t_tableOid */
1205  tuple->t_tableOid = InvalidOid;
1206  /* Read in the tuple body */
1207  LogicalTapeReadExact(tape, tuple->t_data, tuple->t_len);
1208  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1209  LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1210  stup->tuple = (void *) tuple;
1211  /* set up first-column key value, if it's a simple column */
1212  if (base->haveDatum1)
1213  stup->datum1 = heap_getattr(tuple,
1214  arg->indexInfo->ii_IndexAttrNumbers[0],
1215  arg->tupDesc,
1216  &stup->isnull1);
1217 }
1218 
1219 static void
1220 freestate_cluster(Tuplesortstate *state)
1221 {
1222  TuplesortPublic *base = TuplesortstateGetPublic(state);
1223  TuplesortClusterArg *arg = (TuplesortClusterArg *) base->arg;
1224 
1225  /* Free any execution state created for CLUSTER case */
1226  if (arg->estate != NULL)
1227  {
1228  ExprContext *econtext = GetPerTupleExprContext(arg->estate);
1229 
1231  FreeExecutorState(arg->estate);
1232  }
1233 }
1234 
1235 /*
1236  * Routines specialized for IndexTuple case
1237  *
1238  * The btree and hash cases require separate comparison functions, but the
1239  * IndexTuple representation is the same so the copy/write/read support
1240  * functions can be shared.
1241  */
1242 
1243 static void
1244 removeabbrev_index(Tuplesortstate *state, SortTuple *stups, int count)
1245 {
1246  TuplesortPublic *base = TuplesortstateGetPublic(state);
1247  TuplesortIndexArg *arg = (TuplesortIndexArg *) base->arg;
1248  int i;
1249 
1250  for (i = 0; i < count; i++)
1251  {
1252  IndexTuple tuple;
1253 
1254  tuple = stups[i].tuple;
1255  stups[i].datum1 = index_getattr(tuple,
1256  1,
1257  RelationGetDescr(arg->indexRel),
1258  &stups[i].isnull1);
1259  }
1260 }
1261 
1262 static int
1263 comparetup_index_btree(const SortTuple *a, const SortTuple *b,
1264  Tuplesortstate *state)
1265 {
1266  /*
1267  * This is similar to comparetup_heap(), but expects index tuples. There
1268  * is also special handling for enforcing uniqueness, and special
1269  * treatment for equal keys at the end.
1270  */
1271  TuplesortPublic *base = TuplesortstateGetPublic(state);
1272  TuplesortIndexBTreeArg *arg = (TuplesortIndexBTreeArg *) base->arg;
1273  SortSupport sortKey = base->sortKeys;
1274  IndexTuple tuple1;
1275  IndexTuple tuple2;
1276  int keysz;
1277  TupleDesc tupDes;
1278  bool equal_hasnull = false;
1279  int nkey;
1280  int32 compare;
1281  Datum datum1,
1282  datum2;
1283  bool isnull1,
1284  isnull2;
1285 
1286 
1287  /* Compare the leading sort key */
1288  compare = ApplySortComparator(a->datum1, a->isnull1,
1289  b->datum1, b->isnull1,
1290  sortKey);
1291  if (compare != 0)
1292  return compare;
1293 
1294  /* Compare additional sort keys */
1295  tuple1 = (IndexTuple) a->tuple;
1296  tuple2 = (IndexTuple) b->tuple;
1297  keysz = base->nKeys;
1298  tupDes = RelationGetDescr(arg->index.indexRel);
1299 
1300  if (sortKey->abbrev_converter)
1301  {
1302  datum1 = index_getattr(tuple1, 1, tupDes, &isnull1);
1303  datum2 = index_getattr(tuple2, 1, tupDes, &isnull2);
1304 
1305  compare = ApplySortAbbrevFullComparator(datum1, isnull1,
1306  datum2, isnull2,
1307  sortKey);
1308  if (compare != 0)
1309  return compare;
1310  }
1311 
1312  /* they are equal, so we only need to examine one null flag */
1313  if (a->isnull1)
1314  equal_hasnull = true;
1315 
1316  sortKey++;
1317  for (nkey = 2; nkey <= keysz; nkey++, sortKey++)
1318  {
1319  datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
1320  datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);
1321 
1322  compare = ApplySortComparator(datum1, isnull1,
1323  datum2, isnull2,
1324  sortKey);
1325  if (compare != 0)
1326  return compare; /* done when we find unequal attributes */
1327 
1328  /* they are equal, so we only need to examine one null flag */
1329  if (isnull1)
1330  equal_hasnull = true;
1331  }
1332 
1333  /*
1334  * If btree has asked us to enforce uniqueness, complain if two equal
1335  * tuples are detected (unless there was at least one NULL field and NULLS
1336  * NOT DISTINCT was not set).
1337  *
1338  * It is sufficient to make the test here, because if two tuples are equal
1339  * they *must* get compared at some stage of the sort --- otherwise the
1340  * sort algorithm wouldn't have checked whether one must appear before the
1341  * other.
1342  */
1343  if (arg->enforceUnique && !(!arg->uniqueNullsNotDistinct && equal_hasnull))
1344  {
1345  Datum values[INDEX_MAX_KEYS];
1346  bool isnull[INDEX_MAX_KEYS];
1347  char *key_desc;
1348 
1349  /*
1350  * Some rather brain-dead implementations of qsort (such as the one in
1351  * QNX 4) will sometimes call the comparison routine to compare a
1352  * value to itself, but we always use our own implementation, which
1353  * does not.
1354  */
1355  Assert(tuple1 != tuple2);
1356 
1357  index_deform_tuple(tuple1, tupDes, values, isnull);
1358 
1359  key_desc = BuildIndexValueDescription(arg->index.indexRel, values, isnull);
1360 
1361  ereport(ERROR,
1362  (errcode(ERRCODE_UNIQUE_VIOLATION),
1363  errmsg("could not create unique index \"%s\"",
1364  RelationGetRelationName(arg->index.indexRel)),
1365  key_desc ? errdetail("Key %s is duplicated.", key_desc) :
1366  errdetail("Duplicate keys exist."),
1367  errtableconstraint(arg->index.heapRel,
1368  RelationGetRelationName(arg->index.indexRel))));
1369  }
1370 
1371  /*
1372  * If key values are equal, we sort on ItemPointer. This is required for
1373  * btree indexes, since heap TID is treated as an implicit last key
1374  * attribute in order to ensure that all keys in the index are physically
1375  * unique.
1376  */
1377  {
1378  BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
1379  BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
1380 
1381  if (blk1 != blk2)
1382  return (blk1 < blk2) ? -1 : 1;
1383  }
1384  {
1385  OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
1386  OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
1387 
1388  if (pos1 != pos2)
1389  return (pos1 < pos2) ? -1 : 1;
1390  }
1391 
1392  /* ItemPointer values should never be equal */
1393  Assert(false);
1394 
1395  return 0;
1396 }
1397 
1398 static int
1399 comparetup_index_hash(const SortTuple *a, const SortTuple *b,
1400  Tuplesortstate *state)
1401 {
1402  Bucket bucket1;
1403  Bucket bucket2;
1404  uint32 hash1;
1405  uint32 hash2;
1406  IndexTuple tuple1;
1407  IndexTuple tuple2;
1408  TuplesortPublic *base = TuplesortstateGetPublic(state);
1409  TuplesortIndexHashArg *arg = (TuplesortIndexHashArg *) base->arg;
1410 
1411  /*
1412  * Fetch hash keys and mask off bits we don't want to sort by, so that the
1413  * initial sort is just on the bucket number. We know that the first
1414  * column of the index tuple is the hash key.
1415  */
1416  Assert(!a->isnull1);
1417  bucket1 = _hash_hashkey2bucket(DatumGetUInt32(a->datum1),
1418  arg->max_buckets, arg->high_mask,
1419  arg->low_mask);
1420  Assert(!b->isnull1);
1421  bucket2 = _hash_hashkey2bucket(DatumGetUInt32(b->datum1),
1422  arg->max_buckets, arg->high_mask,
1423  arg->low_mask);
1424  if (bucket1 > bucket2)
1425  return 1;
1426  else if (bucket1 < bucket2)
1427  return -1;
1428 
1429  /*
1430  * If bucket values are equal, sort by hash values. This allows us to
1431  * insert directly onto bucket/overflow pages, where the index tuples are
1432  * stored in hash order to allow fast binary search within each page.
1433  */
1434  hash1 = DatumGetUInt32(a->datum1);
1435  hash2 = DatumGetUInt32(b->datum1);
1436  if (hash1 > hash2)
1437  return 1;
1438  else if (hash1 < hash2)
1439  return -1;
1440 
1441  /*
1442  * If hash values are equal, we sort on ItemPointer. This does not affect
1443  * validity of the finished index, but it may be useful to have index
1444  * scans in physical order.
1445  */
1446  tuple1 = (IndexTuple) a->tuple;
1447  tuple2 = (IndexTuple) b->tuple;
1448 
1449  {
1450  BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
1451  BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
1452 
1453  if (blk1 != blk2)
1454  return (blk1 < blk2) ? -1 : 1;
1455  }
1456  {
1457  OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
1458  OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
1459 
1460  if (pos1 != pos2)
1461  return (pos1 < pos2) ? -1 : 1;
1462  }
1463 
1464  /* ItemPointer values should never be equal */
1465  Assert(false);
1466 
1467  return 0;
1468 }
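The bucket mapping relied on above, shown in isolation; this is intended to mirror the masking logic of _hash_hashkey2bucket() (standalone sketch, assumptions as noted):

    #include <stdint.h>

    static uint32_t
    hashkey_to_bucket(uint32_t hashkey, uint32_t maxbucket,
                      uint32_t highmask, uint32_t lowmask)
    {
        uint32_t    bucket = hashkey & highmask;

        /* fold values beyond the last existing bucket back into the table */
        if (bucket > maxbucket)
            bucket &= lowmask;

        return bucket;
    }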
1469 
1470 static void
1471 writetup_index(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
1472 {
1473  TuplesortPublic *base = TuplesortstateGetPublic(state);
1474  IndexTuple tuple = (IndexTuple) stup->tuple;
1475  unsigned int tuplen;
1476 
1477  tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
1478  LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1479  LogicalTapeWrite(tape, tuple, IndexTupleSize(tuple));
1480  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1481  LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1482 }
1483 
1484 static void
1485 readtup_index(Tuplesortstate *state, SortTuple *stup,
1486  LogicalTape *tape, unsigned int len)
1487 {
1488  TuplesortPublic *base = TuplesortstateGetPublic(state);
1489  TuplesortIndexArg *arg = (TuplesortIndexArg *) base->arg;
1490  unsigned int tuplen = len - sizeof(unsigned int);
1491  IndexTuple tuple = (IndexTuple) tuplesort_readtup_alloc(state, tuplen);
1492 
1493  LogicalTapeReadExact(tape, tuple, tuplen);
1494  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1495  LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1496  stup->tuple = (void *) tuple;
1497  /* set up first-column key value */
1498  stup->datum1 = index_getattr(tuple,
1499  1,
1500  RelationGetDescr(arg->indexRel),
1501  &stup->isnull1);
1502 }
1503 
1504 /*
1505  * Routines specialized for DatumTuple case
1506  */
1507 
1508 static void
1509 removeabbrev_datum(Tuplesortstate *state, SortTuple *stups, int count)
1510 {
1511  int i;
1512 
1513  for (i = 0; i < count; i++)
1514  stups[i].datum1 = PointerGetDatum(stups[i].tuple);
1515 }
1516 
1517 static int
1518 comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
1519 {
1520  TuplesortPublic *base = TuplesortstateGetPublic(state);
1521  int compare;
1522 
1523  compare = ApplySortComparator(a->datum1, a->isnull1,
1524  b->datum1, b->isnull1,
1525  base->sortKeys);
1526  if (compare != 0)
1527  return compare;
1528 
1529  /* if we have abbreviations, then "tuple" has the original value */
1530 
1531  if (base->sortKeys->abbrev_converter)
1532  compare = ApplySortAbbrevFullComparator(PointerGetDatum(a->tuple), a->isnull1,
1533  PointerGetDatum(b->tuple), b->isnull1,
1534  base->sortKeys);
1535 
1536  return compare;
1537 }
1538 
1539 static void
1540 writetup_datum(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
1541 {
1542  TuplesortPublic *base = TuplesortstateGetPublic(state);
1543  TuplesortDatumArg *arg = (TuplesortDatumArg *) base->arg;
1544  void *waddr;
1545  unsigned int tuplen;
1546  unsigned int writtenlen;
1547 
1548  if (stup->isnull1)
1549  {
1550  waddr = NULL;
1551  tuplen = 0;
1552  }
1553  else if (!base->tuples)
1554  {
1555  waddr = &stup->datum1;
1556  tuplen = sizeof(Datum);
1557  }
1558  else
1559  {
1560  waddr = stup->tuple;
1561  tuplen = datumGetSize(PointerGetDatum(stup->tuple), false, arg->datumTypeLen);
1562  Assert(tuplen != 0);
1563  }
1564 
1565  writtenlen = tuplen + sizeof(unsigned int);
1566 
1567  LogicalTapeWrite(tape, &writtenlen, sizeof(writtenlen));
1568  LogicalTapeWrite(tape, waddr, tuplen);
1569  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1570  LogicalTapeWrite(tape, &writtenlen, sizeof(writtenlen));
1571 }
1572 
1573 static void
1574 readtup_datum(Tuplesortstate *state, SortTuple *stup,
1575  LogicalTape *tape, unsigned int len)
1576 {
1577  TuplesortPublic *base = TuplesortstateGetPublic(state);
1578  unsigned int tuplen = len - sizeof(unsigned int);
1579 
1580  if (tuplen == 0)
1581  {
1582  /* it's NULL */
1583  stup->datum1 = (Datum) 0;
1584  stup->isnull1 = true;
1585  stup->tuple = NULL;
1586  }
1587  else if (!base->tuples)
1588  {
1589  Assert(tuplen == sizeof(Datum));
1590  LogicalTapeReadExact(tape, &stup->datum1, tuplen);
1591  stup->isnull1 = false;
1592  stup->tuple = NULL;
1593  }
1594  else
1595  {
1596  void *raddr = tuplesort_readtup_alloc(state, tuplen);
1597 
1598  LogicalTapeReadExact(tape, raddr, tuplen);
1599  stup->datum1 = PointerGetDatum(raddr);
1600  stup->isnull1 = false;
1601  stup->tuple = raddr;
1602  }
1603 
1604  if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1605  LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1606 }