PostgreSQL Source Code git master
tuplesort.h File Reference
#include "access/brin_tuple.h"
#include "access/gin_tuple.h"
#include "access/itup.h"
#include "executor/tuptable.h"
#include "storage/dsm.h"
#include "utils/logtape.h"
#include "utils/relcache.h"
#include "utils/sortsupport.h"
Include dependency graph for tuplesort.h:
This graph shows which files directly or indirectly include this file:


Data Structures

struct  SortCoordinateData
 
struct  TuplesortInstrumentation
 
struct  SortTuple
 
struct  TuplesortPublic
 

Macros

#define NUM_TUPLESORTMETHODS   4
 
#define TUPLESORT_NONE   0
 
#define TUPLESORT_RANDOMACCESS   (1 << 0)
 
#define TUPLESORT_ALLOWBOUNDED   (1 << 1)
 
#define TupleSortUseBumpTupleCxt(opt)   (((opt) & TUPLESORT_ALLOWBOUNDED) == 0)
 
#define PARALLEL_SORT(coordinate)
 
#define TuplesortstateGetPublic(state)   ((TuplesortPublic *) state)
 
#define LogicalTapeReadExact(tape, ptr, len)
 

Typedefs

typedef struct Tuplesortstate Tuplesortstate
 
typedef struct Sharedsort Sharedsort
 
typedef struct SortCoordinateData SortCoordinateData
 
typedef struct SortCoordinateData *SortCoordinate
 
typedef struct TuplesortInstrumentation TuplesortInstrumentation
 
typedef int(* SortTupleComparator) (const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
 

Enumerations

enum  TuplesortMethod {
  SORT_TYPE_STILL_IN_PROGRESS = 0 , SORT_TYPE_TOP_N_HEAPSORT = 1 << 0 , SORT_TYPE_QUICKSORT = 1 << 1 , SORT_TYPE_EXTERNAL_SORT = 1 << 2 ,
  SORT_TYPE_EXTERNAL_MERGE = 1 << 3
}
 
enum  TuplesortSpaceType { SORT_SPACE_TYPE_DISK , SORT_SPACE_TYPE_MEMORY }
 

Functions

Tuplesortstate * tuplesort_begin_common (int workMem, SortCoordinate coordinate, int sortopt)
 
void tuplesort_set_bound (Tuplesortstate *state, int64 bound)
 
bool tuplesort_used_bound (Tuplesortstate *state)
 
void tuplesort_puttuple_common (Tuplesortstate *state, SortTuple *tuple, bool useAbbrev, Size tuplen)
 
void tuplesort_performsort (Tuplesortstate *state)
 
bool tuplesort_gettuple_common (Tuplesortstate *state, bool forward, SortTuple *stup)
 
bool tuplesort_skiptuples (Tuplesortstate *state, int64 ntuples, bool forward)
 
void tuplesort_end (Tuplesortstate *state)
 
void tuplesort_reset (Tuplesortstate *state)
 
void tuplesort_get_stats (Tuplesortstate *state, TuplesortInstrumentation *stats)
 
const char * tuplesort_method_name (TuplesortMethod m)
 
const char * tuplesort_space_type_name (TuplesortSpaceType t)
 
int tuplesort_merge_order (int64 allowedMem)
 
Size tuplesort_estimate_shared (int nWorkers)
 
void tuplesort_initialize_shared (Sharedsort *shared, int nWorkers, dsm_segment *seg)
 
void tuplesort_attach_shared (Sharedsort *shared, dsm_segment *seg)
 
void tuplesort_rescan (Tuplesortstate *state)
 
void tuplesort_markpos (Tuplesortstate *state)
 
void tuplesort_restorepos (Tuplesortstate *state)
 
void * tuplesort_readtup_alloc (Tuplesortstate *state, Size tuplen)
 
Tuplesortstate * tuplesort_begin_heap (TupleDesc tupDesc, int nkeys, AttrNumber *attNums, Oid *sortOperators, Oid *sortCollations, bool *nullsFirstFlags, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_cluster (TupleDesc tupDesc, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_index_btree (Relation heapRel, Relation indexRel, bool enforceUnique, bool uniqueNullsNotDistinct, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_index_hash (Relation heapRel, Relation indexRel, uint32 high_mask, uint32 low_mask, uint32 max_buckets, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_index_gist (Relation heapRel, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_index_brin (int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_index_gin (Relation heapRel, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_datum (Oid datumType, Oid sortOperator, Oid sortCollation, bool nullsFirstFlag, int workMem, SortCoordinate coordinate, int sortopt)
 
void tuplesort_puttupleslot (Tuplesortstate *state, TupleTableSlot *slot)
 
void tuplesort_putheaptuple (Tuplesortstate *state, HeapTuple tup)
 
void tuplesort_putindextuplevalues (Tuplesortstate *state, Relation rel, const ItemPointerData *self, const Datum *values, const bool *isnull)
 
void tuplesort_putbrintuple (Tuplesortstate *state, BrinTuple *tuple, Size size)
 
void tuplesort_putgintuple (Tuplesortstate *state, GinTuple *tuple, Size size)
 
void tuplesort_putdatum (Tuplesortstate *state, Datum val, bool isNull)
 
bool tuplesort_gettupleslot (Tuplesortstate *state, bool forward, bool copy, TupleTableSlot *slot, Datum *abbrev)
 
HeapTuple tuplesort_getheaptuple (Tuplesortstate *state, bool forward)
 
IndexTuple tuplesort_getindextuple (Tuplesortstate *state, bool forward)
 
BrinTuple * tuplesort_getbrintuple (Tuplesortstate *state, Size *len, bool forward)
 
GinTuple * tuplesort_getgintuple (Tuplesortstate *state, Size *len, bool forward)
 
bool tuplesort_getdatum (Tuplesortstate *state, bool forward, bool copy, Datum *val, bool *isNull, Datum *abbrev)
 

Macro Definition Documentation

◆ LogicalTapeReadExact

#define LogicalTapeReadExact (   tape,
  ptr,
  len 
)
Value:
do { \
    if (LogicalTapeRead(tape, ptr, len) != (size_t) (len)) \
        elog(ERROR, "unexpected end of data"); \
} while(0)
#define ERROR
Definition: elog.h:39
size_t LogicalTapeRead(LogicalTape *lt, void *ptr, size_t size)
Definition: logtape.c:928

Definition at line 263 of file tuplesort.h.
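
As an illustration (not taken from the PostgreSQL sources), a readtup_* callback for a custom tuple kind might use this macro as sketched below; readtup_mytuple is a hypothetical name, and the trailing length word needed for TUPLESORT_RANDOMACCESS support is omitted.

static void
readtup_mytuple(Tuplesortstate *state, SortTuple *stup,
                LogicalTape *tape, unsigned int len)
{
    /* allocate from tuplesort-managed memory (slab slot or tuple context) */
    void       *tuple = tuplesort_readtup_alloc(state, len);

    /* raises elog(ERROR, "unexpected end of data") on a short read */
    LogicalTapeReadExact(tape, tuple, len);
    stup->tuple = tuple;
}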

◆ NUM_TUPLESORTMETHODS

#define NUM_TUPLESORTMETHODS   4

Definition at line 85 of file tuplesort.h.

◆ PARALLEL_SORT

#define PARALLEL_SORT (   coordinate)
Value:
(coordinate == NULL || \
(coordinate)->sharedsort == NULL ? 0 : \
(coordinate)->isWorker ? 1 : 2)

Definition at line 256 of file tuplesort.h.

◆ TUPLESORT_ALLOWBOUNDED

#define TUPLESORT_ALLOWBOUNDED   (1 << 1)

Definition at line 100 of file tuplesort.h.

◆ TUPLESORT_NONE

#define TUPLESORT_NONE   0

Definition at line 94 of file tuplesort.h.

◆ TUPLESORT_RANDOMACCESS

#define TUPLESORT_RANDOMACCESS   (1 << 0)

Definition at line 97 of file tuplesort.h.

◆ TuplesortstateGetPublic

#define TuplesortstateGetPublic (   state)    ((TuplesortPublic *) state)

Definition at line 260 of file tuplesort.h.

◆ TupleSortUseBumpTupleCxt

#define TupleSortUseBumpTupleCxt (   opt)    (((opt) & TUPLESORT_ALLOWBOUNDED) == 0)

Definition at line 109 of file tuplesort.h.

Typedef Documentation

◆ Sharedsort

typedef struct Sharedsort Sharedsort

Definition at line 39 of file tuplesort.h.

◆ SortCoordinate

Definition at line 62 of file tuplesort.h.

◆ SortCoordinateData

◆ SortTupleComparator

typedef int(* SortTupleComparator) (const SortTuple *a, const SortTuple *b, Tuplesortstate *state)

Definition at line 156 of file tuplesort.h.

◆ TuplesortInstrumentation

◆ Tuplesortstate

Definition at line 38 of file tuplesort.h.

Enumeration Type Documentation

◆ TuplesortMethod

Enumerator
SORT_TYPE_STILL_IN_PROGRESS 
SORT_TYPE_TOP_N_HEAPSORT 
SORT_TYPE_QUICKSORT 
SORT_TYPE_EXTERNAL_SORT 
SORT_TYPE_EXTERNAL_MERGE 

Definition at line 76 of file tuplesort.h.

typedef enum
{
    SORT_TYPE_STILL_IN_PROGRESS = 0,
    SORT_TYPE_TOP_N_HEAPSORT = 1 << 0,
    SORT_TYPE_QUICKSORT = 1 << 1,
    SORT_TYPE_EXTERNAL_SORT = 1 << 2,
    SORT_TYPE_EXTERNAL_MERGE = 1 << 3
} TuplesortMethod;

◆ TuplesortSpaceType

Enumerator
SORT_SPACE_TYPE_DISK 
SORT_SPACE_TYPE_MEMORY 

Definition at line 87 of file tuplesort.h.

typedef enum
{
    SORT_SPACE_TYPE_DISK,
    SORT_SPACE_TYPE_MEMORY
} TuplesortSpaceType;

Function Documentation

◆ tuplesort_attach_shared()

void tuplesort_attach_shared ( Sharedsort * shared,
dsm_segment * seg 
)

Definition at line 2945 of file tuplesort.c.

2946{
2947 /* Attach to SharedFileSet */
2948 SharedFileSetAttach(&shared->fileset, seg);
2949}
void SharedFileSetAttach(SharedFileSet *fileset, dsm_segment *seg)
Definition: sharedfileset.c:56
SharedFileSet fileset
Definition: tuplesort.c:358

References Sharedsort::fileset, and SharedFileSetAttach().

Referenced by _brin_parallel_build_main(), _bt_parallel_build_main(), and _gin_parallel_build_main().

◆ tuplesort_begin_cluster()

Tuplesortstate * tuplesort_begin_cluster ( TupleDesc  tupDesc,
Relation  indexRel,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 252 of file tuplesortvariants.c.

256{
257 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
258 sortopt);
260 BTScanInsert indexScanKey;
261 MemoryContext oldcontext;
263 int i;
264
265 Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
266
267 oldcontext = MemoryContextSwitchTo(base->maincontext);
269
270 if (trace_sort)
271 elog(LOG,
272 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
274 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
275
277
278 TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
279 false, /* no unique check */
280 base->nKeys,
281 workMem,
282 sortopt & TUPLESORT_RANDOMACCESS,
283 PARALLEL_SORT(coordinate));
284
289 base->readtup = readtup_cluster;
291 base->arg = arg;
292
293 arg->indexInfo = BuildIndexInfo(indexRel);
294
295 /*
296 * If we don't have a simple leading attribute, we don't currently
297 * initialize datum1, so disable optimizations that require it.
298 */
299 if (arg->indexInfo->ii_IndexAttrNumbers[0] == 0)
300 base->haveDatum1 = false;
301 else
302 base->haveDatum1 = true;
303
304 arg->tupDesc = tupDesc; /* assume we need not copy tupDesc */
305
306 indexScanKey = _bt_mkscankey(indexRel, NULL);
307
308 if (arg->indexInfo->ii_Expressions != NULL)
309 {
310 TupleTableSlot *slot;
311 ExprContext *econtext;
312
313 /*
314 * We will need to use FormIndexDatum to evaluate the index
315 * expressions. To do that, we need an EState, as well as a
316 * TupleTableSlot to put the table tuples into. The econtext's
317 * scantuple has to point to that slot, too.
318 */
319 arg->estate = CreateExecutorState();
321 econtext = GetPerTupleExprContext(arg->estate);
322 econtext->ecxt_scantuple = slot;
323 }
324
325 /* Prepare SortSupport data for each column */
326 base->sortKeys = (SortSupport) palloc0(base->nKeys *
327 sizeof(SortSupportData));
328
329 for (i = 0; i < base->nKeys; i++)
330 {
331 SortSupport sortKey = base->sortKeys + i;
332 ScanKey scanKey = indexScanKey->scankeys + i;
333 bool reverse;
334
336 sortKey->ssup_collation = scanKey->sk_collation;
337 sortKey->ssup_nulls_first =
338 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
339 sortKey->ssup_attno = scanKey->sk_attno;
340 /* Convey if abbreviation optimization is applicable in principle */
341 sortKey->abbreviate = (i == 0 && base->haveDatum1);
342
343 Assert(sortKey->ssup_attno != 0);
344
345 reverse = (scanKey->sk_flags & SK_BT_DESC) != 0;
346
347 PrepareSortSupportFromIndexRel(indexRel, reverse, sortKey);
348 }
349
350 pfree(indexScanKey);
351
352 MemoryContextSwitchTo(oldcontext);
353
354 return state;
355}
#define LOG
Definition: elog.h:31
#define elog(elevel,...)
Definition: elog.h:226
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1427
const TupleTableSlotOps TTSOpsHeapTuple
Definition: execTuples.c:85
EState * CreateExecutorState(void)
Definition: execUtils.c:88
#define GetPerTupleExprContext(estate)
Definition: executor.h:656
#define palloc0_object(type)
Definition: fe_memutils.h:75
Assert(PointerIsAligned(start, uint64))
IndexInfo * BuildIndexInfo(Relation index)
Definition: index.c:2428
int i
Definition: isn.c:77
void pfree(void *pointer)
Definition: mcxt.c:1594
void * palloc0(Size size)
Definition: mcxt.c:1395
MemoryContext CurrentMemoryContext
Definition: mcxt.c:160
#define SK_BT_NULLS_FIRST
Definition: nbtree.h:1117
#define SK_BT_DESC
Definition: nbtree.h:1116
BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup)
Definition: nbtutils.c:59
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
void * arg
#define RelationGetNumberOfAttributes(relation)
Definition: rel.h:521
#define IndexRelationGetNumberOfKeyAttributes(relation)
Definition: rel.h:534
void PrepareSortSupportFromIndexRel(Relation indexRel, bool reverse, SortSupport ssup)
Definition: sortsupport.c:161
struct SortSupportData * SortSupport
Definition: sortsupport.h:58
ScanKeyData scankeys[INDEX_MAX_KEYS]
Definition: nbtree.h:804
TupleTableSlot * ecxt_scantuple
Definition: execnodes.h:273
Form_pg_class rd_rel
Definition: rel.h:111
int sk_flags
Definition: skey.h:66
Oid sk_collation
Definition: skey.h:70
AttrNumber sk_attno
Definition: skey.h:67
AttrNumber ssup_attno
Definition: sortsupport.h:81
bool ssup_nulls_first
Definition: sortsupport.h:75
MemoryContext ssup_cxt
Definition: sortsupport.h:66
MemoryContext maincontext
Definition: tuplesort.h:219
void(* writetup)(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
Definition: tuplesort.h:195
void(* removeabbrev)(Tuplesortstate *state, SortTuple *stups, int count)
Definition: tuplesort.h:188
void(* freestate)(Tuplesortstate *state)
Definition: tuplesort.h:213
void(* readtup)(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
Definition: tuplesort.h:204
SortTupleComparator comparetup
Definition: tuplesort.h:175
SortSupport sortKeys
Definition: tuplesort.h:236
SortTupleComparator comparetup_tiebreak
Definition: tuplesort.h:182
Definition: regguts.h:323
Tuplesortstate * tuplesort_begin_common(int workMem, SortCoordinate coordinate, int sortopt)
Definition: tuplesort.c:636
bool trace_sort
Definition: tuplesort.c:122
#define PARALLEL_SORT(coordinate)
Definition: tuplesort.h:256
#define TUPLESORT_RANDOMACCESS
Definition: tuplesort.h:97
#define TuplesortstateGetPublic(state)
Definition: tuplesort.h:260
static int comparetup_cluster_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void readtup_cluster(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int tuplen)
#define CLUSTER_SORT
static void freestate_cluster(Tuplesortstate *state)
static int comparetup_cluster(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void writetup_cluster(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void removeabbrev_cluster(Tuplesortstate *state, SortTuple *stups, int count)

References _bt_mkscankey(), SortSupportData::abbreviate, arg, TuplesortPublic::arg, Assert(), BuildIndexInfo(), CLUSTER_SORT, TuplesortPublic::comparetup, comparetup_cluster(), comparetup_cluster_tiebreak(), TuplesortPublic::comparetup_tiebreak, CreateExecutorState(), CurrentMemoryContext, ExprContext::ecxt_scantuple, elog, TuplesortPublic::freestate, freestate_cluster(), GetPerTupleExprContext, TuplesortPublic::haveDatum1, i, IndexRelationGetNumberOfKeyAttributes, LOG, TuplesortPublic::maincontext, MakeSingleTupleTableSlot(), MemoryContextSwitchTo(), TuplesortPublic::nKeys, palloc0(), palloc0_object, PARALLEL_SORT, pfree(), PrepareSortSupportFromIndexRel(), RelationData::rd_rel, TuplesortPublic::readtup, readtup_cluster(), RelationGetNumberOfAttributes, TuplesortPublic::removeabbrev, removeabbrev_cluster(), BTScanInsertData::scankeys, ScanKeyData::sk_attno, SK_BT_DESC, SK_BT_NULLS_FIRST, ScanKeyData::sk_collation, ScanKeyData::sk_flags, TuplesortPublic::sortKeys, SortSupportData::ssup_attno, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, trace_sort, TTSOpsHeapTuple, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_cluster().

Referenced by heapam_relation_copy_for_cluster().

◆ tuplesort_begin_common()

Tuplesortstate * tuplesort_begin_common ( int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 636 of file tuplesort.c.

637{
639 MemoryContext maincontext;
640 MemoryContext sortcontext;
641 MemoryContext oldcontext;
642
643 /* See leader_takeover_tapes() remarks on random access support */
644 if (coordinate && (sortopt & TUPLESORT_RANDOMACCESS))
645 elog(ERROR, "random access disallowed under parallel sort");
646
647 /*
648 * Memory context surviving tuplesort_reset. This memory context holds
649 * data which is useful to keep while sorting multiple similar batches.
650 */
652 "TupleSort main",
654
655 /*
656 * Create a working memory context for one sort operation. The content of
657 * this context is deleted by tuplesort_reset.
658 */
659 sortcontext = AllocSetContextCreate(maincontext,
660 "TupleSort sort",
662
663 /*
664 * Additionally a working memory context for tuples is setup in
665 * tuplesort_begin_batch.
666 */
667
668 /*
669 * Make the Tuplesortstate within the per-sortstate context. This way, we
670 * don't need a separate pfree() operation for it at shutdown.
671 */
672 oldcontext = MemoryContextSwitchTo(maincontext);
673
675
676 if (trace_sort)
677 pg_rusage_init(&state->ru_start);
678
679 state->base.sortopt = sortopt;
680 state->base.tuples = true;
681 state->abbrevNext = 10;
682
683 /*
684 * workMem is forced to be at least 64KB, the current minimum valid value
685 * for the work_mem GUC. This is a defense against parallel sort callers
686 * that divide out memory among many workers in a way that leaves each
687 * with very little memory.
688 */
689 state->allowedMem = Max(workMem, 64) * (int64) 1024;
690 state->base.sortcontext = sortcontext;
691 state->base.maincontext = maincontext;
692
693 state->memtupsize = INITIAL_MEMTUPSIZE;
694 state->memtuples = NULL;
695
696 /*
697 * After all of the other non-parallel-related state, we setup all of the
698 * state needed for each batch.
699 */
701
702 /*
703 * Initialize parallel-related state based on coordination information
704 * from caller
705 */
706 if (!coordinate)
707 {
708 /* Serial sort */
709 state->shared = NULL;
710 state->worker = -1;
711 state->nParticipants = -1;
712 }
713 else if (coordinate->isWorker)
714 {
715 /* Parallel worker produces exactly one final run from all input */
716 state->shared = coordinate->sharedsort;
718 state->nParticipants = -1;
719 }
720 else
721 {
722 /* Parallel leader state only used for final merge */
723 state->shared = coordinate->sharedsort;
724 state->worker = -1;
725 state->nParticipants = coordinate->nParticipants;
726 Assert(state->nParticipants >= 1);
727 }
728
729 MemoryContextSwitchTo(oldcontext);
730
731 return state;
732}
#define Max(x, y)
Definition: c.h:1010
int64_t int64
Definition: c.h:549
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
Sharedsort * sharedsort
Definition: tuplesort.h:59
#define INITIAL_MEMTUPSIZE
Definition: tuplesort.c:118
static int worker_get_identifier(Tuplesortstate *state)
Definition: tuplesort.c:2965
static void tuplesort_begin_batch(Tuplesortstate *state)
Definition: tuplesort.c:742

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert(), CurrentMemoryContext, elog, ERROR, INITIAL_MEMTUPSIZE, SortCoordinateData::isWorker, Max, MemoryContextSwitchTo(), SortCoordinateData::nParticipants, palloc0_object, pg_rusage_init(), SortCoordinateData::sharedsort, trace_sort, tuplesort_begin_batch(), TUPLESORT_RANDOMACCESS, and worker_get_identifier().

Referenced by tuplesort_begin_cluster(), tuplesort_begin_datum(), tuplesort_begin_heap(), tuplesort_begin_index_brin(), tuplesort_begin_index_btree(), tuplesort_begin_index_gin(), tuplesort_begin_index_gist(), and tuplesort_begin_index_hash().

◆ tuplesort_begin_datum()

Tuplesortstate * tuplesort_begin_datum ( Oid  datumType,
Oid  sortOperator,
Oid  sortCollation,
bool  nullsFirstFlag,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 652 of file tuplesortvariants.c.

655{
656 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
657 sortopt);
660 MemoryContext oldcontext;
661 int16 typlen;
662 bool typbyval;
663
664 oldcontext = MemoryContextSwitchTo(base->maincontext);
666
667 if (trace_sort)
668 elog(LOG,
669 "begin datum sort: workMem = %d, randomAccess = %c",
670 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
671
672 base->nKeys = 1; /* always a one-column sort */
673
674 TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
675 false, /* no unique check */
676 1,
677 workMem,
678 sortopt & TUPLESORT_RANDOMACCESS,
679 PARALLEL_SORT(coordinate));
680
684 base->writetup = writetup_datum;
685 base->readtup = readtup_datum;
686 base->haveDatum1 = true;
687 base->arg = arg;
688
689 arg->datumType = datumType;
690
691 /* lookup necessary attributes of the datum type */
692 get_typlenbyval(datumType, &typlen, &typbyval);
693 arg->datumTypeLen = typlen;
694 base->tuples = !typbyval;
695
696 /* Prepare SortSupport data */
698
700 base->sortKeys->ssup_collation = sortCollation;
701 base->sortKeys->ssup_nulls_first = nullsFirstFlag;
702
703 /*
704 * Abbreviation is possible here only for by-reference types. In theory,
705 * a pass-by-value datatype could have an abbreviated form that is cheaper
706 * to compare. In a tuple sort, we could support that, because we can
707 * always extract the original datum from the tuple as needed. Here, we
708 * can't, because a datum sort only stores a single copy of the datum; the
709 * "tuple" field of each SortTuple is NULL.
710 */
711 base->sortKeys->abbreviate = !typbyval;
712
713 PrepareSortSupportFromOrderingOp(sortOperator, base->sortKeys);
714
715 /*
716 * The "onlyKey" optimization cannot be used with abbreviated keys, since
717 * tie-breaker comparisons may be required. Typically, the optimization
718 * is only of value to pass-by-value types anyway, whereas abbreviated
719 * keys are typically only of value to pass-by-reference types.
720 */
721 if (!base->sortKeys->abbrev_converter)
722 base->onlyKey = base->sortKeys;
723
724 MemoryContextSwitchTo(oldcontext);
725
726 return state;
727}
int16_t int16
Definition: c.h:547
#define palloc_object(type)
Definition: fe_memutils.h:74
void get_typlenbyval(Oid typid, int16 *typlen, bool *typbyval)
Definition: lsyscache.c:2416
void PrepareSortSupportFromOrderingOp(Oid orderingOp, SortSupport ssup)
Definition: sortsupport.c:134
Datum(* abbrev_converter)(Datum original, SortSupport ssup)
Definition: sortsupport.h:172
SortSupport onlyKey
Definition: tuplesort.h:246
static void removeabbrev_datum(Tuplesortstate *state, SortTuple *stups, int count)
static int comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_datum_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void readtup_datum(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static void writetup_datum(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
#define DATUM_SORT

References SortSupportData::abbrev_converter, SortSupportData::abbreviate, arg, TuplesortPublic::arg, TuplesortPublic::comparetup, comparetup_datum(), comparetup_datum_tiebreak(), TuplesortPublic::comparetup_tiebreak, CurrentMemoryContext, DATUM_SORT, elog, get_typlenbyval(), TuplesortPublic::haveDatum1, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, TuplesortPublic::onlyKey, palloc0_object, palloc_object, PARALLEL_SORT, PrepareSortSupportFromOrderingOp(), TuplesortPublic::readtup, readtup_datum(), TuplesortPublic::removeabbrev, removeabbrev_datum(), TuplesortPublic::sortKeys, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, trace_sort, TuplesortPublic::tuples, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_datum().

Referenced by array_sort_internal(), ExecSort(), initialize_aggregate(), ordered_set_startup(), and validate_index().
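
For orientation, here is a minimal usage sketch of the datum-sort lifecycle (begin, put, perform, get, end). The helper sort_datums is hypothetical and assumes all input values are non-NULL; copy = true is passed to tuplesort_getdatum() so by-reference results remain valid after tuplesort_end().

#include "postgres.h"

#include "miscadmin.h"
#include "utils/tuplesort.h"

/* Hypothetical helper: sort an array of Datums of type 'elemtype' in place,
 * ordering by the given "<" operator. */
static void
sort_datums(Datum *values, int nvalues, Oid elemtype, Oid ltopr)
{
    Tuplesortstate *sortstate;

    /* serial sort, no bound, no random access */
    sortstate = tuplesort_begin_datum(elemtype, ltopr, InvalidOid,
                                      false, work_mem, NULL, TUPLESORT_NONE);

    for (int i = 0; i < nvalues; i++)
        tuplesort_putdatum(sortstate, values[i], false);

    tuplesort_performsort(sortstate);

    for (int i = 0; i < nvalues; i++)
    {
        Datum       val;
        bool        isnull;

        if (!tuplesort_getdatum(sortstate, true, true, &val, &isnull, NULL))
            break;
        values[i] = val;
    }

    tuplesort_end(sortstate);
}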

◆ tuplesort_begin_heap()

Tuplesortstate * tuplesort_begin_heap ( TupleDesc  tupDesc,
int  nkeys,
AttrNumber * attNums,
Oid * sortOperators,
Oid * sortCollations,
bool *  nullsFirstFlags,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 179 of file tuplesortvariants.c.

184{
185 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
186 sortopt);
188 MemoryContext oldcontext;
189 int i;
190
191 oldcontext = MemoryContextSwitchTo(base->maincontext);
192
193 Assert(nkeys > 0);
194
195 if (trace_sort)
196 elog(LOG,
197 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
198 nkeys, workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
199
200 base->nKeys = nkeys;
201
202 TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
203 false, /* no unique check */
204 nkeys,
205 workMem,
206 sortopt & TUPLESORT_RANDOMACCESS,
207 PARALLEL_SORT(coordinate));
208
212 base->writetup = writetup_heap;
213 base->readtup = readtup_heap;
214 base->haveDatum1 = true;
215 base->arg = tupDesc; /* assume we need not copy tupDesc */
216
217 /* Prepare SortSupport data for each column */
218 base->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
219
220 for (i = 0; i < nkeys; i++)
221 {
222 SortSupport sortKey = base->sortKeys + i;
223
224 Assert(attNums[i] != 0);
225 Assert(sortOperators[i] != 0);
226
228 sortKey->ssup_collation = sortCollations[i];
229 sortKey->ssup_nulls_first = nullsFirstFlags[i];
230 sortKey->ssup_attno = attNums[i];
231 /* Convey if abbreviation optimization is applicable in principle */
232 sortKey->abbreviate = (i == 0 && base->haveDatum1);
233
234 PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
235 }
236
237 /*
238 * The "onlyKey" optimization cannot be used with abbreviated keys, since
239 * tie-breaker comparisons may be required. Typically, the optimization
240 * is only of value to pass-by-value types anyway, whereas abbreviated
241 * keys are typically only of value to pass-by-reference types.
242 */
243 if (nkeys == 1 && !base->sortKeys->abbrev_converter)
244 base->onlyKey = base->sortKeys;
245
246 MemoryContextSwitchTo(oldcontext);
247
248 return state;
249}
static void readtup_heap(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static int comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void writetup_heap(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static int comparetup_heap_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void removeabbrev_heap(Tuplesortstate *state, SortTuple *stups, int count)
#define HEAP_SORT

References SortSupportData::abbrev_converter, SortSupportData::abbreviate, TuplesortPublic::arg, Assert(), TuplesortPublic::comparetup, comparetup_heap(), comparetup_heap_tiebreak(), TuplesortPublic::comparetup_tiebreak, CurrentMemoryContext, elog, TuplesortPublic::haveDatum1, HEAP_SORT, i, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, TuplesortPublic::onlyKey, palloc0(), PARALLEL_SORT, PrepareSortSupportFromOrderingOp(), TuplesortPublic::readtup, readtup_heap(), TuplesortPublic::removeabbrev, removeabbrev_heap(), TuplesortPublic::sortKeys, SortSupportData::ssup_attno, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_heap().

Referenced by ExecIncrementalSort(), ExecSort(), initialize_aggregate(), initialize_phase(), ordered_set_startup(), and switchToPresortedPrefixMode().
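
The slot-based lifecycle used by callers such as ExecSort can be sketched as below. This is a hedged illustration: sort_rows_on_first_column, fetch_next_slot, and consume_slot are hypothetical placeholders for whatever the caller provides; the sort key is the first column, ordered by the "<" operator passed in.

#include "postgres.h"

#include "executor/tuptable.h"
#include "miscadmin.h"
#include "utils/tuplesort.h"

/* Hypothetical row source and sink supplied by the caller. */
extern TupleTableSlot *fetch_next_slot(void);
extern void consume_slot(TupleTableSlot *slot);

static void
sort_rows_on_first_column(TupleDesc tupdesc, Oid ltopr)
{
    AttrNumber      attNums[1] = {1};
    Oid             sortOperators[1] = {ltopr};
    Oid             sortCollations[1] = {InvalidOid};
    bool            nullsFirst[1] = {false};
    Tuplesortstate *sortstate;
    TupleTableSlot *slot;

    sortstate = tuplesort_begin_heap(tupdesc, 1, attNums, sortOperators,
                                     sortCollations, nullsFirst,
                                     work_mem, NULL, TUPLESORT_NONE);

    while ((slot = fetch_next_slot()) != NULL)
        tuplesort_puttupleslot(sortstate, slot);

    tuplesort_performsort(sortstate);

    /* tuplesort_gettupleslot() stores a MinimalTuple into the slot */
    slot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsMinimalTuple);
    while (tuplesort_gettupleslot(sortstate, true, false, slot, NULL))
        consume_slot(slot);

    tuplesort_end(sortstate);
}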

◆ tuplesort_begin_index_brin()

Tuplesortstate * tuplesort_begin_index_brin ( int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 554 of file tuplesortvariants.c.

557{
558 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
559 sortopt);
561
562 if (trace_sort)
563 elog(LOG,
564 "begin index sort: workMem = %d, randomAccess = %c",
565 workMem,
566 sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
567
568 base->nKeys = 1; /* Only one sort column, the block number */
569
574 base->haveDatum1 = true;
575 base->arg = NULL;
576
577 return state;
578}
static void writetup_index_brin(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void removeabbrev_index_brin(Tuplesortstate *state, SortTuple *stups, int count)
static void readtup_index_brin(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static int comparetup_index_brin(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)

References TuplesortPublic::arg, TuplesortPublic::comparetup, comparetup_index_brin(), elog, TuplesortPublic::haveDatum1, LOG, TuplesortPublic::nKeys, TuplesortPublic::readtup, readtup_index_brin(), TuplesortPublic::removeabbrev, removeabbrev_index_brin(), trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_index_brin().

Referenced by _brin_parallel_scan_and_build(), and brinbuild().

◆ tuplesort_begin_index_btree()

Tuplesortstate * tuplesort_begin_index_btree ( Relation  heapRel,
Relation  indexRel,
bool  enforceUnique,
bool  uniqueNullsNotDistinct,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 358 of file tuplesortvariants.c.

365{
366 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
367 sortopt);
369 BTScanInsert indexScanKey;
371 MemoryContext oldcontext;
372 int i;
373
374 oldcontext = MemoryContextSwitchTo(base->maincontext);
376
377 if (trace_sort)
378 elog(LOG,
379 "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
380 enforceUnique ? 't' : 'f',
381 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
382
384
385 TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
386 enforceUnique,
387 base->nKeys,
388 workMem,
389 sortopt & TUPLESORT_RANDOMACCESS,
390 PARALLEL_SORT(coordinate));
391
395 base->writetup = writetup_index;
396 base->readtup = readtup_index;
397 base->haveDatum1 = true;
398 base->arg = arg;
399
400 arg->index.heapRel = heapRel;
401 arg->index.indexRel = indexRel;
402 arg->enforceUnique = enforceUnique;
403 arg->uniqueNullsNotDistinct = uniqueNullsNotDistinct;
404
405 indexScanKey = _bt_mkscankey(indexRel, NULL);
406
407 /* Prepare SortSupport data for each column */
408 base->sortKeys = (SortSupport) palloc0(base->nKeys *
409 sizeof(SortSupportData));
410
411 for (i = 0; i < base->nKeys; i++)
412 {
413 SortSupport sortKey = base->sortKeys + i;
414 ScanKey scanKey = indexScanKey->scankeys + i;
415 bool reverse;
416
418 sortKey->ssup_collation = scanKey->sk_collation;
419 sortKey->ssup_nulls_first =
420 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
421 sortKey->ssup_attno = scanKey->sk_attno;
422 /* Convey if abbreviation optimization is applicable in principle */
423 sortKey->abbreviate = (i == 0 && base->haveDatum1);
424
425 Assert(sortKey->ssup_attno != 0);
426
427 reverse = (scanKey->sk_flags & SK_BT_DESC) != 0;
428
429 PrepareSortSupportFromIndexRel(indexRel, reverse, sortKey);
430 }
431
432 pfree(indexScanKey);
433
434 MemoryContextSwitchTo(oldcontext);
435
436 return state;
437}
static int comparetup_index_btree_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_index_btree(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void readtup_index(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static void removeabbrev_index(Tuplesortstate *state, SortTuple *stups, int count)
#define INDEX_SORT
static void writetup_index(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)

References _bt_mkscankey(), SortSupportData::abbreviate, arg, TuplesortPublic::arg, Assert(), TuplesortPublic::comparetup, comparetup_index_btree(), comparetup_index_btree_tiebreak(), TuplesortPublic::comparetup_tiebreak, CurrentMemoryContext, elog, TuplesortPublic::haveDatum1, i, INDEX_SORT, IndexRelationGetNumberOfKeyAttributes, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, palloc0(), palloc_object, PARALLEL_SORT, pfree(), PrepareSortSupportFromIndexRel(), TuplesortPublic::readtup, readtup_index(), TuplesortPublic::removeabbrev, removeabbrev_index(), BTScanInsertData::scankeys, ScanKeyData::sk_attno, SK_BT_DESC, SK_BT_NULLS_FIRST, ScanKeyData::sk_collation, ScanKeyData::sk_flags, TuplesortPublic::sortKeys, SortSupportData::ssup_attno, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_index().

Referenced by _bt_parallel_scan_and_sort(), and _bt_spools_heapscan().

◆ tuplesort_begin_index_gin()

Tuplesortstate * tuplesort_begin_index_gin ( Relation  heapRel,
Relation  indexRel,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 581 of file tuplesortvariants.c.

585{
586 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
587 sortopt);
589 MemoryContext oldcontext;
590 int i;
591 TupleDesc desc = RelationGetDescr(indexRel);
592
593 oldcontext = MemoryContextSwitchTo(base->maincontext);
594
595#ifdef TRACE_SORT
596 if (trace_sort)
597 elog(LOG,
598 "begin index sort: workMem = %d, randomAccess = %c",
599 workMem,
600 sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
601#endif
602
603 /*
604 * Multi-column GIN indexes expand the row into a separate index entry for
605 * attribute, and that's what we write into the tuplesort. But we still
606 * need to initialize sortsupport for all the attributes.
607 */
609
610 /* Prepare SortSupport data for each column */
611 base->sortKeys = (SortSupport) palloc0(base->nKeys *
612 sizeof(SortSupportData));
613
614 for (i = 0; i < base->nKeys; i++)
615 {
616 SortSupport sortKey = base->sortKeys + i;
617 Form_pg_attribute att = TupleDescAttr(desc, i);
618 TypeCacheEntry *typentry;
619
621 sortKey->ssup_collation = indexRel->rd_indcollation[i];
622 sortKey->ssup_nulls_first = false;
623 sortKey->ssup_attno = i + 1;
624 sortKey->abbreviate = false;
625
626 Assert(sortKey->ssup_attno != 0);
627
628 if (!OidIsValid(sortKey->ssup_collation))
629 sortKey->ssup_collation = DEFAULT_COLLATION_OID;
630
631 /*
632 * Look for a ordering for the index key data type, and then the sort
633 * support function.
634 */
635 typentry = lookup_type_cache(att->atttypid, TYPECACHE_LT_OPR);
636 PrepareSortSupportFromOrderingOp(typentry->lt_opr, sortKey);
637 }
638
643 base->haveDatum1 = false;
644 base->arg = NULL;
645
646 MemoryContextSwitchTo(oldcontext);
647
648 return state;
649}
#define OidIsValid(objectId)
Definition: c.h:788
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:202
#define RelationGetDescr(relation)
Definition: rel.h:541
Oid * rd_indcollation
Definition: rel.h:217
static FormData_pg_attribute * TupleDescAttr(TupleDesc tupdesc, int i)
Definition: tupdesc.h:160
static void writetup_index_gin(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void readtup_index_gin(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static void removeabbrev_index_gin(Tuplesortstate *state, SortTuple *stups, int count)
static int comparetup_index_gin(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition: typcache.c:386
#define TYPECACHE_LT_OPR
Definition: typcache.h:139

References SortSupportData::abbreviate, TuplesortPublic::arg, Assert(), TuplesortPublic::comparetup, comparetup_index_gin(), CurrentMemoryContext, elog, TuplesortPublic::haveDatum1, i, IndexRelationGetNumberOfKeyAttributes, LOG, lookup_type_cache(), TypeCacheEntry::lt_opr, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, OidIsValid, palloc0(), PrepareSortSupportFromOrderingOp(), RelationData::rd_indcollation, TuplesortPublic::readtup, readtup_index_gin(), RelationGetDescr, TuplesortPublic::removeabbrev, removeabbrev_index_gin(), TuplesortPublic::sortKeys, SortSupportData::ssup_attno, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, trace_sort, TupleDescAttr(), tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TYPECACHE_LT_OPR, TuplesortPublic::writetup, and writetup_index_gin().

Referenced by _gin_parallel_scan_and_build(), and ginbuild().

◆ tuplesort_begin_index_gist()

Tuplesortstate * tuplesort_begin_index_gist ( Relation  heapRel,
Relation  indexRel,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 491 of file tuplesortvariants.c.

496{
497 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
498 sortopt);
500 MemoryContext oldcontext;
502 int i;
503
504 oldcontext = MemoryContextSwitchTo(base->maincontext);
506
507 if (trace_sort)
508 elog(LOG,
509 "begin index sort: workMem = %d, randomAccess = %c",
510 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
511
513
517 base->writetup = writetup_index;
518 base->readtup = readtup_index;
519 base->haveDatum1 = true;
520 base->arg = arg;
521
522 arg->index.heapRel = heapRel;
523 arg->index.indexRel = indexRel;
524 arg->enforceUnique = false;
525 arg->uniqueNullsNotDistinct = false;
526
527 /* Prepare SortSupport data for each column */
528 base->sortKeys = (SortSupport) palloc0(base->nKeys *
529 sizeof(SortSupportData));
530
531 for (i = 0; i < base->nKeys; i++)
532 {
533 SortSupport sortKey = base->sortKeys + i;
534
536 sortKey->ssup_collation = indexRel->rd_indcollation[i];
537 sortKey->ssup_nulls_first = false;
538 sortKey->ssup_attno = i + 1;
539 /* Convey if abbreviation optimization is applicable in principle */
540 sortKey->abbreviate = (i == 0 && base->haveDatum1);
541
542 Assert(sortKey->ssup_attno != 0);
543
544 /* Look for a sort support function */
545 PrepareSortSupportFromGistIndexRel(indexRel, sortKey);
546 }
547
548 MemoryContextSwitchTo(oldcontext);
549
550 return state;
551}
void PrepareSortSupportFromGistIndexRel(Relation indexRel, SortSupport ssup)
Definition: sortsupport.c:185

References SortSupportData::abbreviate, arg, TuplesortPublic::arg, Assert(), TuplesortPublic::comparetup, comparetup_index_btree(), comparetup_index_btree_tiebreak(), TuplesortPublic::comparetup_tiebreak, CurrentMemoryContext, elog, TuplesortPublic::haveDatum1, i, IndexRelationGetNumberOfKeyAttributes, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, palloc0(), palloc_object, PrepareSortSupportFromGistIndexRel(), RelationData::rd_indcollation, TuplesortPublic::readtup, readtup_index(), TuplesortPublic::removeabbrev, removeabbrev_index(), TuplesortPublic::sortKeys, SortSupportData::ssup_attno, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_index().

Referenced by gistbuild().

◆ tuplesort_begin_index_hash()

Tuplesortstate * tuplesort_begin_index_hash ( Relation  heapRel,
Relation  indexRel,
uint32  high_mask,
uint32  low_mask,
uint32  max_buckets,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 440 of file tuplesortvariants.c.

448{
449 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
450 sortopt);
452 MemoryContext oldcontext;
454
455 oldcontext = MemoryContextSwitchTo(base->maincontext);
457
458 if (trace_sort)
459 elog(LOG,
460 "begin index sort: high_mask = 0x%x, low_mask = 0x%x, "
461 "max_buckets = 0x%x, workMem = %d, randomAccess = %c",
462 high_mask,
463 low_mask,
464 max_buckets,
465 workMem,
466 sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
467
468 base->nKeys = 1; /* Only one sort column, the hash code */
469
473 base->writetup = writetup_index;
474 base->readtup = readtup_index;
475 base->haveDatum1 = true;
476 base->arg = arg;
477
478 arg->index.heapRel = heapRel;
479 arg->index.indexRel = indexRel;
480
481 arg->high_mask = high_mask;
482 arg->low_mask = low_mask;
483 arg->max_buckets = max_buckets;
484
485 MemoryContextSwitchTo(oldcontext);
486
487 return state;
488}
static int comparetup_index_hash(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_index_hash_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)

References arg, TuplesortPublic::arg, TuplesortPublic::comparetup, comparetup_index_hash(), comparetup_index_hash_tiebreak(), TuplesortPublic::comparetup_tiebreak, elog, TuplesortPublic::haveDatum1, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, palloc_object, TuplesortPublic::readtup, readtup_index(), TuplesortPublic::removeabbrev, removeabbrev_index(), trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_index().

Referenced by _h_spoolinit().

◆ tuplesort_end()

◆ tuplesort_estimate_shared()

Size tuplesort_estimate_shared ( int  nWorkers)

Definition at line 2901 of file tuplesort.c.

2902{
2903 Size tapesSize;
2904
2905 Assert(nWorkers > 0);
2906
2907 /* Make sure that BufFile shared state is MAXALIGN'd */
2908 tapesSize = mul_size(sizeof(TapeShare), nWorkers);
2909 tapesSize = MAXALIGN(add_size(tapesSize, offsetof(Sharedsort, tapes)));
2910
2911 return tapesSize;
2912}
#define MAXALIGN(LEN)
Definition: c.h:824
size_t Size
Definition: c.h:624
Size add_size(Size s1, Size s2)
Definition: shmem.c:495
Size mul_size(Size s1, Size s2)
Definition: shmem.c:510

References add_size(), Assert(), MAXALIGN, and mul_size().

Referenced by _brin_begin_parallel(), _bt_begin_parallel(), and _gin_begin_parallel().

◆ tuplesort_get_stats()

void tuplesort_get_stats ( Tuplesortstate * state,
TuplesortInstrumentation * stats 
)

Definition at line 2485 of file tuplesort.c.

2487{
2488 /*
2489 * Note: it might seem we should provide both memory and disk usage for a
2490 * disk-based sort. However, the current code doesn't track memory space
2491 * accurately once we have begun to return tuples to the caller (since we
2492 * don't account for pfree's the caller is expected to do), so we cannot
2493 * rely on availMem in a disk sort. This does not seem worth the overhead
2494 * to fix. Is it worth creating an API for the memory context code to
2495 * tell us how much is actually used in sortcontext?
2496 */
2498
2499 if (state->isMaxSpaceDisk)
2501 else
2503 stats->spaceUsed = (state->maxSpace + 1023) / 1024;
2504
2505 switch (state->maxSpaceStatus)
2506 {
2507 case TSS_SORTEDINMEM:
2508 if (state->boundUsed)
2510 else
2512 break;
2513 case TSS_SORTEDONTAPE:
2515 break;
2516 case TSS_FINALMERGE:
2518 break;
2519 default:
2521 break;
2522 }
2523}
TuplesortMethod sortMethod
Definition: tuplesort.h:113
TuplesortSpaceType spaceType
Definition: tuplesort.h:114
@ TSS_SORTEDONTAPE
Definition: tuplesort.c:158
@ TSS_SORTEDINMEM
Definition: tuplesort.c:157
@ TSS_FINALMERGE
Definition: tuplesort.c:159
static void tuplesort_updatemax(Tuplesortstate *state)
Definition: tuplesort.c:954

References SORT_SPACE_TYPE_DISK, SORT_SPACE_TYPE_MEMORY, SORT_TYPE_EXTERNAL_MERGE, SORT_TYPE_EXTERNAL_SORT, SORT_TYPE_QUICKSORT, SORT_TYPE_STILL_IN_PROGRESS, SORT_TYPE_TOP_N_HEAPSORT, TuplesortInstrumentation::sortMethod, TuplesortInstrumentation::spaceType, TuplesortInstrumentation::spaceUsed, TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and tuplesort_updatemax().

Referenced by ExecSort(), instrumentSortedGroup(), and show_sort_info().
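
By way of illustration, a short hedged sketch of how a caller holding a finished sort might log the instrumentation (modeled loosely on what show_sort_info() does for EXPLAIN ANALYZE); report_sort_stats is a hypothetical helper.

#include "postgres.h"

#include "utils/tuplesort.h"

/* Hypothetical helper: report how a completed sort was performed. */
static void
report_sort_stats(Tuplesortstate *sortstate)
{
    TuplesortInstrumentation stats;

    tuplesort_get_stats(sortstate, &stats);
    elog(DEBUG1, "sort method: %s  space used: %lld kB (%s)",
         tuplesort_method_name(stats.sortMethod),
         (long long) stats.spaceUsed,
         tuplesort_space_type_name(stats.spaceType));
}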

◆ tuplesort_getbrintuple()

BrinTuple * tuplesort_getbrintuple ( Tuplesortstate * state,
Size * len,
bool  forward 
)

Definition at line 1067 of file tuplesortvariants.c.

1068{
1071 SortTuple stup;
1072 BrinSortTuple *btup;
1073
1074 if (!tuplesort_gettuple_common(state, forward, &stup))
1075 stup.tuple = NULL;
1076
1077 MemoryContextSwitchTo(oldcontext);
1078
1079 if (!stup.tuple)
1080 return NULL;
1081
1082 btup = (BrinSortTuple *) stup.tuple;
1083
1084 *len = btup->tuplen;
1085
1086 return &btup->tuple;
1087}
void * tuple
Definition: tuplesort.h:150
MemoryContext sortcontext
Definition: tuplesort.h:221
bool tuplesort_gettuple_common(Tuplesortstate *state, bool forward, SortTuple *stup)
Definition: tuplesort.c:1456

References len, MemoryContextSwitchTo(), TuplesortPublic::sortcontext, BrinSortTuple::tuple, SortTuple::tuple, BrinSortTuple::tuplen, tuplesort_gettuple_common(), and TuplesortstateGetPublic.

Referenced by _brin_parallel_merge().

◆ tuplesort_getdatum()

bool tuplesort_getdatum ( Tuplesortstate * state,
bool  forward,
bool  copy,
Datum * val,
bool *  isNull,
Datum * abbrev 
)

Definition at line 1137 of file tuplesortvariants.c.

1139{
1143 SortTuple stup;
1144
1145 if (!tuplesort_gettuple_common(state, forward, &stup))
1146 {
1147 MemoryContextSwitchTo(oldcontext);
1148 return false;
1149 }
1150
1151 /* Ensure we copy into caller's memory context */
1152 MemoryContextSwitchTo(oldcontext);
1153
1154 /* Record abbreviated key for caller */
1155 if (base->sortKeys->abbrev_converter && abbrev)
1156 *abbrev = stup.datum1;
1157
1158 if (stup.isnull1 || !base->tuples)
1159 {
1160 *val = stup.datum1;
1161 *isNull = stup.isnull1;
1162 }
1163 else
1164 {
1165 /* use stup.tuple because stup.datum1 may be an abbreviation */
1166 if (copy)
1167 *val = datumCopy(PointerGetDatum(stup.tuple), false,
1168 arg->datumTypeLen);
1169 else
1170 *val = PointerGetDatum(stup.tuple);
1171 *isNull = false;
1172 }
1173
1174 return true;
1175}
Datum datumCopy(Datum value, bool typByVal, int typLen)
Definition: datum.c:132
long val
Definition: informix.c:689
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:81
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:332

References SortSupportData::abbrev_converter, arg, TuplesortPublic::arg, datumCopy(), if(), MemoryContextSwitchTo(), PointerGetDatum(), TuplesortPublic::sortcontext, TuplesortPublic::sortKeys, TuplesortPublic::tuples, tuplesort_gettuple_common(), TuplesortstateGetPublic, and val.

Referenced by array_sort_internal(), ExecSort(), heapam_index_validate_scan(), mode_final(), percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), percentile_disc_multi_final(), and process_ordered_aggregate_single().

◆ tuplesort_getgintuple()

GinTuple * tuplesort_getgintuple ( Tuplesortstate * state,
Size * len,
bool  forward 
)

Definition at line 1090 of file tuplesortvariants.c.

1091{
1094 SortTuple stup;
1095 GinTuple *tup;
1096
1097 if (!tuplesort_gettuple_common(state, forward, &stup))
1098 stup.tuple = NULL;
1099
1100 MemoryContextSwitchTo(oldcontext);
1101
1102 if (!stup.tuple)
1103 return NULL;
1104
1105 tup = (GinTuple *) stup.tuple;
1106
1107 *len = tup->tuplen;
1108
1109 return tup;
1110}
int tuplen
Definition: gin_tuple.h:24

References len, MemoryContextSwitchTo(), TuplesortPublic::sortcontext, SortTuple::tuple, GinTuple::tuplen, tuplesort_gettuple_common(), and TuplesortstateGetPublic.

Referenced by _gin_parallel_merge(), and _gin_process_worker_data().

◆ tuplesort_getheaptuple()

HeapTuple tuplesort_getheaptuple ( Tuplesortstate * state,
bool  forward 
)

Definition at line 1025 of file tuplesortvariants.c.

1026{
1029 SortTuple stup;
1030
1031 if (!tuplesort_gettuple_common(state, forward, &stup))
1032 stup.tuple = NULL;
1033
1034 MemoryContextSwitchTo(oldcontext);
1035
1036 return stup.tuple;
1037}

References MemoryContextSwitchTo(), TuplesortPublic::sortcontext, SortTuple::tuple, tuplesort_gettuple_common(), and TuplesortstateGetPublic.

Referenced by heapam_relation_copy_for_cluster().

◆ tuplesort_getindextuple()

IndexTuple tuplesort_getindextuple ( Tuplesortstate * state,
bool  forward 
)

Definition at line 1046 of file tuplesortvariants.c.

1047{
1050 SortTuple stup;
1051
1052 if (!tuplesort_gettuple_common(state, forward, &stup))
1053 stup.tuple = NULL;
1054
1055 MemoryContextSwitchTo(oldcontext);
1056
1057 return (IndexTuple) stup.tuple;
1058}

References MemoryContextSwitchTo(), TuplesortPublic::sortcontext, SortTuple::tuple, tuplesort_gettuple_common(), and TuplesortstateGetPublic.

Referenced by _bt_load(), _h_indexbuild(), and gist_indexsortbuild().

◆ tuplesort_gettuple_common()

bool tuplesort_gettuple_common ( Tuplesortstate * state,
bool  forward,
SortTuple * stup 
)

Definition at line 1456 of file tuplesort.c.

1458{
1459 unsigned int tuplen;
1460 size_t nmoved;
1461
1462 Assert(!WORKER(state));
1463
1464 switch (state->status)
1465 {
1466 case TSS_SORTEDINMEM:
1467 Assert(forward || state->base.sortopt & TUPLESORT_RANDOMACCESS);
1468 Assert(!state->slabAllocatorUsed);
1469 if (forward)
1470 {
1471 if (state->current < state->memtupcount)
1472 {
1473 *stup = state->memtuples[state->current++];
1474 return true;
1475 }
1476 state->eof_reached = true;
1477
1478 /*
1479 * Complain if caller tries to retrieve more tuples than
1480 * originally asked for in a bounded sort. This is because
1481 * returning EOF here might be the wrong thing.
1482 */
1483 if (state->bounded && state->current >= state->bound)
1484 elog(ERROR, "retrieved too many tuples in a bounded sort");
1485
1486 return false;
1487 }
1488 else
1489 {
1490 if (state->current <= 0)
1491 return false;
1492
1493 /*
1494 * if all tuples are fetched already then we return last
1495 * tuple, else - tuple before last returned.
1496 */
1497 if (state->eof_reached)
1498 state->eof_reached = false;
1499 else
1500 {
1501 state->current--; /* last returned tuple */
1502 if (state->current <= 0)
1503 return false;
1504 }
1505 *stup = state->memtuples[state->current - 1];
1506 return true;
1507 }
1508 break;
1509
1510 case TSS_SORTEDONTAPE:
1511 Assert(forward || state->base.sortopt & TUPLESORT_RANDOMACCESS);
1512 Assert(state->slabAllocatorUsed);
1513
1514 /*
1515 * The slot that held the tuple that we returned in previous
1516 * gettuple call can now be reused.
1517 */
1518 if (state->lastReturnedTuple)
1519 {
1520 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
1521 state->lastReturnedTuple = NULL;
1522 }
1523
1524 if (forward)
1525 {
1526 if (state->eof_reached)
1527 return false;
1528
1529 if ((tuplen = getlen(state->result_tape, true)) != 0)
1530 {
1531 READTUP(state, stup, state->result_tape, tuplen);
1532
1533 /*
1534 * Remember the tuple we return, so that we can recycle
1535 * its memory on next call. (This can be NULL, in the
1536 * !state->tuples case).
1537 */
1538 state->lastReturnedTuple = stup->tuple;
1539
1540 return true;
1541 }
1542 else
1543 {
1544 state->eof_reached = true;
1545 return false;
1546 }
1547 }
1548
1549 /*
1550 * Backward.
1551 *
1552 * if all tuples are fetched already then we return last tuple,
1553 * else - tuple before last returned.
1554 */
1555 if (state->eof_reached)
1556 {
1557 /*
1558 * Seek position is pointing just past the zero tuplen at the
1559 * end of file; back up to fetch last tuple's ending length
1560 * word. If seek fails we must have a completely empty file.
1561 */
1562 nmoved = LogicalTapeBackspace(state->result_tape,
1563 2 * sizeof(unsigned int));
1564 if (nmoved == 0)
1565 return false;
1566 else if (nmoved != 2 * sizeof(unsigned int))
1567 elog(ERROR, "unexpected tape position");
1568 state->eof_reached = false;
1569 }
1570 else
1571 {
1572 /*
1573 * Back up and fetch previously-returned tuple's ending length
1574 * word. If seek fails, assume we are at start of file.
1575 */
1576 nmoved = LogicalTapeBackspace(state->result_tape,
1577 sizeof(unsigned int));
1578 if (nmoved == 0)
1579 return false;
1580 else if (nmoved != sizeof(unsigned int))
1581 elog(ERROR, "unexpected tape position");
1582 tuplen = getlen(state->result_tape, false);
1583
1584 /*
1585 * Back up to get ending length word of tuple before it.
1586 */
1587 nmoved = LogicalTapeBackspace(state->result_tape,
1588 tuplen + 2 * sizeof(unsigned int));
1589 if (nmoved == tuplen + sizeof(unsigned int))
1590 {
1591 /*
1592 * We backed up over the previous tuple, but there was no
1593 * ending length word before it. That means that the prev
1594 * tuple is the first tuple in the file. It is now the
1595 * next to read in forward direction (not obviously right,
1596 * but that is what in-memory case does).
1597 */
1598 return false;
1599 }
1600 else if (nmoved != tuplen + 2 * sizeof(unsigned int))
1601 elog(ERROR, "bogus tuple length in backward scan");
1602 }
1603
1604 tuplen = getlen(state->result_tape, false);
1605
1606 /*
1607 * Now we have the length of the prior tuple, back up and read it.
1608 * Note: READTUP expects we are positioned after the initial
1609 * length word of the tuple, so back up to that point.
1610 */
1611 nmoved = LogicalTapeBackspace(state->result_tape,
1612 tuplen);
1613 if (nmoved != tuplen)
1614 elog(ERROR, "bogus tuple length in backward scan");
1615 READTUP(state, stup, state->result_tape, tuplen);
1616
1617 /*
1618 * Remember the tuple we return, so that we can recycle its memory
1619 * on next call. (This can be NULL, in the Datum case).
1620 */
1621 state->lastReturnedTuple = stup->tuple;
1622
1623 return true;
1624
1625 case TSS_FINALMERGE:
1626 Assert(forward);
1627 /* We are managing memory ourselves, with the slab allocator. */
1628 Assert(state->slabAllocatorUsed);
1629
1630 /*
1631 * The slab slot holding the tuple that we returned in previous
1632 * gettuple call can now be reused.
1633 */
1634 if (state->lastReturnedTuple)
1635 {
1636 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
1637 state->lastReturnedTuple = NULL;
1638 }
1639
1640 /*
1641 * This code should match the inner loop of mergeonerun().
1642 */
1643 if (state->memtupcount > 0)
1644 {
1645 int srcTapeIndex = state->memtuples[0].srctape;
1646 LogicalTape *srcTape = state->inputTapes[srcTapeIndex];
1647 SortTuple newtup;
1648
1649 *stup = state->memtuples[0];
1650
1651 /*
1652 * Remember the tuple we return, so that we can recycle its
1653 * memory on next call. (This can be NULL, in the Datum case).
1654 */
1655 state->lastReturnedTuple = stup->tuple;
1656
1657 /*
1658 * Pull next tuple from tape, and replace the returned tuple
1659 * at top of the heap with it.
1660 */
1661 if (!mergereadnext(state, srcTape, &newtup))
1662 {
1663 /*
1664 * If no more data, we've reached end of run on this tape.
1665 * Remove the top node from the heap.
1666 */
1668 state->nInputRuns--;
1669
1670 /*
1671 * Close the tape. It'd go away at the end of the sort
1672 * anyway, but better to release the memory early.
1673 */
1674 LogicalTapeClose(srcTape);
1675 return true;
1676 }
1677 newtup.srctape = srcTapeIndex;
1679 return true;
1680 }
1681 return false;
1682
1683 default:
1684 elog(ERROR, "invalid tuplesort state");
1685 return false; /* keep compiler quiet */
1686 }
1687}
size_t LogicalTapeBackspace(LogicalTape *lt, size_t size)
Definition: logtape.c:1062
void LogicalTapeClose(LogicalTape *lt)
Definition: logtape.c:733
int srctape
Definition: tuplesort.h:153
static void tuplesort_heap_delete_top(Tuplesortstate *state)
Definition: tuplesort.c:2758
static unsigned int getlen(LogicalTape *tape, bool eofOK)
Definition: tuplesort.c:2840
#define READTUP(state, stup, tape, len)
Definition: tuplesort.c:396
#define WORKER(state)
Definition: tuplesort.c:402
static bool mergereadnext(Tuplesortstate *state, LogicalTape *srcTape, SortTuple *stup)
Definition: tuplesort.c:2274
#define RELEASE_SLAB_SLOT(state, tuple)
Definition: tuplesort.c:381
static void tuplesort_heap_replace_top(Tuplesortstate *state, SortTuple *tuple)
Definition: tuplesort.c:2782

References Assert(), elog, ERROR, getlen(), LogicalTapeBackspace(), LogicalTapeClose(), mergereadnext(), READTUP, RELEASE_SLAB_SLOT, SortTuple::srctape, TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, SortTuple::tuple, tuplesort_heap_delete_top(), tuplesort_heap_replace_top(), TUPLESORT_RANDOMACCESS, and WORKER.

Referenced by tuplesort_getbrintuple(), tuplesort_getdatum(), tuplesort_getgintuple(), tuplesort_getheaptuple(), tuplesort_getindextuple(), tuplesort_gettupleslot(), and tuplesort_skiptuples().

◆ tuplesort_gettupleslot()

bool tuplesort_gettupleslot ( Tuplesortstate * state,
bool  forward,
bool  copy,
TupleTableSlot * slot,
Datum * abbrev 
)

Definition at line 987 of file tuplesortvariants.c.

989{
992 SortTuple stup;
993
994 if (!tuplesort_gettuple_common(state, forward, &stup))
995 stup.tuple = NULL;
996
997 MemoryContextSwitchTo(oldcontext);
998
999 if (stup.tuple)
1000 {
1001 /* Record abbreviated key for caller */
1002 if (base->sortKeys->abbrev_converter && abbrev)
1003 *abbrev = stup.datum1;
1004
1005 if (copy)
1007
1008 ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, copy);
1009 return true;
1010 }
1011 else
1012 {
1013 ExecClearTuple(slot);
1014 return false;
1015 }
1016}
TupleTableSlot * ExecStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree)
Definition: execTuples.c:1635
MinimalTuple heap_copy_minimal_tuple(MinimalTuple mtup, Size extra)
Definition: heaptuple.c:1541
Datum datum1
Definition: tuplesort.h:151
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:457

References SortSupportData::abbrev_converter, SortTuple::datum1, ExecClearTuple(), ExecStoreMinimalTuple(), heap_copy_minimal_tuple(), MemoryContextSwitchTo(), TuplesortPublic::sortcontext, TuplesortPublic::sortKeys, SortTuple::tuple, tuplesort_gettuple_common(), and TuplesortstateGetPublic.

Referenced by ExecIncrementalSort(), ExecSort(), fetch_input_tuple(), hypothetical_dense_rank_final(), hypothetical_rank_common(), process_ordered_aggregate_multi(), and switchToPresortedPrefixMode().
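
A minimal retrieval sketch (not taken from the sources): it assumes a Tuplesortstate that was filled through tuplesort_puttupleslot() and has already gone through tuplesort_performsort(), plus a TupleTableSlot with a compatible minimal-tuple descriptor.

/* Drain the sorted output into a caller-supplied slot. */
static void
drain_sorted_tuples(Tuplesortstate *sortstate, TupleTableSlot *slot)
{
    /*
     * copy=false: the returned tuple may live in sort-owned memory and is
     * only valid until the next fetch or tuplesort_end().
     */
    while (tuplesort_gettupleslot(sortstate, true, false, slot, NULL))
    {
        /* ... process the tuple now stored in "slot" ... */
    }
    /* on EOF, tuplesort_gettupleslot() has already cleared the slot */
}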

◆ tuplesort_initialize_shared()

void tuplesort_initialize_shared ( Sharedsort * shared,
int  nWorkers,
dsm_segment * seg 
)

Definition at line 2922 of file tuplesort.c.

2923{
2924 int i;
2925
2926 Assert(nWorkers > 0);
2927
2928 SpinLockInit(&shared->mutex);
2929 shared->currentWorker = 0;
2930 shared->workersFinished = 0;
2931 SharedFileSetInit(&shared->fileset, seg);
2932 shared->nTapes = nWorkers;
2933 for (i = 0; i < nWorkers; i++)
2934 {
2935 shared->tapes[i].firstblocknumber = 0L;
2936 }
2937}
void SharedFileSetInit(SharedFileSet *fileset, dsm_segment *seg)
Definition: sharedfileset.c:38
#define SpinLockInit(lock)
Definition: spin.h:57
TapeShare tapes[FLEXIBLE_ARRAY_MEMBER]
Definition: tuplesort.c:367
int workersFinished
Definition: tuplesort.c:355
int nTapes
Definition: tuplesort.c:361
slock_t mutex
Definition: tuplesort.c:344
int currentWorker
Definition: tuplesort.c:354
int64 firstblocknumber
Definition: logtape.h:54

References Assert(), Sharedsort::currentWorker, Sharedsort::fileset, TapeShare::firstblocknumber, i, Sharedsort::mutex, Sharedsort::nTapes, SharedFileSetInit(), SpinLockInit, Sharedsort::tapes, and Sharedsort::workersFinished.

Referenced by _brin_begin_parallel(), _bt_begin_parallel(), and _gin_begin_parallel().
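
A hedged sketch of the leader-side setup, loosely following _bt_begin_parallel(); the shm_toc key PARALLEL_KEY_TUPLESORT, the ParallelContext pcxt and nworkers are illustrative assumptions.

Size        estsort = tuplesort_estimate_shared(nworkers);
Sharedsort *sharedsort;

/* reserve space while planning the DSM segment */
shm_toc_estimate_chunk(&pcxt->estimator, estsort);
shm_toc_estimate_keys(&pcxt->estimator, 1);

/* ... after InitializeParallelDSM(pcxt) ... */
sharedsort = (Sharedsort *) shm_toc_allocate(pcxt->toc, estsort);
tuplesort_initialize_shared(sharedsort, nworkers, pcxt->seg);
shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLESORT, sharedsort);

/* each worker later looks the struct up and attaches to it: */
/*   sharedsort = shm_toc_lookup(toc, PARALLEL_KEY_TUPLESORT, false); */
/*   tuplesort_attach_shared(sharedsort, seg);                        */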

◆ tuplesort_markpos()

void tuplesort_markpos ( Tuplesortstate * state)

Definition at line 2421 of file tuplesort.c.

2422{
2423 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2424
2425 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2426
2427 switch (state->status)
2428 {
2429 case TSS_SORTEDINMEM:
2430 state->markpos_offset = state->current;
2431 state->markpos_eof = state->eof_reached;
2432 break;
2433 case TSS_SORTEDONTAPE:
2434 LogicalTapeTell(state->result_tape,
2435 &state->markpos_block,
2436 &state->markpos_offset);
2437 state->markpos_eof = state->eof_reached;
2438 break;
2439 default:
2440 elog(ERROR, "invalid tuplesort state");
2441 break;
2442 }
2443
2444 MemoryContextSwitchTo(oldcontext);
2445}
void LogicalTapeTell(LogicalTape *lt, int64 *blocknum, int *offset)
Definition: logtape.c:1162

References Assert(), elog, ERROR, LogicalTapeTell(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecSortMarkPos().
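
Mark/restore is only legal when the sort was begun with TUPLESORT_RANDOMACCESS; a sketch, with the begin-time key arrays assumed to be set up by the caller:

Tuplesortstate *ts = tuplesort_begin_heap(tupDesc, nkeys, attNums,
                                          sortOperators, sortCollations,
                                          nullsFirstFlags, work_mem,
                                          NULL, TUPLESORT_RANDOMACCESS);

/* ... load tuples and call tuplesort_performsort(ts) ... */

tuplesort_markpos(ts);          /* remember the current read position */
/* ... read ahead any number of tuples ... */
tuplesort_restorepos(ts);       /* continue again from the marked position */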

◆ tuplesort_merge_order()

int tuplesort_merge_order ( int64  allowedMem)

Definition at line 1764 of file tuplesort.c.

1765{
1766 int mOrder;
1767
1768 /*----------
1769 * In the merge phase, we need buffer space for each input and output tape.
1770 * Each pass in the balanced merge algorithm reads from M input tapes, and
1771 * writes to N output tapes. Each tape consumes TAPE_BUFFER_OVERHEAD bytes
1772 * of memory. In addition to that, we want MERGE_BUFFER_SIZE workspace per
1773 * input tape.
1774 *
1775 * totalMem = M * (TAPE_BUFFER_OVERHEAD + MERGE_BUFFER_SIZE) +
1776 * N * TAPE_BUFFER_OVERHEAD
1777 *
1778 * Except for the last and next-to-last merge passes, where there can be
1779 * fewer tapes left to process, M = N. We choose M so that we have the
1780 * desired amount of memory available for the input buffers
1781 * (TAPE_BUFFER_OVERHEAD + MERGE_BUFFER_SIZE), given the total memory
1782 * available for the tape buffers (allowedMem).
1783 *
1784 * Note: you might be thinking we need to account for the memtuples[]
1785 * array in this calculation, but we effectively treat that as part of the
1786 * MERGE_BUFFER_SIZE workspace.
1787 *----------
1788 */
1789 mOrder = allowedMem /
1790 (2 * TAPE_BUFFER_OVERHEAD + MERGE_BUFFER_SIZE);
1791
1792 /*
1793 * Even in minimum memory, use at least a MINORDER merge. On the other
1794 * hand, even when we have lots of memory, do not use more than a MAXORDER
1795 * merge. Tapes are pretty cheap, but they're not entirely free. Each
1796 * additional tape reduces the amount of memory available to build runs,
1797 * which in turn can cause the same sort to need more runs, which makes
1798 * merging slower even if it can still be done in a single pass. Also,
1799 * high order merges are quite slow due to CPU cache effects; it can be
1800 * faster to pay the I/O cost of a multi-pass merge than to perform a
1801 * single merge pass across many hundreds of tapes.
1802 */
1803 mOrder = Max(mOrder, MINORDER);
1804 mOrder = Min(mOrder, MAXORDER);
1805
1806 return mOrder;
1807}
#define Min(x, y)
Definition: c.h:1016
#define TAPE_BUFFER_OVERHEAD
Definition: tuplesort.c:176
#define MAXORDER
Definition: tuplesort.c:175
#define MERGE_BUFFER_SIZE
Definition: tuplesort.c:177
#define MINORDER
Definition: tuplesort.c:174

References Max, MAXORDER, MERGE_BUFFER_SIZE, Min, MINORDER, and TAPE_BUFFER_OVERHEAD.

Referenced by cost_tuplesort(), and inittapes().
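
A worked example, assuming the definitions currently in tuplesort.c (TAPE_BUFFER_OVERHEAD = BLCKSZ, MERGE_BUFFER_SIZE = 32 * BLCKSZ, MINORDER = 6, MAXORDER = 500) and the default 8 kB BLCKSZ: with allowedMem = 4 MB,

mOrder = 4194304 / (2 * 8192 + 262144) = 4194304 / 278528 = 15 (integer division)

which already lies inside [MINORDER, MAXORDER], so a 4 MB tape-buffer budget yields a 15-way merge.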

◆ tuplesort_method_name()

const char * tuplesort_method_name ( TuplesortMethod  m)

Definition at line 2529 of file tuplesort.c.

2530{
2531 switch (m)
2532 {
2533 case SORT_TYPE_STILL_IN_PROGRESS:
2534 return "still in progress";
2535 case SORT_TYPE_TOP_N_HEAPSORT:
2536 return "top-N heapsort";
2537 case SORT_TYPE_QUICKSORT:
2538 return "quicksort";
2539 case SORT_TYPE_EXTERNAL_SORT:
2540 return "external sort";
2541 case SORT_TYPE_EXTERNAL_MERGE:
2542 return "external merge";
2543 }
2544
2545 return "unknown";
2546}

References SORT_TYPE_EXTERNAL_MERGE, SORT_TYPE_EXTERNAL_SORT, SORT_TYPE_QUICKSORT, SORT_TYPE_STILL_IN_PROGRESS, and SORT_TYPE_TOP_N_HEAPSORT.

Referenced by show_incremental_sort_group_info(), and show_sort_info().

◆ tuplesort_performsort()

void tuplesort_performsort ( Tuplesortstate * state)

Definition at line 1349 of file tuplesort.c.

1350{
1351 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1352
1353 if (trace_sort)
1354 elog(LOG, "performsort of worker %d starting: %s",
1355 state->worker, pg_rusage_show(&state->ru_start));
1356
1357 switch (state->status)
1358 {
1359 case TSS_INITIAL:
1360
1361 /*
1362 * We were able to accumulate all the tuples within the allowed
1363 * amount of memory, or leader to take over worker tapes
1364 */
1365 if (SERIAL(state))
1366 {
1367 /* Just qsort 'em and we're done */
1368 tuplesort_sort_memtuples(state);
1369 state->status = TSS_SORTEDINMEM;
1370 }
1371 else if (WORKER(state))
1372 {
1373 /*
1374 * Parallel workers must still dump out tuples to tape. No
1375 * merge is required to produce single output run, though.
1376 */
1377 inittapes(state, false);
1378 dumptuples(state, true);
1379 worker_nomergeruns(state);
1380 state->status = TSS_SORTEDONTAPE;
1381 }
1382 else
1383 {
1384 /*
1385 * Leader will take over worker tapes and merge worker runs.
1386 * Note that mergeruns sets the correct state->status.
1387 */
1388 leader_takeover_tapes(state);
1389 mergeruns(state);
1390 }
1391 state->current = 0;
1392 state->eof_reached = false;
1393 state->markpos_block = 0L;
1394 state->markpos_offset = 0;
1395 state->markpos_eof = false;
1396 break;
1397
1398 case TSS_BOUNDED:
1399
1400 /*
1401 * We were able to accumulate all the tuples required for output
1402 * in memory, using a heap to eliminate excess tuples. Now we
1403 * have to transform the heap to a properly-sorted array. Note
1404 * that sort_bounded_heap sets the correct state->status.
1405 */
1406 sort_bounded_heap(state);
1407 state->current = 0;
1408 state->eof_reached = false;
1409 state->markpos_offset = 0;
1410 state->markpos_eof = false;
1411 break;
1412
1413 case TSS_BUILDRUNS:
1414
1415 /*
1416 * Finish tape-based sort. First, flush all tuples remaining in
1417 * memory out to tape; then merge until we have a single remaining
1418 * run (or, if !randomAccess and !WORKER(), one run per tape).
1419 * Note that mergeruns sets the correct state->status.
1420 */
1421 dumptuples(state, true);
1422 mergeruns(state);
1423 state->eof_reached = false;
1424 state->markpos_block = 0L;
1425 state->markpos_offset = 0;
1426 state->markpos_eof = false;
1427 break;
1428
1429 default:
1430 elog(ERROR, "invalid tuplesort state");
1431 break;
1432 }
1433
1434 if (trace_sort)
1435 {
1436 if (state->status == TSS_FINALMERGE)
1437 elog(LOG, "performsort of worker %d done (except %d-way final merge): %s",
1438 state->worker, state->nInputTapes,
1439 pg_rusage_show(&state->ru_start));
1440 else
1441 elog(LOG, "performsort of worker %d done: %s",
1442 state->worker, pg_rusage_show(&state->ru_start));
1443 }
1444
1445 MemoryContextSwitchTo(oldcontext);
1446}
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
#define SERIAL(state)
Definition: tuplesort.c:401
static void sort_bounded_heap(Tuplesortstate *state)
Definition: tuplesort.c:2622
@ TSS_INITIAL
Definition: tuplesort.c:154
@ TSS_BUILDRUNS
Definition: tuplesort.c:156
@ TSS_BOUNDED
Definition: tuplesort.c:155
static void leader_takeover_tapes(Tuplesortstate *state)
Definition: tuplesort.c:3053
static void tuplesort_sort_memtuples(Tuplesortstate *state)
Definition: tuplesort.c:2662
static void inittapes(Tuplesortstate *state, bool mergeruns)
Definition: tuplesort.c:1851
static void worker_nomergeruns(Tuplesortstate *state)
Definition: tuplesort.c:3031
static void mergeruns(Tuplesortstate *state)
Definition: tuplesort.c:2003
static void dumptuples(Tuplesortstate *state, bool alltuples)
Definition: tuplesort.c:2293

References dumptuples(), elog, ERROR, inittapes(), leader_takeover_tapes(), LOG, MemoryContextSwitchTo(), mergeruns(), pg_rusage_show(), SERIAL, sort_bounded_heap(), trace_sort, TSS_BOUNDED, TSS_BUILDRUNS, TSS_FINALMERGE, TSS_INITIAL, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, tuplesort_sort_memtuples(), WORKER, and worker_nomergeruns().

Referenced by _brin_parallel_merge(), _brin_parallel_scan_and_build(), _bt_leafbuild(), _bt_parallel_scan_and_sort(), _gin_parallel_merge(), _gin_parallel_scan_and_build(), _gin_process_worker_data(), _h_indexbuild(), array_sort_internal(), ExecIncrementalSort(), ExecSort(), gistbuild(), heapam_relation_copy_for_cluster(), hypothetical_dense_rank_final(), hypothetical_rank_common(), initialize_phase(), mode_final(), percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), percentile_disc_multi_final(), process_ordered_aggregate_multi(), process_ordered_aggregate_single(), switchToPresortedPrefixMode(), and validate_index().
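
The overall serial lifecycle around tuplesort_performsort(), as a sketch; fetch_next_input_slot() and emit_output_slot() are hypothetical stand-ins for the caller's input and output paths.

Tuplesortstate *ts = tuplesort_begin_heap(tupDesc, nkeys, attNums,
                                          sortOperators, sortCollations,
                                          nullsFirstFlags, work_mem,
                                          NULL, TUPLESORT_NONE);

while (fetch_next_input_slot(slot))        /* hypothetical producer */
    tuplesort_puttupleslot(ts, slot);

tuplesort_performsort(ts);                 /* no further input after this */

while (tuplesort_gettupleslot(ts, true, false, slot, NULL))
    emit_output_slot(slot);                /* hypothetical consumer */

tuplesort_end(ts);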

◆ tuplesort_putbrintuple()

void tuplesort_putbrintuple ( Tuplesortstate * state,
BrinTuple * tuple,
Size  size 
)

Definition at line 854 of file tuplesortvariants.c.

855{
856 SortTuple stup;
857 BrinSortTuple *bstup;
858 TuplesortPublic *base = TuplesortstateGetPublic(state);
859 MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
860 Size tuplen;
861
862 /* allocate space for the whole BRIN sort tuple */
863 bstup = palloc(BRINSORTTUPLE_SIZE(size));
864
865 bstup->tuplen = size;
866 memcpy(&bstup->tuple, tuple, size);
867
868 stup.tuple = bstup;
869 stup.datum1 = UInt32GetDatum(tuple->bt_blkno);
870 stup.isnull1 = false;
871
872 /* GetMemoryChunkSpace is not supported for bump contexts */
873 if (TupleSortUseBumpTupleCxt(base->sortopt))
874 tuplen = MAXALIGN(BRINSORTTUPLE_SIZE(size));
875 else
876 tuplen = GetMemoryChunkSpace(bstup);
877
878 tuplesort_puttuple_common(state, &stup,
879 base->sortKeys &&
880 base->sortKeys->abbrev_converter &&
881 !stup.isnull1, tuplen);
882
883 MemoryContextSwitchTo(oldcontext);
884}
Size GetMemoryChunkSpace(void *pointer)
Definition: mcxt.c:767
void * palloc(Size size)
Definition: mcxt.c:1365
static Datum UInt32GetDatum(uint32 X)
Definition: postgres.h:242
BlockNumber bt_blkno
Definition: brin_tuple.h:66
bool isnull1
Definition: tuplesort.h:152
MemoryContext tuplecontext
Definition: tuplesort.h:222
void tuplesort_puttuple_common(Tuplesortstate *state, SortTuple *tuple, bool useAbbrev, Size tuplen)
Definition: tuplesort.c:1155
#define TupleSortUseBumpTupleCxt(opt)
Definition: tuplesort.h:109
#define BRINSORTTUPLE_SIZE(len)

References SortSupportData::abbrev_converter, BRINSORTTUPLE_SIZE, BrinTuple::bt_blkno, SortTuple::datum1, GetMemoryChunkSpace(), SortTuple::isnull1, MAXALIGN, MemoryContextSwitchTo(), palloc(), TuplesortPublic::sortKeys, TuplesortPublic::sortopt, BrinSortTuple::tuple, SortTuple::tuple, TuplesortPublic::tuplecontext, BrinSortTuple::tuplen, tuplesort_puttuple_common(), TuplesortstateGetPublic, TupleSortUseBumpTupleCxt, and UInt32GetDatum().

Referenced by form_and_spill_tuple().

◆ tuplesort_putdatum()

void tuplesort_putdatum ( Tuplesortstate * state,
Datum  val,
bool  isNull 
)

Definition at line 923 of file tuplesortvariants.c.

924{
925 TuplesortPublic *base = TuplesortstateGetPublic(state);
926 TuplesortDatumArg *arg = (TuplesortDatumArg *) base->arg;
927 MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
928 SortTuple stup;
929
930 /*
931 * Pass-by-value types or null values are just stored directly in
932 * stup.datum1 (and stup.tuple is not used and set to NULL).
933 *
934 * Non-null pass-by-reference values need to be copied into memory we
935 * control, and possibly abbreviated. The copied value is pointed to by
936 * stup.tuple and is treated as the canonical copy (e.g. to return via
937 * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
938 * abbreviated value if abbreviation is happening, otherwise it's
939 * identical to stup.tuple.
940 */
941
942 if (isNull || !base->tuples)
943 {
944 /*
945 * Set datum1 to zeroed representation for NULLs (to be consistent,
946 * and to support cheap inequality tests for NULL abbreviated keys).
947 */
948 stup.datum1 = !isNull ? val : (Datum) 0;
949 stup.isnull1 = isNull;
950 stup.tuple = NULL; /* no separate storage */
951 }
952 else
953 {
954 stup.isnull1 = false;
955 stup.datum1 = datumCopy(val, false, arg->datumTypeLen);
956 stup.tuple = DatumGetPointer(stup.datum1);
957 }
958
959 tuplesort_puttuple_common(state, &stup,
960 base->tuples &&
961 base->sortKeys->abbrev_converter && !isNull, 0);
962
963 MemoryContextSwitchTo(oldcontext);
964}
uint64_t Datum
Definition: postgres.h:70
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:322

References SortSupportData::abbrev_converter, arg, TuplesortPublic::arg, datumCopy(), DatumGetPointer(), if(), MemoryContextSwitchTo(), TuplesortPublic::sortKeys, TuplesortPublic::tuplecontext, TuplesortPublic::tuples, tuplesort_puttuple_common(), TuplesortstateGetPublic, and val.

Referenced by array_sort_internal(), ExecEvalAggOrderedTransDatum(), ExecSort(), ordered_set_transition(), and validate_index_callback().
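
A sketch of sorting bare Datums, in the style of the ordered-set aggregates. tuplesort_begin_datum() and tuplesort_getdatum() are not reproduced on this page, so their argument lists here should be read as assumptions; ltopr stands for the "<" operator OID of the datum type, looked up by the caller.

Tuplesortstate *ts;
Datum       val;
bool        isnull;

ts = tuplesort_begin_datum(FLOAT8OID, ltopr, InvalidOid, false,
                           work_mem, NULL, TUPLESORT_NONE);

tuplesort_putdatum(ts, Float8GetDatum(42.0), false);
tuplesort_putdatum(ts, (Datum) 0, true);            /* a NULL input */
tuplesort_putdatum(ts, Float8GetDatum(7.5), false);

tuplesort_performsort(ts);

while (tuplesort_getdatum(ts, true, true, &val, &isnull, NULL))
{
    /* values come back in ascending order; NULLs sort last here */
}
tuplesort_end(ts);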

◆ tuplesort_putgintuple()

void tuplesort_putgintuple ( Tuplesortstate * state,
GinTuple * tuple,
Size  size 
)

Definition at line 887 of file tuplesortvariants.c.

888{
889 SortTuple stup;
890 GinTuple *ctup;
891 TuplesortPublic *base = TuplesortstateGetPublic(state);
892 MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
893 Size tuplen;
894
895 /* copy the GinTuple into the right memory context */
896 ctup = palloc(size);
897 memcpy(ctup, tuple, size);
898
899 stup.tuple = ctup;
900 stup.datum1 = (Datum) 0;
901 stup.isnull1 = false;
902
903 /* GetMemoryChunkSpace is not supported for bump contexts */
904 if (TupleSortUseBumpTupleCxt(base->sortopt))
905 tuplen = MAXALIGN(size);
906 else
907 tuplen = GetMemoryChunkSpace(ctup);
908
909 tuplesort_puttuple_common(state, &stup,
910 base->sortKeys &&
911 base->sortKeys->abbrev_converter &&
912 !stup.isnull1, tuplen);
913
914 MemoryContextSwitchTo(oldcontext);
915}

References SortSupportData::abbrev_converter, SortTuple::datum1, GetMemoryChunkSpace(), SortTuple::isnull1, MAXALIGN, MemoryContextSwitchTo(), palloc(), TuplesortPublic::sortKeys, TuplesortPublic::sortopt, SortTuple::tuple, TuplesortPublic::tuplecontext, tuplesort_puttuple_common(), TuplesortstateGetPublic, and TupleSortUseBumpTupleCxt.

Referenced by _gin_process_worker_data(), and ginFlushBuildState().

◆ tuplesort_putheaptuple()

void tuplesort_putheaptuple ( Tuplesortstate * state,
HeapTuple  tup 
)

Definition at line 775 of file tuplesortvariants.c.

776{
777 SortTuple stup;
778 TuplesortPublic *base = TuplesortstateGetPublic(state);
779 TuplesortClusterArg *arg = (TuplesortClusterArg *) base->arg;
780 MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
781 Size tuplen;
782
783 /* copy the tuple into sort storage */
784 tup = heap_copytuple(tup);
785 stup.tuple = tup;
786
787 /*
788 * set up first-column key value, and potentially abbreviate, if it's a
789 * simple column
790 */
791 if (base->haveDatum1)
792 {
793 stup.datum1 = heap_getattr(tup,
794 arg->indexInfo->ii_IndexAttrNumbers[0],
795 arg->tupDesc,
796 &stup.isnull1);
797 }
798
799 /* GetMemoryChunkSpace is not supported for bump contexts */
800 if (TupleSortUseBumpTupleCxt(base->sortopt))
801 tuplen = MAXALIGN(HEAPTUPLESIZE + tup->t_len);
802 else
803 tuplen = GetMemoryChunkSpace(tup);
804
805 tuplesort_puttuple_common(state, &stup,
806 base->haveDatum1 &&
807 base->sortKeys->abbrev_converter &&
808 !stup.isnull1, tuplen);
809
810 MemoryContextSwitchTo(oldcontext);
811}
HeapTuple heap_copytuple(HeapTuple tuple)
Definition: heaptuple.c:778
#define HEAPTUPLESIZE
Definition: htup.h:73
static Datum heap_getattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: htup_details.h:904
uint32 t_len
Definition: htup.h:64

References SortSupportData::abbrev_converter, arg, TuplesortPublic::arg, SortTuple::datum1, GetMemoryChunkSpace(), TuplesortPublic::haveDatum1, heap_copytuple(), heap_getattr(), HEAPTUPLESIZE, SortTuple::isnull1, MAXALIGN, MemoryContextSwitchTo(), TuplesortPublic::sortKeys, TuplesortPublic::sortopt, HeapTupleData::t_len, SortTuple::tuple, TuplesortPublic::tuplecontext, tuplesort_puttuple_common(), TuplesortstateGetPublic, and TupleSortUseBumpTupleCxt.

Referenced by heapam_relation_copy_for_cluster().

◆ tuplesort_putindextuplevalues()

void tuplesort_putindextuplevalues ( Tuplesortstate * state,
Relation  rel,
const ItemPointerData * self,
const Datum * values,
const bool *  isnull 
)

Definition at line 818 of file tuplesortvariants.c.

821{
822 SortTuple stup;
823 IndexTuple tuple;
824 TuplesortPublic *base = TuplesortstateGetPublic(state);
825 TuplesortIndexArg *arg = (TuplesortIndexArg *) base->arg;
826 Size tuplen;
827
828 stup.tuple = index_form_tuple_context(RelationGetDescr(rel), values,
829 isnull, base->tuplecontext);
830 tuple = ((IndexTuple) stup.tuple);
831 tuple->t_tid = *self;
832 /* set up first-column key value */
833 stup.datum1 = index_getattr(tuple,
834 1,
835 RelationGetDescr(arg->indexRel),
836 &stup.isnull1);
837
838 /* GetMemoryChunkSpace is not supported for bump contexts */
839 if (TupleSortUseBumpTupleCxt(base->sortopt))
840 tuplen = MAXALIGN(tuple->t_info & INDEX_SIZE_MASK);
841 else
842 tuplen = GetMemoryChunkSpace(tuple);
843
844 tuplesort_puttuple_common(state, &stup,
845 base->sortKeys &&
846 base->sortKeys->abbrev_converter &&
847 !stup.isnull1, tuplen);
848}
static Datum values[MAXATTR]
Definition: bootstrap.c:153
IndexTuple index_form_tuple_context(TupleDesc tupleDescriptor, const Datum *values, const bool *isnull, MemoryContext context)
Definition: indextuple.c:65
IndexTupleData * IndexTuple
Definition: itup.h:53
static Datum index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: itup.h:131
#define INDEX_SIZE_MASK
Definition: itup.h:65
ItemPointerData t_tid
Definition: itup.h:37
unsigned short t_info
Definition: itup.h:49

References SortSupportData::abbrev_converter, arg, TuplesortPublic::arg, SortTuple::datum1, GetMemoryChunkSpace(), index_form_tuple_context(), index_getattr(), INDEX_SIZE_MASK, SortTuple::isnull1, MAXALIGN, RelationGetDescr, TuplesortPublic::sortKeys, TuplesortPublic::sortopt, IndexTupleData::t_info, IndexTupleData::t_tid, SortTuple::tuple, TuplesortPublic::tuplecontext, tuplesort_puttuple_common(), TuplesortstateGetPublic, TupleSortUseBumpTupleCxt, and values.

Referenced by _bt_spool(), _h_spool(), and gistSortedBuildCallback().
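
A sketch of an index-build callback feeding this function, in the style of _bt_spool()'s caller; MyBuildState and its fields are hypothetical, and the sortstate would come from tuplesort_begin_index_btree().

static void
my_build_callback(Relation indexRel, ItemPointer tid, Datum *values,
                  bool *isnull, bool tupleIsAlive, void *state)
{
    MyBuildState *buildstate = (MyBuildState *) state;

    /* the index tuple is formed and copied into sort memory here */
    tuplesort_putindextuplevalues(buildstate->sortstate, indexRel,
                                  tid, values, isnull);
    buildstate->indtuples += 1;
}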

◆ tuplesort_puttuple_common()

void tuplesort_puttuple_common ( Tuplesortstate * state,
SortTuple * tuple,
bool  useAbbrev,
Size  tuplen 
)

Definition at line 1155 of file tuplesort.c.

1157{
1158 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1159
1160 Assert(!LEADER(state));
1161
1162 /* account for the memory used for this tuple */
1163 USEMEM(state, tuplen);
1164 state->tupleMem += tuplen;
1165
1166 if (!useAbbrev)
1167 {
1168 /*
1169 * Leave ordinary Datum representation, or NULL value. If there is a
1170 * converter it won't expect NULL values, and cost model is not
1171 * required to account for NULL, so in that case we avoid calling
1172 * converter and just set datum1 to zeroed representation (to be
1173 * consistent, and to support cheap inequality tests for NULL
1174 * abbreviated keys).
1175 */
1176 }
1177 else if (!consider_abort_common(state))
1178 {
1179 /* Store abbreviated key representation */
1180 tuple->datum1 = state->base.sortKeys->abbrev_converter(tuple->datum1,
1181 state->base.sortKeys);
1182 }
1183 else
1184 {
1185 /*
1186 * Set state to be consistent with never trying abbreviation.
1187 *
1188 * Alter datum1 representation in already-copied tuples, so as to
1189 * ensure a consistent representation (current tuple was just
1190 * handled). It does not matter if some dumped tuples are already
1191 * sorted on tape, since serialized tuples lack abbreviated keys
1192 * (TSS_BUILDRUNS state prevents control reaching here in any case).
1193 */
1194 REMOVEABBREV(state, state->memtuples, state->memtupcount);
1195 }
1196
1197 switch (state->status)
1198 {
1199 case TSS_INITIAL:
1200
1201 /*
1202 * Save the tuple into the unsorted array. First, grow the array
1203 * as needed. Note that we try to grow the array when there is
1204 * still one free slot remaining --- if we fail, there'll still be
1205 * room to store the incoming tuple, and then we'll switch to
1206 * tape-based operation.
1207 */
1208 if (state->memtupcount >= state->memtupsize - 1)
1209 {
1210 (void) grow_memtuples(state);
1211 Assert(state->memtupcount < state->memtupsize);
1212 }
1213 state->memtuples[state->memtupcount++] = *tuple;
1214
1215 /*
1216 * Check if it's time to switch over to a bounded heapsort. We do
1217 * so if the input tuple count exceeds twice the desired tuple
1218 * count (this is a heuristic for where heapsort becomes cheaper
1219 * than a quicksort), or if we've just filled workMem and have
1220 * enough tuples to meet the bound.
1221 *
1222 * Note that once we enter TSS_BOUNDED state we will always try to
1223 * complete the sort that way. In the worst case, if later input
1224 * tuples are larger than earlier ones, this might cause us to
1225 * exceed workMem significantly.
1226 */
1227 if (state->bounded &&
1228 (state->memtupcount > state->bound * 2 ||
1229 (state->memtupcount > state->bound && LACKMEM(state))))
1230 {
1231 if (trace_sort)
1232 elog(LOG, "switching to bounded heapsort at %d tuples: %s",
1233 state->memtupcount,
1234 pg_rusage_show(&state->ru_start));
1235 make_bounded_heap(state);
1236 MemoryContextSwitchTo(oldcontext);
1237 return;
1238 }
1239
1240 /*
1241 * Done if we still fit in available memory and have array slots.
1242 */
1243 if (state->memtupcount < state->memtupsize && !LACKMEM(state))
1244 {
1245 MemoryContextSwitchTo(oldcontext);
1246 return;
1247 }
1248
1249 /*
1250 * Nope; time to switch to tape-based operation.
1251 */
1252 inittapes(state, true);
1253
1254 /*
1255 * Dump all tuples.
1256 */
1257 dumptuples(state, false);
1258 break;
1259
1260 case TSS_BOUNDED:
1261
1262 /*
1263 * We don't want to grow the array here, so check whether the new
1264 * tuple can be discarded before putting it in. This should be a
1265 * good speed optimization, too, since when there are many more
1266 * input tuples than the bound, most input tuples can be discarded
1267 * with just this one comparison. Note that because we currently
1268 * have the sort direction reversed, we must check for <= not >=.
1269 */
1270 if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
1271 {
1272 /* new tuple <= top of the heap, so we can discard it */
1273 free_sort_tuple(state, tuple);
1274 CHECK_FOR_INTERRUPTS();
1275 }
1276 else
1277 {
1278 /* discard top of heap, replacing it with the new tuple */
1279 free_sort_tuple(state, &state->memtuples[0]);
1280 tuplesort_heap_replace_top(state, tuple);
1281 }
1282 break;
1283
1284 case TSS_BUILDRUNS:
1285
1286 /*
1287 * Save the tuple into the unsorted array (there must be space)
1288 */
1289 state->memtuples[state->memtupcount++] = *tuple;
1290
1291 /*
1292 * If we are over the memory limit, dump all tuples.
1293 */
1294 dumptuples(state, false);
1295 break;
1296
1297 default:
1298 elog(ERROR, "invalid tuplesort state");
1299 break;
1300 }
1301 MemoryContextSwitchTo(oldcontext);
1302}
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:123
#define COMPARETUP(state, a, b)
Definition: tuplesort.c:394
static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
Definition: tuplesort.c:3112
#define REMOVEABBREV(state, stup, count)
Definition: tuplesort.c:393
#define LACKMEM(state)
Definition: tuplesort.c:398
#define USEMEM(state, amt)
Definition: tuplesort.c:399
static bool grow_memtuples(Tuplesortstate *state)
Definition: tuplesort.c:1038
static void make_bounded_heap(Tuplesortstate *state)
Definition: tuplesort.c:2573
#define LEADER(state)
Definition: tuplesort.c:403
static bool consider_abort_common(Tuplesortstate *state)
Definition: tuplesort.c:1305

References Assert(), CHECK_FOR_INTERRUPTS, COMPARETUP, consider_abort_common(), SortTuple::datum1, dumptuples(), elog, ERROR, free_sort_tuple(), grow_memtuples(), inittapes(), LACKMEM, LEADER, LOG, make_bounded_heap(), MemoryContextSwitchTo(), pg_rusage_show(), REMOVEABBREV, trace_sort, TSS_BOUNDED, TSS_BUILDRUNS, TSS_INITIAL, tuplesort_heap_replace_top(), and USEMEM.

Referenced by tuplesort_putbrintuple(), tuplesort_putdatum(), tuplesort_putgintuple(), tuplesort_putheaptuple(), tuplesort_putindextuplevalues(), and tuplesort_puttupleslot().
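
Condensed from the tuplesort_put*() variants above, this is the pattern a new variant would follow (inside a hypothetical tuplesort_putmytuple(state, tuple) wrapper) before handing off to tuplesort_puttuple_common(); my_copy_tuple(), first_key_datum() and my_tuple_size() are placeholders for variant-specific logic.

TuplesortPublic *base = TuplesortstateGetPublic(state);
MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
SortTuple   stup;
Size        tuplen;

stup.tuple = my_copy_tuple(tuple);              /* copy into tuplecontext */
stup.datum1 = first_key_datum(stup.tuple, &stup.isnull1);

/* GetMemoryChunkSpace is not supported for bump contexts */
if (TupleSortUseBumpTupleCxt(base->sortopt))
    tuplen = MAXALIGN(my_tuple_size(stup.tuple));
else
    tuplen = GetMemoryChunkSpace(stup.tuple);

tuplesort_puttuple_common(state, &stup,
                          base->sortKeys &&
                          base->sortKeys->abbrev_converter &&
                          !stup.isnull1, tuplen);

MemoryContextSwitchTo(oldcontext);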

◆ tuplesort_puttupleslot()

void tuplesort_puttupleslot ( Tuplesortstate * state,
TupleTableSlot * slot 
)

Definition at line 735 of file tuplesortvariants.c.

736{
737 TuplesortPublic *base = TuplesortstateGetPublic(state);
738 MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
739 TupleDesc tupDesc = (TupleDesc) base->arg;
740 SortTuple stup;
741 MinimalTuple tuple;
742 HeapTupleData htup;
743 Size tuplen;
744
745 /* copy the tuple into sort storage */
746 tuple = ExecCopySlotMinimalTuple(slot);
747 stup.tuple = tuple;
748 /* set up first-column key value */
749 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
750 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
751 stup.datum1 = heap_getattr(&htup,
752 base->sortKeys[0].ssup_attno,
753 tupDesc,
754 &stup.isnull1);
755
756 /* GetMemoryChunkSpace is not supported for bump contexts */
757 if (TupleSortUseBumpTupleCxt(base->sortopt))
758 tuplen = MAXALIGN(tuple->t_len);
759 else
760 tuplen = GetMemoryChunkSpace(tuple);
761
762 tuplesort_puttuple_common(state, &stup,
763 base->sortKeys->abbrev_converter &&
764 !stup.isnull1, tuplen);
765
766 MemoryContextSwitchTo(oldcontext);
767}
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define MINIMAL_TUPLE_OFFSET
Definition: htup_details.h:669
struct TupleDescData * TupleDesc
Definition: tupdesc.h:145
static MinimalTuple ExecCopySlotMinimalTuple(TupleTableSlot *slot)
Definition: tuptable.h:495

References SortSupportData::abbrev_converter, TuplesortPublic::arg, ExecCopySlotMinimalTuple(), GetMemoryChunkSpace(), heap_getattr(), MAXALIGN, MemoryContextSwitchTo(), MINIMAL_TUPLE_OFFSET, TuplesortPublic::sortKeys, TuplesortPublic::sortopt, SortSupportData::ssup_attno, MinimalTupleData::t_len, TuplesortPublic::tuplecontext, tuplesort_puttuple_common(), TuplesortstateGetPublic, and TupleSortUseBumpTupleCxt.

Referenced by ExecEvalAggOrderedTransTuple(), ExecIncrementalSort(), ExecSort(), fetch_input_tuple(), hypothetical_dense_rank_final(), hypothetical_rank_common(), ordered_set_transition_multi(), and switchToPresortedPrefixMode().
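
A sketch of the usual feeding loop, ExecSort()-style; next_outer_slot() stands in for pulling from the outer plan, and sortstate is assumed to exist. The slot's contents are copied into sort memory, so the same slot can be reused for every input row.

for (;;)
{
    TupleTableSlot *slot = next_outer_slot();   /* hypothetical */

    if (TupIsNull(slot))
        break;
    tuplesort_puttupleslot(sortstate, slot);
}
tuplesort_performsort(sortstate);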

◆ tuplesort_readtup_alloc()

void * tuplesort_readtup_alloc ( Tuplesortstate * state,
Size  tuplen 
)

Definition at line 2867 of file tuplesort.c.

2868{
2869 SlabSlot *buf;
2870
2871 /*
2872 * We pre-allocate enough slots in the slab arena that we should never run
2873 * out.
2874 */
2875 Assert(state->slabFreeHead);
2876
2877 if (tuplen > SLAB_SLOT_SIZE || !state->slabFreeHead)
2878 return MemoryContextAlloc(state->base.sortcontext, tuplen);
2879 else
2880 {
2881 buf = state->slabFreeHead;
2882 /* Reuse this slot */
2883 state->slabFreeHead = buf->nextfree;
2884
2885 return buf;
2886 }
2887}
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1229
static char buf[DEFAULT_XLOG_SEG_SIZE]
Definition: pg_test_fsync.c:71
#define SLAB_SLOT_SIZE
Definition: tuplesort.c:140

References Assert(), buf, MemoryContextAlloc(), and SLAB_SLOT_SIZE.

Referenced by readtup_cluster(), readtup_datum(), readtup_heap(), readtup_index(), readtup_index_brin(), and readtup_index_gin().
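
A deliberately simplified sketch of how a variant's readtup routine would use this allocator together with the LogicalTapeReadExact() macro; the real readtup_* routines differ in how they reconstruct the tuple header and the first sort key.

static void
readtup_mytuple(Tuplesortstate *state, SortTuple *stup,
                LogicalTape *tape, unsigned int len)
{
    TuplesortPublic *base = TuplesortstateGetPublic(state);
    unsigned int tuplen = len - sizeof(unsigned int);
    void       *tup = tuplesort_readtup_alloc(state, tuplen);  /* slab slot */

    LogicalTapeReadExact(tape, tup, tuplen);
    if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
        LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));

    stup->tuple = tup;
    /* ... set stup->datum1 / stup->isnull1 from the reconstructed tuple ... */
}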

◆ tuplesort_rescan()

void tuplesort_rescan ( Tuplesortstate * state)

Definition at line 2388 of file tuplesort.c.

2389{
2390 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2391
2392 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2393
2394 switch (state->status)
2395 {
2396 case TSS_SORTEDINMEM:
2397 state->current = 0;
2398 state->eof_reached = false;
2399 state->markpos_offset = 0;
2400 state->markpos_eof = false;
2401 break;
2402 case TSS_SORTEDONTAPE:
2403 LogicalTapeRewindForRead(state->result_tape, 0);
2404 state->eof_reached = false;
2405 state->markpos_block = 0L;
2406 state->markpos_offset = 0;
2407 state->markpos_eof = false;
2408 break;
2409 default:
2410 elog(ERROR, "invalid tuplesort state");
2411 break;
2412 }
2413
2414 MemoryContextSwitchTo(oldcontext);
2415}
void LogicalTapeRewindForRead(LogicalTape *lt, size_t buffer_size)
Definition: logtape.c:846

References Assert(), elog, ERROR, LogicalTapeRewindForRead(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecReScanSort(), mode_final(), percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), and percentile_disc_multi_final().

◆ tuplesort_reset()

void tuplesort_reset ( Tuplesortstate * state)

Definition at line 1005 of file tuplesort.c.

1006{
1007 tuplesort_updatemax(state);
1008 tuplesort_free(state);
1009
1010 /*
1011 * After we've freed up per-batch memory, re-setup all of the state common
1012 * to both the first batch and any subsequent batch.
1013 */
1014 tuplesort_begin_batch(state);
1015
1016 state->lastReturnedTuple = NULL;
1017 state->slabMemoryBegin = NULL;
1018 state->slabMemoryEnd = NULL;
1019 state->slabFreeHead = NULL;
1020}

References tuplesort_begin_batch(), tuplesort_free(), and tuplesort_updatemax().

Referenced by ExecIncrementalSort(), ExecReScanIncrementalSort(), and switchToPresortedPrefixMode().
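
A sketch of batch reuse in the incremental-sort style; group_done(), next_group_slot(), more_groups() and emit() are hypothetical helpers, and ts/slot are assumed to be set up already.

for (;;)
{
    while (!group_done())
        tuplesort_puttupleslot(ts, next_group_slot());

    tuplesort_performsort(ts);
    while (tuplesort_gettupleslot(ts, true, false, slot, NULL))
        emit(slot);

    if (!more_groups())
        break;
    tuplesort_reset(ts);    /* keep the Tuplesortstate, drop per-batch memory */
}
tuplesort_end(ts);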

◆ tuplesort_restorepos()

void tuplesort_restorepos ( Tuplesortstate * state)

Definition at line 2452 of file tuplesort.c.

2453{
2454 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2455
2456 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2457
2458 switch (state->status)
2459 {
2460 case TSS_SORTEDINMEM:
2461 state->current = state->markpos_offset;
2462 state->eof_reached = state->markpos_eof;
2463 break;
2464 case TSS_SORTEDONTAPE:
2465 LogicalTapeSeek(state->result_tape,
2466 state->markpos_block,
2467 state->markpos_offset);
2468 state->eof_reached = state->markpos_eof;
2469 break;
2470 default:
2471 elog(ERROR, "invalid tuplesort state");
2472 break;
2473 }
2474
2475 MemoryContextSwitchTo(oldcontext);
2476}
void LogicalTapeSeek(LogicalTape *lt, int64 blocknum, int offset)
Definition: logtape.c:1133

References Assert(), elog, ERROR, LogicalTapeSeek(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecSortRestrPos().

◆ tuplesort_set_bound()

void tuplesort_set_bound ( Tuplesortstate * state,
int64  bound 
)

Definition at line 824 of file tuplesort.c.

825{
826 /* Assert we're called before loading any tuples */
827 Assert(state->status == TSS_INITIAL && state->memtupcount == 0);
828 /* Assert we allow bounded sorts */
829 Assert(state->base.sortopt & TUPLESORT_ALLOWBOUNDED);
830 /* Can't set the bound twice, either */
831 Assert(!state->bounded);
832 /* Also, this shouldn't be called in a parallel worker */
833 Assert(!WORKER(state));
834
835 /* Parallel leader allows but ignores hint */
836 if (LEADER(state))
837 return;
838
839#ifdef DEBUG_BOUNDED_SORT
840 /* Honor GUC setting that disables the feature (for easy testing) */
841 if (!optimize_bounded_sort)
842 return;
843#endif
844
845 /* We want to be able to compute bound * 2, so limit the setting */
846 if (bound > (int64) (INT_MAX / 2))
847 return;
848
849 state->bounded = true;
850 state->bound = (int) bound;
851
852 /*
853 * Bounded sorts are not an effective target for abbreviated key
854 * optimization. Disable by setting state to be consistent with no
855 * abbreviation support.
856 */
857 state->base.sortKeys->abbrev_converter = NULL;
858 if (state->base.sortKeys->abbrev_full_comparator)
859 state->base.sortKeys->comparator = state->base.sortKeys->abbrev_full_comparator;
860
861 /* Not strictly necessary, but be tidy */
862 state->base.sortKeys->abbrev_abort = NULL;
863 state->base.sortKeys->abbrev_full_comparator = NULL;
864}
#define TUPLESORT_ALLOWBOUNDED
Definition: tuplesort.h:100

References Assert(), LEADER, TSS_INITIAL, TUPLESORT_ALLOWBOUNDED, and WORKER.

Referenced by ExecIncrementalSort(), ExecSort(), and switchToPresortedPrefixMode().
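
A sketch of a bounded (top-N) sort as ExecSort() arranges it for LIMIT queries: the begin-time flags must include TUPLESORT_ALLOWBOUNDED, and the bound is set before any tuple is loaded. The key arrays are assumed to be set up by the caller.

Tuplesortstate *ts = tuplesort_begin_heap(tupDesc, nkeys, attNums,
                                          sortOperators, sortCollations,
                                          nullsFirstFlags, work_mem,
                                          NULL, TUPLESORT_ALLOWBOUNDED);

tuplesort_set_bound(ts, 100);   /* only the first 100 output tuples matter */

/* ... put tuples, tuplesort_performsort(ts), then fetch at most 100 back ... */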

◆ tuplesort_skiptuples()

bool tuplesort_skiptuples ( Tuplesortstate * state,
int64  ntuples,
bool  forward 
)

Definition at line 1696 of file tuplesort.c.

1697{
1698 MemoryContext oldcontext;
1699
1700 /*
1701 * We don't actually support backwards skip yet, because no callers need
1702 * it. The API is designed to allow for that later, though.
1703 */
1704 Assert(forward);
1705 Assert(ntuples >= 0);
1706 Assert(!WORKER(state));
1707
1708 switch (state->status)
1709 {
1710 case TSS_SORTEDINMEM:
1711 if (state->memtupcount - state->current >= ntuples)
1712 {
1713 state->current += ntuples;
1714 return true;
1715 }
1716 state->current = state->memtupcount;
1717 state->eof_reached = true;
1718
1719 /*
1720 * Complain if caller tries to retrieve more tuples than
1721 * originally asked for in a bounded sort. This is because
1722 * returning EOF here might be the wrong thing.
1723 */
1724 if (state->bounded && state->current >= state->bound)
1725 elog(ERROR, "retrieved too many tuples in a bounded sort");
1726
1727 return false;
1728
1729 case TSS_SORTEDONTAPE:
1730 case TSS_FINALMERGE:
1731
1732 /*
1733 * We could probably optimize these cases better, but for now it's
1734 * not worth the trouble.
1735 */
1736 oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1737 while (ntuples-- > 0)
1738 {
1739 SortTuple stup;
1740
1741 if (!tuplesort_gettuple_common(state, forward, &stup))
1742 {
1743 MemoryContextSwitchTo(oldcontext);
1744 return false;
1745 }
1746 CHECK_FOR_INTERRUPTS();
1747 }
1748 MemoryContextSwitchTo(oldcontext);
1749 return true;
1750
1751 default:
1752 elog(ERROR, "invalid tuplesort state");
1753 return false; /* keep compiler quiet */
1754 }
1755}

References Assert(), CHECK_FOR_INTERRUPTS, elog, ERROR, MemoryContextSwitchTo(), TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, tuplesort_gettuple_common(), and WORKER.

Referenced by percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), and percentile_disc_multi_final().
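
A sketch modeled on percentile_disc_final(): after performsort, skip directly to the target row instead of fetching and discarding; ts, rownum, val and isnull are assumed to exist, and the tuplesort_getdatum() argument list is an assumption as noted above.

tuplesort_performsort(ts);

if (rownum > 1 && !tuplesort_skiptuples(ts, rownum - 1, true))
    elog(ERROR, "missing row in percentile_disc");

if (!tuplesort_getdatum(ts, true, true, &val, &isnull, NULL))
    elog(ERROR, "missing row in percentile_disc");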

◆ tuplesort_space_type_name()

const char * tuplesort_space_type_name ( TuplesortSpaceType  t)

Definition at line 2552 of file tuplesort.c.

2553{
2554 Assert(t == SORT_SPACE_TYPE_DISK || t == SORT_SPACE_TYPE_MEMORY);
2555 return t == SORT_SPACE_TYPE_DISK ? "Disk" : "Memory";
2556}

References Assert(), SORT_SPACE_TYPE_DISK, and SORT_SPACE_TYPE_MEMORY.

Referenced by show_incremental_sort_group_info(), and show_sort_info().

◆ tuplesort_used_bound()

bool tuplesort_used_bound ( Tuplesortstate * state)

Definition at line 872 of file tuplesort.c.

873{
874 return state->boundUsed;
875}

Referenced by ExecIncrementalSort().