PostgreSQL Source Code git master
tuplesort.h File Reference
#include "access/brin_tuple.h"
#include "access/itup.h"
#include "executor/tuptable.h"
#include "storage/dsm.h"
#include "utils/logtape.h"
#include "utils/relcache.h"
#include "utils/sortsupport.h"
Include dependency graph for tuplesort.h:
This graph shows which files directly or indirectly include this file:


Data Structures

struct  SortCoordinateData
 
struct  TuplesortInstrumentation
 
struct  SortTuple
 
struct  TuplesortPublic
 

Macros

#define NUM_TUPLESORTMETHODS   4
 
#define TUPLESORT_NONE   0
 
#define TUPLESORT_RANDOMACCESS   (1 << 0)
 
#define TUPLESORT_ALLOWBOUNDED   (1 << 1)
 
#define TupleSortUseBumpTupleCxt(opt)   (((opt) & TUPLESORT_ALLOWBOUNDED) == 0)
 
#define PARALLEL_SORT(coordinate)
 
#define TuplesortstateGetPublic(state)   ((TuplesortPublic *) state)
 
#define LogicalTapeReadExact(tape, ptr, len)
 

Typedefs

typedef struct Tuplesortstate Tuplesortstate
 
typedef struct Sharedsort Sharedsort
 
typedef struct SortCoordinateData SortCoordinateData
 
typedef struct SortCoordinateData *SortCoordinate
 
typedef struct TuplesortInstrumentation TuplesortInstrumentation
 
typedef int(* SortTupleComparator) (const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
 

Enumerations

enum  TuplesortMethod {
  SORT_TYPE_STILL_IN_PROGRESS = 0 , SORT_TYPE_TOP_N_HEAPSORT = 1 << 0 , SORT_TYPE_QUICKSORT = 1 << 1 , SORT_TYPE_EXTERNAL_SORT = 1 << 2 ,
  SORT_TYPE_EXTERNAL_MERGE = 1 << 3
}
 
enum  TuplesortSpaceType { SORT_SPACE_TYPE_DISK , SORT_SPACE_TYPE_MEMORY }
 

Functions

Tuplesortstate * tuplesort_begin_common (int workMem, SortCoordinate coordinate, int sortopt)
 
void tuplesort_set_bound (Tuplesortstate *state, int64 bound)
 
bool tuplesort_used_bound (Tuplesortstate *state)
 
void tuplesort_puttuple_common (Tuplesortstate *state, SortTuple *tuple, bool useAbbrev, Size tuplen)
 
void tuplesort_performsort (Tuplesortstate *state)
 
bool tuplesort_gettuple_common (Tuplesortstate *state, bool forward, SortTuple *stup)
 
bool tuplesort_skiptuples (Tuplesortstate *state, int64 ntuples, bool forward)
 
void tuplesort_end (Tuplesortstate *state)
 
void tuplesort_reset (Tuplesortstate *state)
 
void tuplesort_get_stats (Tuplesortstate *state, TuplesortInstrumentation *stats)
 
const char * tuplesort_method_name (TuplesortMethod m)
 
const char * tuplesort_space_type_name (TuplesortSpaceType t)
 
int tuplesort_merge_order (int64 allowedMem)
 
Size tuplesort_estimate_shared (int nWorkers)
 
void tuplesort_initialize_shared (Sharedsort *shared, int nWorkers, dsm_segment *seg)
 
void tuplesort_attach_shared (Sharedsort *shared, dsm_segment *seg)
 
void tuplesort_rescan (Tuplesortstate *state)
 
void tuplesort_markpos (Tuplesortstate *state)
 
void tuplesort_restorepos (Tuplesortstate *state)
 
void * tuplesort_readtup_alloc (Tuplesortstate *state, Size tuplen)
 
Tuplesortstate * tuplesort_begin_heap (TupleDesc tupDesc, int nkeys, AttrNumber *attNums, Oid *sortOperators, Oid *sortCollations, bool *nullsFirstFlags, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_cluster (TupleDesc tupDesc, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_index_btree (Relation heapRel, Relation indexRel, bool enforceUnique, bool uniqueNullsNotDistinct, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_index_hash (Relation heapRel, Relation indexRel, uint32 high_mask, uint32 low_mask, uint32 max_buckets, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_index_gist (Relation heapRel, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_index_brin (int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_datum (Oid datumType, Oid sortOperator, Oid sortCollation, bool nullsFirstFlag, int workMem, SortCoordinate coordinate, int sortopt)
 
void tuplesort_puttupleslot (Tuplesortstate *state, TupleTableSlot *slot)
 
void tuplesort_putheaptuple (Tuplesortstate *state, HeapTuple tup)
 
void tuplesort_putindextuplevalues (Tuplesortstate *state, Relation rel, ItemPointer self, const Datum *values, const bool *isnull)
 
void tuplesort_putbrintuple (Tuplesortstate *state, BrinTuple *tuple, Size size)
 
void tuplesort_putdatum (Tuplesortstate *state, Datum val, bool isNull)
 
bool tuplesort_gettupleslot (Tuplesortstate *state, bool forward, bool copy, TupleTableSlot *slot, Datum *abbrev)
 
HeapTuple tuplesort_getheaptuple (Tuplesortstate *state, bool forward)
 
IndexTuple tuplesort_getindextuple (Tuplesortstate *state, bool forward)
 
BrinTuple * tuplesort_getbrintuple (Tuplesortstate *state, Size *len, bool forward)
 
bool tuplesort_getdatum (Tuplesortstate *state, bool forward, bool copy, Datum *val, bool *isNull, Datum *abbrev)
 

Macro Definition Documentation

◆ LogicalTapeReadExact

#define LogicalTapeReadExact (   tape,
  ptr,
  len 
)
Value:
do { \
    if (LogicalTapeRead(tape, ptr, len) != (size_t) (len)) \
        elog(ERROR, "unexpected end of data"); \
} while(0)
#define ERROR
Definition: elog.h:39
size_t LogicalTapeRead(LogicalTape *lt, void *ptr, size_t size)
Definition: logtape.c:928

Definition at line 262 of file tuplesort.h.
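
As an illustration only (not part of this header), a read routine might use this macro to pull an exact number of bytes back off a logical tape, erroring out if the tape ends early. The function name below is hypothetical; the allocation goes through tuplesort_readtup_alloc() so the memory is managed by the tuplesort state.

/* Hypothetical sketch of a READTUP-style routine using LogicalTapeReadExact. */
static void
readtup_example(Tuplesortstate *state, SortTuple *stup,
                LogicalTape *tape, unsigned int tuplen)
{
    void   *buf = tuplesort_readtup_alloc(state, tuplen);

    LogicalTapeReadExact(tape, buf, tuplen);
    stup->tuple = buf;
}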

◆ NUM_TUPLESORTMETHODS

#define NUM_TUPLESORTMETHODS   4

Definition at line 84 of file tuplesort.h.

◆ PARALLEL_SORT

#define PARALLEL_SORT (   coordinate)
Value:
(coordinate == NULL || \
(coordinate)->sharedsort == NULL ? 0 : \
(coordinate)->isWorker ? 1 : 2)

Definition at line 255 of file tuplesort.h.
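
For illustration only (assumed values, not from the source): the macro classifies the caller as serial (0), parallel worker (1), or parallel leader (2) based on its SortCoordinate; 'sharedsort' below stands in for shared state previously set up by tuplesort_initialize_shared().

SortCoordinateData coord = {0};

Assert(PARALLEL_SORT((SortCoordinate) NULL) == 0);  /* serial sort: no coordinate */

coord.sharedsort = sharedsort;      /* assumed Sharedsort pointer from the leader */
coord.isWorker = true;
Assert(PARALLEL_SORT(&coord) == 1); /* parallel worker */

coord.isWorker = false;
coord.nParticipants = 2;
Assert(PARALLEL_SORT(&coord) == 2); /* parallel leader */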

◆ TUPLESORT_ALLOWBOUNDED

#define TUPLESORT_ALLOWBOUNDED   (1 << 1)

Definition at line 99 of file tuplesort.h.

◆ TUPLESORT_NONE

#define TUPLESORT_NONE   0

Definition at line 93 of file tuplesort.h.

◆ TUPLESORT_RANDOMACCESS

#define TUPLESORT_RANDOMACCESS   (1 << 0)

Definition at line 96 of file tuplesort.h.

◆ TuplesortstateGetPublic

#define TuplesortstateGetPublic (   state)    ((TuplesortPublic *) state)

Definition at line 259 of file tuplesort.h.

◆ TupleSortUseBumpTupleCxt

#define TupleSortUseBumpTupleCxt (   opt)    (((opt) & TUPLESORT_ALLOWBOUNDED) == 0)

Definition at line 108 of file tuplesort.h.

Typedef Documentation

◆ Sharedsort

typedef struct Sharedsort Sharedsort

Definition at line 38 of file tuplesort.h.

◆ SortCoordinate

Definition at line 61 of file tuplesort.h.

◆ SortCoordinateData

◆ SortTupleComparator

typedef int(* SortTupleComparator) (const SortTuple *a, const SortTuple *b, Tuplesortstate *state)

Definition at line 155 of file tuplesort.h.

◆ TuplesortInstrumentation

◆ Tuplesortstate

Definition at line 37 of file tuplesort.h.

Enumeration Type Documentation

◆ TuplesortMethod

Enumerator
SORT_TYPE_STILL_IN_PROGRESS 
SORT_TYPE_TOP_N_HEAPSORT 
SORT_TYPE_QUICKSORT 
SORT_TYPE_EXTERNAL_SORT 
SORT_TYPE_EXTERNAL_MERGE 

Definition at line 75 of file tuplesort.h.

 76{
 77 SORT_TYPE_STILL_IN_PROGRESS = 0,
 78 SORT_TYPE_TOP_N_HEAPSORT = 1 << 0,
 79 SORT_TYPE_QUICKSORT = 1 << 1,
 80 SORT_TYPE_EXTERNAL_SORT = 1 << 2,
 81 SORT_TYPE_EXTERNAL_MERGE = 1 << 3,
 82} TuplesortMethod;
TuplesortMethod
Definition: tuplesort.h:76
@ SORT_TYPE_EXTERNAL_SORT
Definition: tuplesort.h:80
@ SORT_TYPE_TOP_N_HEAPSORT
Definition: tuplesort.h:78
@ SORT_TYPE_QUICKSORT
Definition: tuplesort.h:79
@ SORT_TYPE_STILL_IN_PROGRESS
Definition: tuplesort.h:77
@ SORT_TYPE_EXTERNAL_MERGE
Definition: tuplesort.h:81

◆ TuplesortSpaceType

Enumerator
SORT_SPACE_TYPE_DISK 
SORT_SPACE_TYPE_MEMORY 

Definition at line 86 of file tuplesort.h.

 87{
 88 SORT_SPACE_TYPE_DISK,
 89 SORT_SPACE_TYPE_MEMORY,
 90} TuplesortSpaceType;
TuplesortSpaceType
Definition: tuplesort.h:87
@ SORT_SPACE_TYPE_DISK
Definition: tuplesort.h:88
@ SORT_SPACE_TYPE_MEMORY
Definition: tuplesort.h:89

Function Documentation

◆ tuplesort_attach_shared()

void tuplesort_attach_shared ( Sharedsort *  shared,
dsm_segment *  seg 
)

Definition at line 2961 of file tuplesort.c.

2962{
2963 /* Attach to SharedFileSet */
2964 SharedFileSetAttach(&shared->fileset, seg);
2965}
void SharedFileSetAttach(SharedFileSet *fileset, dsm_segment *seg)
Definition: sharedfileset.c:56
SharedFileSet fileset
Definition: tuplesort.c:360

References Sharedsort::fileset, and SharedFileSetAttach().

Referenced by _brin_parallel_build_main(), and _bt_parallel_build_main().

◆ tuplesort_begin_cluster()

Tuplesortstate * tuplesort_begin_cluster ( TupleDesc  tupDesc,
Relation  indexRel,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 241 of file tuplesortvariants.c.

245{
246 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
247 sortopt);
249 BTScanInsert indexScanKey;
250 MemoryContext oldcontext;
252 int i;
253
254 Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
255
256 oldcontext = MemoryContextSwitchTo(base->maincontext);
258
259 if (trace_sort)
260 elog(LOG,
261 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
263 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
264
266
267 TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
268 false, /* no unique check */
269 base->nKeys,
270 workMem,
271 sortopt & TUPLESORT_RANDOMACCESS,
272 PARALLEL_SORT(coordinate));
273
278 base->readtup = readtup_cluster;
280 base->arg = arg;
281
282 arg->indexInfo = BuildIndexInfo(indexRel);
283
284 /*
285 * If we don't have a simple leading attribute, we don't currently
286 * initialize datum1, so disable optimizations that require it.
287 */
288 if (arg->indexInfo->ii_IndexAttrNumbers[0] == 0)
289 base->haveDatum1 = false;
290 else
291 base->haveDatum1 = true;
292
293 arg->tupDesc = tupDesc; /* assume we need not copy tupDesc */
294
295 indexScanKey = _bt_mkscankey(indexRel, NULL);
296
297 if (arg->indexInfo->ii_Expressions != NULL)
298 {
299 TupleTableSlot *slot;
300 ExprContext *econtext;
301
302 /*
303 * We will need to use FormIndexDatum to evaluate the index
304 * expressions. To do that, we need an EState, as well as a
305 * TupleTableSlot to put the table tuples into. The econtext's
306 * scantuple has to point to that slot, too.
307 */
308 arg->estate = CreateExecutorState();
310 econtext = GetPerTupleExprContext(arg->estate);
311 econtext->ecxt_scantuple = slot;
312 }
313
314 /* Prepare SortSupport data for each column */
315 base->sortKeys = (SortSupport) palloc0(base->nKeys *
316 sizeof(SortSupportData));
317
318 for (i = 0; i < base->nKeys; i++)
319 {
320 SortSupport sortKey = base->sortKeys + i;
321 ScanKey scanKey = indexScanKey->scankeys + i;
322 int16 strategy;
323
325 sortKey->ssup_collation = scanKey->sk_collation;
326 sortKey->ssup_nulls_first =
327 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
328 sortKey->ssup_attno = scanKey->sk_attno;
329 /* Convey if abbreviation optimization is applicable in principle */
330 sortKey->abbreviate = (i == 0 && base->haveDatum1);
331
332 Assert(sortKey->ssup_attno != 0);
333
334 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
336
337 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
338 }
339
340 pfree(indexScanKey);
341
342 MemoryContextSwitchTo(oldcontext);
343
344 return state;
345}
#define Assert(condition)
Definition: c.h:812
int16_t int16
Definition: c.h:480
#define LOG
Definition: elog.h:31
#define elog(elevel,...)
Definition: elog.h:225
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1425
const TupleTableSlotOps TTSOpsHeapTuple
Definition: execTuples.c:85
EState * CreateExecutorState(void)
Definition: execUtils.c:88
#define GetPerTupleExprContext(estate)
Definition: executor.h:563
IndexInfo * BuildIndexInfo(Relation index)
Definition: index.c:2426
int i
Definition: isn.c:72
void pfree(void *pointer)
Definition: mcxt.c:1521
void * palloc0(Size size)
Definition: mcxt.c:1347
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
#define SK_BT_NULLS_FIRST
Definition: nbtree.h:1118
#define SK_BT_DESC
Definition: nbtree.h:1117
BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup)
Definition: nbtutils.c:129
void * arg
MemoryContextSwitchTo(old_ctx)
#define RelationGetNumberOfAttributes(relation)
Definition: rel.h:511
#define IndexRelationGetNumberOfKeyAttributes(relation)
Definition: rel.h:524
void PrepareSortSupportFromIndexRel(Relation indexRel, int16 strategy, SortSupport ssup)
Definition: sortsupport.c:161
struct SortSupportData * SortSupport
Definition: sortsupport.h:58
#define BTGreaterStrategyNumber
Definition: stratnum.h:33
#define BTLessStrategyNumber
Definition: stratnum.h:29
ScanKeyData scankeys[INDEX_MAX_KEYS]
Definition: nbtree.h:793
TupleTableSlot * ecxt_scantuple
Definition: execnodes.h:258
Form_pg_class rd_rel
Definition: rel.h:111
int sk_flags
Definition: skey.h:66
Oid sk_collation
Definition: skey.h:70
AttrNumber sk_attno
Definition: skey.h:67
AttrNumber ssup_attno
Definition: sortsupport.h:81
bool ssup_nulls_first
Definition: sortsupport.h:75
MemoryContext ssup_cxt
Definition: sortsupport.h:66
MemoryContext maincontext
Definition: tuplesort.h:218
void(* writetup)(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
Definition: tuplesort.h:194
void(* removeabbrev)(Tuplesortstate *state, SortTuple *stups, int count)
Definition: tuplesort.h:187
void(* freestate)(Tuplesortstate *state)
Definition: tuplesort.h:212
void(* readtup)(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
Definition: tuplesort.h:203
SortTupleComparator comparetup
Definition: tuplesort.h:174
SortSupport sortKeys
Definition: tuplesort.h:235
SortTupleComparator comparetup_tiebreak
Definition: tuplesort.h:181
Definition: regguts.h:323
Tuplesortstate * tuplesort_begin_common(int workMem, SortCoordinate coordinate, int sortopt)
Definition: tuplesort.c:642
bool trace_sort
Definition: tuplesort.c:124
#define PARALLEL_SORT(coordinate)
Definition: tuplesort.h:255
#define TUPLESORT_RANDOMACCESS
Definition: tuplesort.h:96
#define TuplesortstateGetPublic(state)
Definition: tuplesort.h:259
static int comparetup_cluster_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void readtup_cluster(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int tuplen)
#define CLUSTER_SORT
static void freestate_cluster(Tuplesortstate *state)
static int comparetup_cluster(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void writetup_cluster(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void removeabbrev_cluster(Tuplesortstate *state, SortTuple *stups, int count)

References _bt_mkscankey(), SortSupportData::abbreviate, arg, TuplesortPublic::arg, Assert, BTGreaterStrategyNumber, BTLessStrategyNumber, BuildIndexInfo(), CLUSTER_SORT, TuplesortPublic::comparetup, comparetup_cluster(), comparetup_cluster_tiebreak(), TuplesortPublic::comparetup_tiebreak, CreateExecutorState(), CurrentMemoryContext, ExprContext::ecxt_scantuple, elog, TuplesortPublic::freestate, freestate_cluster(), GetPerTupleExprContext, TuplesortPublic::haveDatum1, i, IndexRelationGetNumberOfKeyAttributes, LOG, TuplesortPublic::maincontext, MakeSingleTupleTableSlot(), MemoryContextSwitchTo(), TuplesortPublic::nKeys, palloc0(), PARALLEL_SORT, pfree(), PrepareSortSupportFromIndexRel(), RelationData::rd_rel, TuplesortPublic::readtup, readtup_cluster(), RelationGetNumberOfAttributes, TuplesortPublic::removeabbrev, removeabbrev_cluster(), BTScanInsertData::scankeys, ScanKeyData::sk_attno, SK_BT_DESC, SK_BT_NULLS_FIRST, ScanKeyData::sk_collation, ScanKeyData::sk_flags, TuplesortPublic::sortKeys, SortSupportData::ssup_attno, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, trace_sort, TTSOpsHeapTuple, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_cluster().

Referenced by heapam_relation_copy_for_cluster().

◆ tuplesort_begin_common()

Tuplesortstate * tuplesort_begin_common ( int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 642 of file tuplesort.c.

643{
645 MemoryContext maincontext;
646 MemoryContext sortcontext;
647 MemoryContext oldcontext;
648
649 /* See leader_takeover_tapes() remarks on random access support */
650 if (coordinate && (sortopt & TUPLESORT_RANDOMACCESS))
651 elog(ERROR, "random access disallowed under parallel sort");
652
653 /*
654 * Memory context surviving tuplesort_reset. This memory context holds
655 * data which is useful to keep while sorting multiple similar batches.
656 */
658 "TupleSort main",
660
661 /*
662 * Create a working memory context for one sort operation. The content of
663 * this context is deleted by tuplesort_reset.
664 */
665 sortcontext = AllocSetContextCreate(maincontext,
666 "TupleSort sort",
668
669 /*
670 * Additionally a working memory context for tuples is setup in
671 * tuplesort_begin_batch.
672 */
673
674 /*
675 * Make the Tuplesortstate within the per-sortstate context. This way, we
676 * don't need a separate pfree() operation for it at shutdown.
677 */
678 oldcontext = MemoryContextSwitchTo(maincontext);
679
681
682 if (trace_sort)
683 pg_rusage_init(&state->ru_start);
684
685 state->base.sortopt = sortopt;
686 state->base.tuples = true;
687 state->abbrevNext = 10;
688
689 /*
690 * workMem is forced to be at least 64KB, the current minimum valid value
691 * for the work_mem GUC. This is a defense against parallel sort callers
692 * that divide out memory among many workers in a way that leaves each
693 * with very little memory.
694 */
695 state->allowedMem = Max(workMem, 64) * (int64) 1024;
696 state->base.sortcontext = sortcontext;
697 state->base.maincontext = maincontext;
698
699 /*
700 * Initial size of array must be more than ALLOCSET_SEPARATE_THRESHOLD;
701 * see comments in grow_memtuples().
702 */
703 state->memtupsize = INITIAL_MEMTUPSIZE;
704 state->memtuples = NULL;
705
706 /*
707 * After all of the other non-parallel-related state, we setup all of the
708 * state needed for each batch.
709 */
711
712 /*
713 * Initialize parallel-related state based on coordination information
714 * from caller
715 */
716 if (!coordinate)
717 {
718 /* Serial sort */
719 state->shared = NULL;
720 state->worker = -1;
721 state->nParticipants = -1;
722 }
723 else if (coordinate->isWorker)
724 {
725 /* Parallel worker produces exactly one final run from all input */
726 state->shared = coordinate->sharedsort;
728 state->nParticipants = -1;
729 }
730 else
731 {
732 /* Parallel leader state only used for final merge */
733 state->shared = coordinate->sharedsort;
734 state->worker = -1;
735 state->nParticipants = coordinate->nParticipants;
736 Assert(state->nParticipants >= 1);
737 }
738
739 MemoryContextSwitchTo(oldcontext);
740
741 return state;
742}
#define Max(x, y)
Definition: c.h:952
int64_t int64
Definition: c.h:482
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
Sharedsort * sharedsort
Definition: tuplesort.h:58
#define INITIAL_MEMTUPSIZE
Definition: tuplesort.c:120
static int worker_get_identifier(Tuplesortstate *state)
Definition: tuplesort.c:2981
static void tuplesort_begin_batch(Tuplesortstate *state)
Definition: tuplesort.c:752

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert, CurrentMemoryContext, elog, ERROR, INITIAL_MEMTUPSIZE, SortCoordinateData::isWorker, Max, MemoryContextSwitchTo(), SortCoordinateData::nParticipants, palloc0(), pg_rusage_init(), SortCoordinateData::sharedsort, trace_sort, tuplesort_begin_batch(), TUPLESORT_RANDOMACCESS, and worker_get_identifier().

Referenced by tuplesort_begin_cluster(), tuplesort_begin_datum(), tuplesort_begin_heap(), tuplesort_begin_index_brin(), tuplesort_begin_index_btree(), tuplesort_begin_index_gist(), and tuplesort_begin_index_hash().

◆ tuplesort_begin_datum()

Tuplesortstate * tuplesort_begin_datum ( Oid  datumType,
Oid  sortOperator,
Oid  sortCollation,
bool  nullsFirstFlag,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 572 of file tuplesortvariants.c.

575{
576 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
577 sortopt);
580 MemoryContext oldcontext;
581 int16 typlen;
582 bool typbyval;
583
584 oldcontext = MemoryContextSwitchTo(base->maincontext);
586
587 if (trace_sort)
588 elog(LOG,
589 "begin datum sort: workMem = %d, randomAccess = %c",
590 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
591
592 base->nKeys = 1; /* always a one-column sort */
593
594 TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
595 false, /* no unique check */
596 1,
597 workMem,
598 sortopt & TUPLESORT_RANDOMACCESS,
599 PARALLEL_SORT(coordinate));
600
604 base->writetup = writetup_datum;
605 base->readtup = readtup_datum;
606 base->haveDatum1 = true;
607 base->arg = arg;
608
609 arg->datumType = datumType;
610
611 /* lookup necessary attributes of the datum type */
612 get_typlenbyval(datumType, &typlen, &typbyval);
613 arg->datumTypeLen = typlen;
614 base->tuples = !typbyval;
615
616 /* Prepare SortSupport data */
617 base->sortKeys = (SortSupport) palloc0(sizeof(SortSupportData));
618
620 base->sortKeys->ssup_collation = sortCollation;
621 base->sortKeys->ssup_nulls_first = nullsFirstFlag;
622
623 /*
624 * Abbreviation is possible here only for by-reference types. In theory,
625 * a pass-by-value datatype could have an abbreviated form that is cheaper
626 * to compare. In a tuple sort, we could support that, because we can
627 * always extract the original datum from the tuple as needed. Here, we
628 * can't, because a datum sort only stores a single copy of the datum; the
629 * "tuple" field of each SortTuple is NULL.
630 */
631 base->sortKeys->abbreviate = !typbyval;
632
633 PrepareSortSupportFromOrderingOp(sortOperator, base->sortKeys);
634
635 /*
636 * The "onlyKey" optimization cannot be used with abbreviated keys, since
637 * tie-breaker comparisons may be required. Typically, the optimization
638 * is only of value to pass-by-value types anyway, whereas abbreviated
639 * keys are typically only of value to pass-by-reference types.
640 */
641 if (!base->sortKeys->abbrev_converter)
642 base->onlyKey = base->sortKeys;
643
644 MemoryContextSwitchTo(oldcontext);
645
646 return state;
647}
void get_typlenbyval(Oid typid, int16 *typlen, bool *typbyval)
Definition: lsyscache.c:2251
void * palloc(Size size)
Definition: mcxt.c:1317
void PrepareSortSupportFromOrderingOp(Oid orderingOp, SortSupport ssup)
Definition: sortsupport.c:134
Datum(* abbrev_converter)(Datum original, SortSupport ssup)
Definition: sortsupport.h:172
SortSupport onlyKey
Definition: tuplesort.h:245
static void removeabbrev_datum(Tuplesortstate *state, SortTuple *stups, int count)
static int comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_datum_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void readtup_datum(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static void writetup_datum(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
#define DATUM_SORT

References SortSupportData::abbrev_converter, SortSupportData::abbreviate, arg, TuplesortPublic::arg, TuplesortPublic::comparetup, comparetup_datum(), comparetup_datum_tiebreak(), TuplesortPublic::comparetup_tiebreak, CurrentMemoryContext, DATUM_SORT, elog, get_typlenbyval(), TuplesortPublic::haveDatum1, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, TuplesortPublic::onlyKey, palloc(), palloc0(), PARALLEL_SORT, PrepareSortSupportFromOrderingOp(), TuplesortPublic::readtup, readtup_datum(), TuplesortPublic::removeabbrev, removeabbrev_datum(), TuplesortPublic::sortKeys, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, trace_sort, TuplesortPublic::tuples, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_datum().

Referenced by ExecSort(), initialize_aggregate(), ordered_set_startup(), and validate_index().
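
A minimal serial lifecycle sketch for a datum sort (illustrative only; INT4OID, Int4LessOperator and work_mem are assumptions standing in for whatever type, "<" operator and memory budget the caller actually has):

Tuplesortstate *sortstate;
Datum       val;
bool        isnull;

sortstate = tuplesort_begin_datum(INT4OID,
                                  Int4LessOperator, /* "<" operator of the datum type */
                                  InvalidOid,       /* default collation */
                                  false,            /* nulls sort last */
                                  work_mem,
                                  NULL,             /* serial: no coordinate */
                                  TUPLESORT_NONE);

tuplesort_putdatum(sortstate, Int32GetDatum(42), false);
tuplesort_putdatum(sortstate, Int32GetDatum(7), false);

tuplesort_performsort(sortstate);

while (tuplesort_getdatum(sortstate, true, false, &val, &isnull, NULL))
    elog(LOG, "next value: %d", DatumGetInt32(val));

tuplesort_end(sortstate);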

◆ tuplesort_begin_heap()

Tuplesortstate * tuplesort_begin_heap ( TupleDesc  tupDesc,
int  nkeys,
AttrNumber *  attNums,
Oid *  sortOperators,
Oid *  sortCollations,
bool *  nullsFirstFlags,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 168 of file tuplesortvariants.c.

173{
174 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
175 sortopt);
177 MemoryContext oldcontext;
178 int i;
179
180 oldcontext = MemoryContextSwitchTo(base->maincontext);
181
182 Assert(nkeys > 0);
183
184 if (trace_sort)
185 elog(LOG,
186 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
187 nkeys, workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
188
189 base->nKeys = nkeys;
190
191 TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
192 false, /* no unique check */
193 nkeys,
194 workMem,
195 sortopt & TUPLESORT_RANDOMACCESS,
196 PARALLEL_SORT(coordinate));
197
201 base->writetup = writetup_heap;
202 base->readtup = readtup_heap;
203 base->haveDatum1 = true;
204 base->arg = tupDesc; /* assume we need not copy tupDesc */
205
206 /* Prepare SortSupport data for each column */
207 base->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
208
209 for (i = 0; i < nkeys; i++)
210 {
211 SortSupport sortKey = base->sortKeys + i;
212
213 Assert(attNums[i] != 0);
214 Assert(sortOperators[i] != 0);
215
217 sortKey->ssup_collation = sortCollations[i];
218 sortKey->ssup_nulls_first = nullsFirstFlags[i];
219 sortKey->ssup_attno = attNums[i];
220 /* Convey if abbreviation optimization is applicable in principle */
221 sortKey->abbreviate = (i == 0 && base->haveDatum1);
222
223 PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
224 }
225
226 /*
227 * The "onlyKey" optimization cannot be used with abbreviated keys, since
228 * tie-breaker comparisons may be required. Typically, the optimization
229 * is only of value to pass-by-value types anyway, whereas abbreviated
230 * keys are typically only of value to pass-by-reference types.
231 */
232 if (nkeys == 1 && !base->sortKeys->abbrev_converter)
233 base->onlyKey = base->sortKeys;
234
235 MemoryContextSwitchTo(oldcontext);
236
237 return state;
238}
static void readtup_heap(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static int comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void writetup_heap(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static int comparetup_heap_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void removeabbrev_heap(Tuplesortstate *state, SortTuple *stups, int count)
#define HEAP_SORT

References SortSupportData::abbrev_converter, SortSupportData::abbreviate, TuplesortPublic::arg, Assert, TuplesortPublic::comparetup, comparetup_heap(), comparetup_heap_tiebreak(), TuplesortPublic::comparetup_tiebreak, CurrentMemoryContext, elog, TuplesortPublic::haveDatum1, HEAP_SORT, i, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, TuplesortPublic::onlyKey, palloc0(), PARALLEL_SORT, PrepareSortSupportFromOrderingOp(), TuplesortPublic::readtup, readtup_heap(), TuplesortPublic::removeabbrev, removeabbrev_heap(), TuplesortPublic::sortKeys, SortSupportData::ssup_attno, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_heap().

Referenced by ExecIncrementalSort(), ExecSort(), initialize_aggregate(), initialize_phase(), ordered_set_startup(), and switchToPresortedPrefixMode().
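
A minimal slot-based lifecycle sketch (illustrative only): sort incoming tuples on their first attribute, serially and without random access. tupDesc, slot, the Int4LessOperator key and the two helper functions are assumptions supplied by the caller.

AttrNumber  attNums[1]       = {1};
Oid         sortOperators[1] = {Int4LessOperator};  /* assumed: column 1 is int4 */
Oid         collations[1]    = {InvalidOid};
bool        nullsFirst[1]    = {false};
Tuplesortstate *sortstate;

sortstate = tuplesort_begin_heap(tupDesc, 1,
                                 attNums, sortOperators, collations, nullsFirst,
                                 work_mem, NULL, TUPLESORT_NONE);

while (fetch_next_slot(slot))           /* assumed input producer */
    tuplesort_puttupleslot(sortstate, slot);

tuplesort_performsort(sortstate);

/* copy = false: returned data is only valid until the next fetch */
while (tuplesort_gettupleslot(sortstate, true, false, slot, NULL))
    process_sorted_slot(slot);          /* assumed consumer */

tuplesort_end(sortstate);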

◆ tuplesort_begin_index_brin()

Tuplesortstate * tuplesort_begin_index_brin ( int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 545 of file tuplesortvariants.c.

548{
549 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
550 sortopt);
552
553 if (trace_sort)
554 elog(LOG,
555 "begin index sort: workMem = %d, randomAccess = %c",
556 workMem,
557 sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
558
559 base->nKeys = 1; /* Only one sort column, the block number */
560
565 base->haveDatum1 = true;
566 base->arg = NULL;
567
568 return state;
569}
static void writetup_index_brin(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void removeabbrev_index_brin(Tuplesortstate *state, SortTuple *stups, int count)
static void readtup_index_brin(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static int comparetup_index_brin(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)

References TuplesortPublic::arg, TuplesortPublic::comparetup, comparetup_index_brin(), elog, TuplesortPublic::haveDatum1, LOG, TuplesortPublic::nKeys, TuplesortPublic::readtup, readtup_index_brin(), TuplesortPublic::removeabbrev, removeabbrev_index_brin(), trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_index_brin().

Referenced by _brin_parallel_scan_and_build(), and brinbuild().

◆ tuplesort_begin_index_btree()

Tuplesortstate * tuplesort_begin_index_btree ( Relation  heapRel,
Relation  indexRel,
bool  enforceUnique,
bool  uniqueNullsNotDistinct,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 348 of file tuplesortvariants.c.

355{
356 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
357 sortopt);
359 BTScanInsert indexScanKey;
361 MemoryContext oldcontext;
362 int i;
363
364 oldcontext = MemoryContextSwitchTo(base->maincontext);
366
367 if (trace_sort)
368 elog(LOG,
369 "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
370 enforceUnique ? 't' : 'f',
371 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
372
374
375 TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
376 enforceUnique,
377 base->nKeys,
378 workMem,
379 sortopt & TUPLESORT_RANDOMACCESS,
380 PARALLEL_SORT(coordinate));
381
385 base->writetup = writetup_index;
386 base->readtup = readtup_index;
387 base->haveDatum1 = true;
388 base->arg = arg;
389
390 arg->index.heapRel = heapRel;
391 arg->index.indexRel = indexRel;
392 arg->enforceUnique = enforceUnique;
393 arg->uniqueNullsNotDistinct = uniqueNullsNotDistinct;
394
395 indexScanKey = _bt_mkscankey(indexRel, NULL);
396
397 /* Prepare SortSupport data for each column */
398 base->sortKeys = (SortSupport) palloc0(base->nKeys *
399 sizeof(SortSupportData));
400
401 for (i = 0; i < base->nKeys; i++)
402 {
403 SortSupport sortKey = base->sortKeys + i;
404 ScanKey scanKey = indexScanKey->scankeys + i;
405 int16 strategy;
406
408 sortKey->ssup_collation = scanKey->sk_collation;
409 sortKey->ssup_nulls_first =
410 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
411 sortKey->ssup_attno = scanKey->sk_attno;
412 /* Convey if abbreviation optimization is applicable in principle */
413 sortKey->abbreviate = (i == 0 && base->haveDatum1);
414
415 Assert(sortKey->ssup_attno != 0);
416
417 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
419
420 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
421 }
422
423 pfree(indexScanKey);
424
425 MemoryContextSwitchTo(oldcontext);
426
427 return state;
428}
static int comparetup_index_btree_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_index_btree(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void readtup_index(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static void removeabbrev_index(Tuplesortstate *state, SortTuple *stups, int count)
#define INDEX_SORT
static void writetup_index(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)

References _bt_mkscankey(), SortSupportData::abbreviate, arg, TuplesortPublic::arg, Assert, BTGreaterStrategyNumber, BTLessStrategyNumber, TuplesortPublic::comparetup, comparetup_index_btree(), comparetup_index_btree_tiebreak(), TuplesortPublic::comparetup_tiebreak, CurrentMemoryContext, elog, TuplesortPublic::haveDatum1, i, INDEX_SORT, IndexRelationGetNumberOfKeyAttributes, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, palloc(), palloc0(), PARALLEL_SORT, pfree(), PrepareSortSupportFromIndexRel(), TuplesortPublic::readtup, readtup_index(), TuplesortPublic::removeabbrev, removeabbrev_index(), BTScanInsertData::scankeys, ScanKeyData::sk_attno, SK_BT_DESC, SK_BT_NULLS_FIRST, ScanKeyData::sk_collation, ScanKeyData::sk_flags, TuplesortPublic::sortKeys, SortSupportData::ssup_attno, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_index().

Referenced by _bt_parallel_scan_and_sort(), and _bt_spools_heapscan().
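
Roughly the shape of a serial btree build, as an illustrative sketch; heapRel, indexRel, the per-tuple tid/values/isnull arrays and insert_into_index() are assumptions standing in for what _bt_spools_heapscan() and _bt_load() actually do.

Tuplesortstate *sortstate;
ItemPointerData tid;
Datum       values[INDEX_MAX_KEYS];
bool        isnull[INDEX_MAX_KEYS];
IndexTuple  itup;

sortstate = tuplesort_begin_index_btree(heapRel, indexRel,
                                        true,   /* enforce uniqueness */
                                        false,  /* NULLS DISTINCT semantics */
                                        maintenance_work_mem,
                                        NULL, TUPLESORT_NONE);

/* once per heap tuple, with tid/values/isnull filled in by the scan */
tuplesort_putindextuplevalues(sortstate, indexRel, &tid, values, isnull);

tuplesort_performsort(sortstate);

while ((itup = tuplesort_getindextuple(sortstate, true)) != NULL)
    insert_into_index(itup);            /* assumed consumer */

tuplesort_end(sortstate);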

◆ tuplesort_begin_index_gist()

Tuplesortstate * tuplesort_begin_index_gist ( Relation  heapRel,
Relation  indexRel,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 482 of file tuplesortvariants.c.

487{
488 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
489 sortopt);
491 MemoryContext oldcontext;
493 int i;
494
495 oldcontext = MemoryContextSwitchTo(base->maincontext);
497
498 if (trace_sort)
499 elog(LOG,
500 "begin index sort: workMem = %d, randomAccess = %c",
501 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
502
504
508 base->writetup = writetup_index;
509 base->readtup = readtup_index;
510 base->haveDatum1 = true;
511 base->arg = arg;
512
513 arg->index.heapRel = heapRel;
514 arg->index.indexRel = indexRel;
515 arg->enforceUnique = false;
516 arg->uniqueNullsNotDistinct = false;
517
518 /* Prepare SortSupport data for each column */
519 base->sortKeys = (SortSupport) palloc0(base->nKeys *
520 sizeof(SortSupportData));
521
522 for (i = 0; i < base->nKeys; i++)
523 {
524 SortSupport sortKey = base->sortKeys + i;
525
527 sortKey->ssup_collation = indexRel->rd_indcollation[i];
528 sortKey->ssup_nulls_first = false;
529 sortKey->ssup_attno = i + 1;
530 /* Convey if abbreviation optimization is applicable in principle */
531 sortKey->abbreviate = (i == 0 && base->haveDatum1);
532
533 Assert(sortKey->ssup_attno != 0);
534
535 /* Look for a sort support function */
536 PrepareSortSupportFromGistIndexRel(indexRel, sortKey);
537 }
538
539 MemoryContextSwitchTo(oldcontext);
540
541 return state;
542}
void PrepareSortSupportFromGistIndexRel(Relation indexRel, SortSupport ssup)
Definition: sortsupport.c:188
Oid * rd_indcollation
Definition: rel.h:217

References SortSupportData::abbreviate, arg, TuplesortPublic::arg, Assert, TuplesortPublic::comparetup, comparetup_index_btree(), comparetup_index_btree_tiebreak(), TuplesortPublic::comparetup_tiebreak, CurrentMemoryContext, elog, TuplesortPublic::haveDatum1, i, IndexRelationGetNumberOfKeyAttributes, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, palloc(), palloc0(), PrepareSortSupportFromGistIndexRel(), RelationData::rd_indcollation, TuplesortPublic::readtup, readtup_index(), TuplesortPublic::removeabbrev, removeabbrev_index(), TuplesortPublic::sortKeys, SortSupportData::ssup_attno, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_index().

Referenced by gistbuild().

◆ tuplesort_begin_index_hash()

Tuplesortstate * tuplesort_begin_index_hash ( Relation  heapRel,
Relation  indexRel,
uint32  high_mask,
uint32  low_mask,
uint32  max_buckets,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 431 of file tuplesortvariants.c.

439{
440 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
441 sortopt);
443 MemoryContext oldcontext;
445
446 oldcontext = MemoryContextSwitchTo(base->maincontext);
448
449 if (trace_sort)
450 elog(LOG,
451 "begin index sort: high_mask = 0x%x, low_mask = 0x%x, "
452 "max_buckets = 0x%x, workMem = %d, randomAccess = %c",
453 high_mask,
454 low_mask,
455 max_buckets,
456 workMem,
457 sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
458
459 base->nKeys = 1; /* Only one sort column, the hash code */
460
464 base->writetup = writetup_index;
465 base->readtup = readtup_index;
466 base->haveDatum1 = true;
467 base->arg = arg;
468
469 arg->index.heapRel = heapRel;
470 arg->index.indexRel = indexRel;
471
472 arg->high_mask = high_mask;
473 arg->low_mask = low_mask;
474 arg->max_buckets = max_buckets;
475
476 MemoryContextSwitchTo(oldcontext);
477
478 return state;
479}
static int comparetup_index_hash(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_index_hash_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)

References arg, TuplesortPublic::arg, TuplesortPublic::comparetup, comparetup_index_hash(), comparetup_index_hash_tiebreak(), TuplesortPublic::comparetup_tiebreak, elog, TuplesortPublic::haveDatum1, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, palloc(), TuplesortPublic::readtup, readtup_index(), TuplesortPublic::removeabbrev, removeabbrev_index(), trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_index().

Referenced by _h_spoolinit().

◆ tuplesort_end()

void tuplesort_end ( Tuplesortstate *  state)

Definition at line 951 of file tuplesort.c.

952{
954
955 /*
956 * Free the main memory context, including the Tuplesortstate struct
957 * itself.
958 */
959 MemoryContextDelete(state->base.maincontext);
960}
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:454
static void tuplesort_free(Tuplesortstate *state)
Definition: tuplesort.c:897

References MemoryContextDelete(), and tuplesort_free().

Referenced by _brin_parallel_merge(), _brin_parallel_scan_and_build(), _bt_parallel_scan_and_sort(), _bt_spooldestroy(), _h_spooldestroy(), ExecEndAgg(), ExecEndIncrementalSort(), ExecEndSort(), ExecReScanAgg(), ExecReScanSort(), gistbuild(), heapam_relation_copy_for_cluster(), initialize_aggregate(), initialize_phase(), ordered_set_shutdown(), process_ordered_aggregate_multi(), process_ordered_aggregate_single(), and validate_index().

◆ tuplesort_estimate_shared()

Size tuplesort_estimate_shared ( int  nWorkers)

Definition at line 2917 of file tuplesort.c.

2918{
2919 Size tapesSize;
2920
2921 Assert(nWorkers > 0);
2922
2923 /* Make sure that BufFile shared state is MAXALIGN'd */
2924 tapesSize = mul_size(sizeof(TapeShare), nWorkers);
2925 tapesSize = MAXALIGN(add_size(tapesSize, offsetof(Sharedsort, tapes)));
2926
2927 return tapesSize;
2928}
#define MAXALIGN(LEN)
Definition: c.h:765
size_t Size
Definition: c.h:559
Size add_size(Size s1, Size s2)
Definition: shmem.c:488
Size mul_size(Size s1, Size s2)
Definition: shmem.c:505

References add_size(), Assert, MAXALIGN, and mul_size().

Referenced by _brin_begin_parallel(), and _bt_begin_parallel().

◆ tuplesort_get_stats()

void tuplesort_get_stats ( Tuplesortstate *  state,
TuplesortInstrumentation *  stats 
)

Definition at line 2499 of file tuplesort.c.

2501{
2502 /*
2503 * Note: it might seem we should provide both memory and disk usage for a
2504 * disk-based sort. However, the current code doesn't track memory space
2505 * accurately once we have begun to return tuples to the caller (since we
2506 * don't account for pfree's the caller is expected to do), so we cannot
2507 * rely on availMem in a disk sort. This does not seem worth the overhead
2508 * to fix. Is it worth creating an API for the memory context code to
2509 * tell us how much is actually used in sortcontext?
2510 */
2512
2513 if (state->isMaxSpaceDisk)
2515 else
2517 stats->spaceUsed = (state->maxSpace + 1023) / 1024;
2518
2519 switch (state->maxSpaceStatus)
2520 {
2521 case TSS_SORTEDINMEM:
2522 if (state->boundUsed)
2524 else
2526 break;
2527 case TSS_SORTEDONTAPE:
2529 break;
2530 case TSS_FINALMERGE:
2532 break;
2533 default:
2535 break;
2536 }
2537}
TuplesortMethod sortMethod
Definition: tuplesort.h:112
TuplesortSpaceType spaceType
Definition: tuplesort.h:113
@ TSS_SORTEDONTAPE
Definition: tuplesort.c:160
@ TSS_SORTEDINMEM
Definition: tuplesort.c:159
@ TSS_FINALMERGE
Definition: tuplesort.c:161
static void tuplesort_updatemax(Tuplesortstate *state)
Definition: tuplesort.c:968

References SORT_SPACE_TYPE_DISK, SORT_SPACE_TYPE_MEMORY, SORT_TYPE_EXTERNAL_MERGE, SORT_TYPE_EXTERNAL_SORT, SORT_TYPE_QUICKSORT, SORT_TYPE_STILL_IN_PROGRESS, SORT_TYPE_TOP_N_HEAPSORT, TuplesortInstrumentation::sortMethod, TuplesortInstrumentation::spaceType, TuplesortInstrumentation::spaceUsed, TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and tuplesort_updatemax().

Referenced by ExecSort(), instrumentSortedGroup(), and show_sort_info().
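
A small reporting sketch (illustrative only; sortstate is an assumed Tuplesortstate on which tuplesort_performsort() has already run):

TuplesortInstrumentation stats;

tuplesort_get_stats(sortstate, &stats);
elog(LOG, "sort done: method = %s, %s used: " INT64_FORMAT " kB",
     tuplesort_method_name(stats.sortMethod),
     tuplesort_space_type_name(stats.spaceType),
     stats.spaceUsed);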

◆ tuplesort_getbrintuple()

BrinTuple * tuplesort_getbrintuple ( Tuplesortstate *  state,
Size *  len,
bool  forward 
)

Definition at line 956 of file tuplesortvariants.c.

957{
960 SortTuple stup;
961 BrinSortTuple *btup;
962
963 if (!tuplesort_gettuple_common(state, forward, &stup))
964 stup.tuple = NULL;
965
966 MemoryContextSwitchTo(oldcontext);
967
968 if (!stup.tuple)
969 return NULL;
970
971 btup = (BrinSortTuple *) stup.tuple;
972
973 *len = btup->tuplen;
974
975 return &btup->tuple;
976}
void * tuple
Definition: tuplesort.h:149
MemoryContext sortcontext
Definition: tuplesort.h:220
bool tuplesort_gettuple_common(Tuplesortstate *state, bool forward, SortTuple *stup)
Definition: tuplesort.c:1470

References len, MemoryContextSwitchTo(), TuplesortPublic::sortcontext, BrinSortTuple::tuple, SortTuple::tuple, BrinSortTuple::tuplen, tuplesort_gettuple_common(), and TuplesortstateGetPublic.

Referenced by _brin_parallel_merge().

◆ tuplesort_getdatum()

bool tuplesort_getdatum ( Tuplesortstate *  state,
bool  forward,
bool  copy,
Datum *  val,
bool *  isNull,
Datum *  abbrev 
)

Definition at line 1004 of file tuplesortvariants.c.

1006{
1010 SortTuple stup;
1011
1012 if (!tuplesort_gettuple_common(state, forward, &stup))
1013 {
1014 MemoryContextSwitchTo(oldcontext);
1015 return false;
1016 }
1017
1018 /* Ensure we copy into caller's memory context */
1019 MemoryContextSwitchTo(oldcontext);
1020
1021 /* Record abbreviated key for caller */
1022 if (base->sortKeys->abbrev_converter && abbrev)
1023 *abbrev = stup.datum1;
1024
1025 if (stup.isnull1 || !base->tuples)
1026 {
1027 *val = stup.datum1;
1028 *isNull = stup.isnull1;
1029 }
1030 else
1031 {
1032 /* use stup.tuple because stup.datum1 may be an abbreviation */
1033 if (copy)
1034 *val = datumCopy(PointerGetDatum(stup.tuple), false,
1035 arg->datumTypeLen);
1036 else
1037 *val = PointerGetDatum(stup.tuple);
1038 *isNull = false;
1039 }
1040
1041 return true;
1042}
Datum datumCopy(Datum value, bool typByVal, int typLen)
Definition: datum.c:132
long val
Definition: informix.c:689
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:76
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322

References SortSupportData::abbrev_converter, arg, TuplesortPublic::arg, datumCopy(), if(), MemoryContextSwitchTo(), PointerGetDatum(), TuplesortPublic::sortcontext, TuplesortPublic::sortKeys, TuplesortPublic::tuples, tuplesort_gettuple_common(), TuplesortstateGetPublic, and val.

Referenced by ExecSort(), heapam_index_validate_scan(), mode_final(), percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), percentile_disc_multi_final(), and process_ordered_aggregate_single().

◆ tuplesort_getheaptuple()

HeapTuple tuplesort_getheaptuple ( Tuplesortstate *  state,
bool  forward 
)

Definition at line 914 of file tuplesortvariants.c.

915{
918 SortTuple stup;
919
920 if (!tuplesort_gettuple_common(state, forward, &stup))
921 stup.tuple = NULL;
922
923 MemoryContextSwitchTo(oldcontext);
924
925 return stup.tuple;
926}

References MemoryContextSwitchTo(), TuplesortPublic::sortcontext, SortTuple::tuple, tuplesort_gettuple_common(), and TuplesortstateGetPublic.

Referenced by heapam_relation_copy_for_cluster().

◆ tuplesort_getindextuple()

IndexTuple tuplesort_getindextuple ( Tuplesortstate *  state,
bool  forward 
)

Definition at line 935 of file tuplesortvariants.c.

936{
939 SortTuple stup;
940
941 if (!tuplesort_gettuple_common(state, forward, &stup))
942 stup.tuple = NULL;
943
944 MemoryContextSwitchTo(oldcontext);
945
946 return (IndexTuple) stup.tuple;
947}

References MemoryContextSwitchTo(), TuplesortPublic::sortcontext, SortTuple::tuple, tuplesort_gettuple_common(), and TuplesortstateGetPublic.

Referenced by _bt_load(), _h_indexbuild(), and gist_indexsortbuild().

◆ tuplesort_gettuple_common()

bool tuplesort_gettuple_common ( Tuplesortstate *  state,
bool  forward,
SortTuple *  stup 
)

Definition at line 1470 of file tuplesort.c.

1472{
1473 unsigned int tuplen;
1474 size_t nmoved;
1475
1476 Assert(!WORKER(state));
1477
1478 switch (state->status)
1479 {
1480 case TSS_SORTEDINMEM:
1481 Assert(forward || state->base.sortopt & TUPLESORT_RANDOMACCESS);
1482 Assert(!state->slabAllocatorUsed);
1483 if (forward)
1484 {
1485 if (state->current < state->memtupcount)
1486 {
1487 *stup = state->memtuples[state->current++];
1488 return true;
1489 }
1490 state->eof_reached = true;
1491
1492 /*
1493 * Complain if caller tries to retrieve more tuples than
1494 * originally asked for in a bounded sort. This is because
1495 * returning EOF here might be the wrong thing.
1496 */
1497 if (state->bounded && state->current >= state->bound)
1498 elog(ERROR, "retrieved too many tuples in a bounded sort");
1499
1500 return false;
1501 }
1502 else
1503 {
1504 if (state->current <= 0)
1505 return false;
1506
1507 /*
1508 * if all tuples are fetched already then we return last
1509 * tuple, else - tuple before last returned.
1510 */
1511 if (state->eof_reached)
1512 state->eof_reached = false;
1513 else
1514 {
1515 state->current--; /* last returned tuple */
1516 if (state->current <= 0)
1517 return false;
1518 }
1519 *stup = state->memtuples[state->current - 1];
1520 return true;
1521 }
1522 break;
1523
1524 case TSS_SORTEDONTAPE:
1525 Assert(forward || state->base.sortopt & TUPLESORT_RANDOMACCESS);
1526 Assert(state->slabAllocatorUsed);
1527
1528 /*
1529 * The slot that held the tuple that we returned in previous
1530 * gettuple call can now be reused.
1531 */
1532 if (state->lastReturnedTuple)
1533 {
1534 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
1535 state->lastReturnedTuple = NULL;
1536 }
1537
1538 if (forward)
1539 {
1540 if (state->eof_reached)
1541 return false;
1542
1543 if ((tuplen = getlen(state->result_tape, true)) != 0)
1544 {
1545 READTUP(state, stup, state->result_tape, tuplen);
1546
1547 /*
1548 * Remember the tuple we return, so that we can recycle
1549 * its memory on next call. (This can be NULL, in the
1550 * !state->tuples case).
1551 */
1552 state->lastReturnedTuple = stup->tuple;
1553
1554 return true;
1555 }
1556 else
1557 {
1558 state->eof_reached = true;
1559 return false;
1560 }
1561 }
1562
1563 /*
1564 * Backward.
1565 *
1566 * if all tuples are fetched already then we return last tuple,
1567 * else - tuple before last returned.
1568 */
1569 if (state->eof_reached)
1570 {
1571 /*
1572 * Seek position is pointing just past the zero tuplen at the
1573 * end of file; back up to fetch last tuple's ending length
1574 * word. If seek fails we must have a completely empty file.
1575 */
1576 nmoved = LogicalTapeBackspace(state->result_tape,
1577 2 * sizeof(unsigned int));
1578 if (nmoved == 0)
1579 return false;
1580 else if (nmoved != 2 * sizeof(unsigned int))
1581 elog(ERROR, "unexpected tape position");
1582 state->eof_reached = false;
1583 }
1584 else
1585 {
1586 /*
1587 * Back up and fetch previously-returned tuple's ending length
1588 * word. If seek fails, assume we are at start of file.
1589 */
1590 nmoved = LogicalTapeBackspace(state->result_tape,
1591 sizeof(unsigned int));
1592 if (nmoved == 0)
1593 return false;
1594 else if (nmoved != sizeof(unsigned int))
1595 elog(ERROR, "unexpected tape position");
1596 tuplen = getlen(state->result_tape, false);
1597
1598 /*
1599 * Back up to get ending length word of tuple before it.
1600 */
1601 nmoved = LogicalTapeBackspace(state->result_tape,
1602 tuplen + 2 * sizeof(unsigned int));
1603 if (nmoved == tuplen + sizeof(unsigned int))
1604 {
1605 /*
1606 * We backed up over the previous tuple, but there was no
1607 * ending length word before it. That means that the prev
1608 * tuple is the first tuple in the file. It is now the
1609 * next to read in forward direction (not obviously right,
1610 * but that is what in-memory case does).
1611 */
1612 return false;
1613 }
1614 else if (nmoved != tuplen + 2 * sizeof(unsigned int))
1615 elog(ERROR, "bogus tuple length in backward scan");
1616 }
1617
1618 tuplen = getlen(state->result_tape, false);
1619
1620 /*
1621 * Now we have the length of the prior tuple, back up and read it.
1622 * Note: READTUP expects we are positioned after the initial
1623 * length word of the tuple, so back up to that point.
1624 */
1625 nmoved = LogicalTapeBackspace(state->result_tape,
1626 tuplen);
1627 if (nmoved != tuplen)
1628 elog(ERROR, "bogus tuple length in backward scan");
1629 READTUP(state, stup, state->result_tape, tuplen);
1630
1631 /*
1632 * Remember the tuple we return, so that we can recycle its memory
1633 * on next call. (This can be NULL, in the Datum case).
1634 */
1635 state->lastReturnedTuple = stup->tuple;
1636
1637 return true;
1638
1639 case TSS_FINALMERGE:
1640 Assert(forward);
1641 /* We are managing memory ourselves, with the slab allocator. */
1642 Assert(state->slabAllocatorUsed);
1643
1644 /*
1645 * The slab slot holding the tuple that we returned in previous
1646 * gettuple call can now be reused.
1647 */
1648 if (state->lastReturnedTuple)
1649 {
1650 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
1651 state->lastReturnedTuple = NULL;
1652 }
1653
1654 /*
1655 * This code should match the inner loop of mergeonerun().
1656 */
1657 if (state->memtupcount > 0)
1658 {
1659 int srcTapeIndex = state->memtuples[0].srctape;
1660 LogicalTape *srcTape = state->inputTapes[srcTapeIndex];
1661 SortTuple newtup;
1662
1663 *stup = state->memtuples[0];
1664
1665 /*
1666 * Remember the tuple we return, so that we can recycle its
1667 * memory on next call. (This can be NULL, in the Datum case).
1668 */
1669 state->lastReturnedTuple = stup->tuple;
1670
1671 /*
1672 * Pull next tuple from tape, and replace the returned tuple
1673 * at top of the heap with it.
1674 */
1675 if (!mergereadnext(state, srcTape, &newtup))
1676 {
1677 /*
1678 * If no more data, we've reached end of run on this tape.
1679 * Remove the top node from the heap.
1680 */
1682 state->nInputRuns--;
1683
1684 /*
1685 * Close the tape. It'd go away at the end of the sort
1686 * anyway, but better to release the memory early.
1687 */
1688 LogicalTapeClose(srcTape);
1689 return true;
1690 }
1691 newtup.srctape = srcTapeIndex;
1693 return true;
1694 }
1695 return false;
1696
1697 default:
1698 elog(ERROR, "invalid tuplesort state");
1699 return false; /* keep compiler quiet */
1700 }
1701}
size_t LogicalTapeBackspace(LogicalTape *lt, size_t size)
Definition: logtape.c:1062
void LogicalTapeClose(LogicalTape *lt)
Definition: logtape.c:733
int srctape
Definition: tuplesort.h:152
static void tuplesort_heap_delete_top(Tuplesortstate *state)
Definition: tuplesort.c:2774
static unsigned int getlen(LogicalTape *tape, bool eofOK)
Definition: tuplesort.c:2856
#define READTUP(state, stup, tape, len)
Definition: tuplesort.c:398
#define WORKER(state)
Definition: tuplesort.c:404
static bool mergereadnext(Tuplesortstate *state, LogicalTape *srcTape, SortTuple *stup)
Definition: tuplesort.c:2288
#define RELEASE_SLAB_SLOT(state, tuple)
Definition: tuplesort.c:383
static void tuplesort_heap_replace_top(Tuplesortstate *state, SortTuple *tuple)
Definition: tuplesort.c:2798

References Assert, elog, ERROR, getlen(), LogicalTapeBackspace(), LogicalTapeClose(), mergereadnext(), READTUP, RELEASE_SLAB_SLOT, SortTuple::srctape, TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, SortTuple::tuple, tuplesort_heap_delete_top(), tuplesort_heap_replace_top(), TUPLESORT_RANDOMACCESS, and WORKER.

Referenced by tuplesort_getbrintuple(), tuplesort_getdatum(), tuplesort_getheaptuple(), tuplesort_getindextuple(), tuplesort_gettupleslot(), and tuplesort_skiptuples().

◆ tuplesort_gettupleslot()

bool tuplesort_gettupleslot ( Tuplesortstate *  state,
bool  forward,
bool  copy,
TupleTableSlot *  slot,
Datum *  abbrev 
)

Definition at line 876 of file tuplesortvariants.c.

878{
881 SortTuple stup;
882
883 if (!tuplesort_gettuple_common(state, forward, &stup))
884 stup.tuple = NULL;
885
886 MemoryContextSwitchTo(oldcontext);
887
888 if (stup.tuple)
889 {
890 /* Record abbreviated key for caller */
891 if (base->sortKeys->abbrev_converter && abbrev)
892 *abbrev = stup.datum1;
893
894 if (copy)
896
897 ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, copy);
898 return true;
899 }
900 else
901 {
902 ExecClearTuple(slot);
903 return false;
904 }
905}
TupleTableSlot * ExecStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree)
Definition: execTuples.c:1633
MinimalTuple heap_copy_minimal_tuple(MinimalTuple mtup)
Definition: heaptuple.c:1536
Datum datum1
Definition: tuplesort.h:150
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:454

References SortSupportData::abbrev_converter, SortTuple::datum1, ExecClearTuple(), ExecStoreMinimalTuple(), heap_copy_minimal_tuple(), MemoryContextSwitchTo(), TuplesortPublic::sortcontext, TuplesortPublic::sortKeys, SortTuple::tuple, tuplesort_gettuple_common(), and TuplesortstateGetPublic.

Referenced by ExecIncrementalSort(), ExecSort(), fetch_input_tuple(), hypothetical_dense_rank_final(), hypothetical_rank_common(), process_ordered_aggregate_multi(), and switchToPresortedPrefixMode().

◆ tuplesort_initialize_shared()

void tuplesort_initialize_shared ( Sharedsort *  shared,
int  nWorkers,
dsm_segment *  seg 
)

Definition at line 2938 of file tuplesort.c.

2939{
2940 int i;
2941
2942 Assert(nWorkers > 0);
2943
2944 SpinLockInit(&shared->mutex);
2945 shared->currentWorker = 0;
2946 shared->workersFinished = 0;
2947 SharedFileSetInit(&shared->fileset, seg);
2948 shared->nTapes = nWorkers;
2949 for (i = 0; i < nWorkers; i++)
2950 {
2951 shared->tapes[i].firstblocknumber = 0L;
2952 }
2953}
void SharedFileSetInit(SharedFileSet *fileset, dsm_segment *seg)
Definition: sharedfileset.c:38
#define SpinLockInit(lock)
Definition: spin.h:57
TapeShare tapes[FLEXIBLE_ARRAY_MEMBER]
Definition: tuplesort.c:369
int workersFinished
Definition: tuplesort.c:357
int nTapes
Definition: tuplesort.c:363
slock_t mutex
Definition: tuplesort.c:346
int currentWorker
Definition: tuplesort.c:356
int64 firstblocknumber
Definition: logtape.h:54

References Assert, Sharedsort::currentWorker, Sharedsort::fileset, TapeShare::firstblocknumber, i, Sharedsort::mutex, Sharedsort::nTapes, SharedFileSetInit(), SpinLockInit, Sharedsort::tapes, and Sharedsort::workersFinished.

Referenced by _brin_begin_parallel(), and _bt_begin_parallel().

◆ tuplesort_markpos()

void tuplesort_markpos ( Tuplesortstate *  state)

Definition at line 2435 of file tuplesort.c.

2436{
2437 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2438
2439 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2440
2441 switch (state->status)
2442 {
2443 case TSS_SORTEDINMEM:
2444 state->markpos_offset = state->current;
2445 state->markpos_eof = state->eof_reached;
2446 break;
2447 case TSS_SORTEDONTAPE:
2448 LogicalTapeTell(state->result_tape,
2449 &state->markpos_block,
2450 &state->markpos_offset);
2451 state->markpos_eof = state->eof_reached;
2452 break;
2453 default:
2454 elog(ERROR, "invalid tuplesort state");
2455 break;
2456 }
2457
2458 MemoryContextSwitchTo(oldcontext);
2459}
void LogicalTapeTell(LogicalTape *lt, int64 *blocknum, int *offset)
Definition: logtape.c:1162

References Assert, elog, ERROR, LogicalTapeTell(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecSortMarkPos().

◆ tuplesort_merge_order()

int tuplesort_merge_order ( int64  allowedMem)

Definition at line 1778 of file tuplesort.c.

1779{
1780 int mOrder;
1781
1782 /*----------
1783 * In the merge phase, we need buffer space for each input and output tape.
1784 * Each pass in the balanced merge algorithm reads from M input tapes, and
1785 * writes to N output tapes. Each tape consumes TAPE_BUFFER_OVERHEAD bytes
1786 * of memory. In addition to that, we want MERGE_BUFFER_SIZE workspace per
1787 * input tape.
1788 *
1789 * totalMem = M * (TAPE_BUFFER_OVERHEAD + MERGE_BUFFER_SIZE) +
1790 * N * TAPE_BUFFER_OVERHEAD
1791 *
1792 * Except for the last and next-to-last merge passes, where there can be
1793 * fewer tapes left to process, M = N. We choose M so that we have the
1794 * desired amount of memory available for the input buffers
1795 * (TAPE_BUFFER_OVERHEAD + MERGE_BUFFER_SIZE), given the total memory
1796 * available for the tape buffers (allowedMem).
1797 *
1798 * Note: you might be thinking we need to account for the memtuples[]
1799 * array in this calculation, but we effectively treat that as part of the
1800 * MERGE_BUFFER_SIZE workspace.
1801 *----------
1802 */
1803 mOrder = allowedMem /
1804 (TAPE_BUFFER_OVERHEAD + MERGE_BUFFER_SIZE);
1805
1806 /*
1807 * Even in minimum memory, use at least a MINORDER merge. On the other
1808 * hand, even when we have lots of memory, do not use more than a MAXORDER
1809 * merge. Tapes are pretty cheap, but they're not entirely free. Each
1810 * additional tape reduces the amount of memory available to build runs,
1811 * which in turn can cause the same sort to need more runs, which makes
1812 * merging slower even if it can still be done in a single pass. Also,
1813 * high order merges are quite slow due to CPU cache effects; it can be
1814 * faster to pay the I/O cost of a multi-pass merge than to perform a
1815 * single merge pass across many hundreds of tapes.
1816 */
1817 mOrder = Max(mOrder, MINORDER);
1818 mOrder = Min(mOrder, MAXORDER);
1819
1820 return mOrder;
1821}
#define Min(x, y)
Definition: c.h:958
#define TAPE_BUFFER_OVERHEAD
Definition: tuplesort.c:178
#define MAXORDER
Definition: tuplesort.c:177
#define MERGE_BUFFER_SIZE
Definition: tuplesort.c:179
#define MINORDER
Definition: tuplesort.c:176

References Max, MAXORDER, MERGE_BUFFER_SIZE, Min, MINORDER, and TAPE_BUFFER_OVERHEAD.

Referenced by cost_tuplesort(), and inittapes().
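
As a rough worked example (assuming the usual tuplesort.c settings of an 8 kB TAPE_BUFFER_OVERHEAD, a 256 kB MERGE_BUFFER_SIZE, MINORDER = 6 and MAXORDER = 500; a given build may differ): with allowedMem = 4 MB, mOrder = 4194304 / (8192 + 262144) = 15 after integer truncation, which already falls between MINORDER and MAXORDER, so a 15-way merge is chosen.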

◆ tuplesort_method_name()

const char * tuplesort_method_name ( TuplesortMethod  m)

Definition at line 2543 of file tuplesort.c.

2544{
2545 switch (m)
2546 {
2547 case SORT_TYPE_STILL_IN_PROGRESS:
2548 return "still in progress";
2549 case SORT_TYPE_TOP_N_HEAPSORT:
2550 return "top-N heapsort";
2551 case SORT_TYPE_QUICKSORT:
2552 return "quicksort";
2553 case SORT_TYPE_EXTERNAL_SORT:
2554 return "external sort";
2555 case SORT_TYPE_EXTERNAL_MERGE:
2556 return "external merge";
2557 }
2558
2559 return "unknown";
2560}

References SORT_TYPE_EXTERNAL_MERGE, SORT_TYPE_EXTERNAL_SORT, SORT_TYPE_QUICKSORT, SORT_TYPE_STILL_IN_PROGRESS, and SORT_TYPE_TOP_N_HEAPSORT.

Referenced by show_incremental_sort_group_info(), and show_sort_info().

◆ tuplesort_performsort()

void tuplesort_performsort ( Tuplesortstate state)

Definition at line 1363 of file tuplesort.c.

1364{
1365 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1366
1367 if (trace_sort)
1368 elog(LOG, "performsort of worker %d starting: %s",
1369 state->worker, pg_rusage_show(&state->ru_start));
1370
1371 switch (state->status)
1372 {
1373 case TSS_INITIAL:
1374
1375 /*
1376 * We were able to accumulate all the tuples within the allowed
1377 * amount of memory, or leader to take over worker tapes
1378 */
1379 if (SERIAL(state))
1380 {
1381 /* Just qsort 'em and we're done */
1382 tuplesort_sort_memtuples(state);
1383 state->status = TSS_SORTEDINMEM;
1384 }
1385 else if (WORKER(state))
1386 {
1387 /*
1388 * Parallel workers must still dump out tuples to tape. No
1389 * merge is required to produce single output run, though.
1390 */
1391 inittapes(state, false);
1392 dumptuples(state, true);
1393 worker_nomergeruns(state);
1394 state->status = TSS_SORTEDONTAPE;
1395 }
1396 else
1397 {
1398 /*
1399 * Leader will take over worker tapes and merge worker runs.
1400 * Note that mergeruns sets the correct state->status.
1401 */
1402 leader_takeover_tapes(state);
1403 mergeruns(state);
1404 }
1405 state->current = 0;
1406 state->eof_reached = false;
1407 state->markpos_block = 0L;
1408 state->markpos_offset = 0;
1409 state->markpos_eof = false;
1410 break;
1411
1412 case TSS_BOUNDED:
1413
1414 /*
1415 * We were able to accumulate all the tuples required for output
1416 * in memory, using a heap to eliminate excess tuples. Now we
1417 * have to transform the heap to a properly-sorted array. Note
1418 * that sort_bounded_heap sets the correct state->status.
1419 */
1420 sort_bounded_heap(state);
1421 state->current = 0;
1422 state->eof_reached = false;
1423 state->markpos_offset = 0;
1424 state->markpos_eof = false;
1425 break;
1426
1427 case TSS_BUILDRUNS:
1428
1429 /*
1430 * Finish tape-based sort. First, flush all tuples remaining in
1431 * memory out to tape; then merge until we have a single remaining
1432 * run (or, if !randomAccess and !WORKER(), one run per tape).
1433 * Note that mergeruns sets the correct state->status.
1434 */
1435 dumptuples(state, true);
1436 mergeruns(state);
1437 state->eof_reached = false;
1438 state->markpos_block = 0L;
1439 state->markpos_offset = 0;
1440 state->markpos_eof = false;
1441 break;
1442
1443 default:
1444 elog(ERROR, "invalid tuplesort state");
1445 break;
1446 }
1447
1448 if (trace_sort)
1449 {
1450 if (state->status == TSS_FINALMERGE)
1451 elog(LOG, "performsort of worker %d done (except %d-way final merge): %s",
1452 state->worker, state->nInputTapes,
1453 pg_rusage_show(&state->ru_start));
1454 else
1455 elog(LOG, "performsort of worker %d done: %s",
1456 state->worker, pg_rusage_show(&state->ru_start));
1457 }
1458
1459 MemoryContextSwitchTo(oldcontext);
1460}
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
#define SERIAL(state)
Definition: tuplesort.c:403
static void sort_bounded_heap(Tuplesortstate *state)
Definition: tuplesort.c:2636
@ TSS_INITIAL
Definition: tuplesort.c:156
@ TSS_BUILDRUNS
Definition: tuplesort.c:158
@ TSS_BOUNDED
Definition: tuplesort.c:157
static void leader_takeover_tapes(Tuplesortstate *state)
Definition: tuplesort.c:3069
static void tuplesort_sort_memtuples(Tuplesortstate *state)
Definition: tuplesort.c:2676
static void inittapes(Tuplesortstate *state, bool mergeruns)
Definition: tuplesort.c:1865
static void worker_nomergeruns(Tuplesortstate *state)
Definition: tuplesort.c:3047
static void mergeruns(Tuplesortstate *state)
Definition: tuplesort.c:2017
static void dumptuples(Tuplesortstate *state, bool alltuples)
Definition: tuplesort.c:2307

References dumptuples(), elog, ERROR, inittapes(), leader_takeover_tapes(), LOG, MemoryContextSwitchTo(), mergeruns(), pg_rusage_show(), SERIAL, sort_bounded_heap(), trace_sort, TSS_BOUNDED, TSS_BUILDRUNS, TSS_FINALMERGE, TSS_INITIAL, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, tuplesort_sort_memtuples(), WORKER, and worker_nomergeruns().

Referenced by _brin_parallel_merge(), _brin_parallel_scan_and_build(), _bt_leafbuild(), _bt_parallel_scan_and_sort(), _h_indexbuild(), ExecIncrementalSort(), ExecSort(), gistbuild(), heapam_relation_copy_for_cluster(), hypothetical_dense_rank_final(), hypothetical_rank_common(), initialize_phase(), mode_final(), percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), percentile_disc_multi_final(), process_ordered_aggregate_multi(), process_ordered_aggregate_single(), switchToPresortedPrefixMode(), and validate_index().
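
The call sites above all follow the same feed / performsort / read-back shape. A minimal serial sketch (not from the source) is shown below; tupDesc, nkeys, attNums, sortOperators, sortCollations, nullsFirst, next_input_slot(), and out_slot are assumed to be supplied by the caller, and tuplesort_gettupleslot() is declared elsewhere in tuplesort.h:

    Tuplesortstate *state;
    TupleTableSlot *slot;

    state = tuplesort_begin_heap(tupDesc, nkeys, attNums,
                                 sortOperators, sortCollations, nullsFirst,
                                 work_mem, NULL, TUPLESORT_NONE);

    while ((slot = next_input_slot()) != NULL)   /* feed phase */
        tuplesort_puttupleslot(state, slot);

    tuplesort_performsort(state);                /* finish the sort */

    while (tuplesort_gettupleslot(state, true, false, out_slot, NULL))
    {
        /* consume tuples in sorted order */
    }

    tuplesort_end(state);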

◆ tuplesort_putbrintuple()

void tuplesort_putbrintuple ( Tuplesortstate state,
BrinTuple tuple,
Size  size 
)

Definition at line 774 of file tuplesortvariants.c.

775{
776 SortTuple stup;
777 BrinSortTuple *bstup;
778 TuplesortPublic *base = TuplesortstateGetPublic(state);
779 MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
780 Size tuplen;
781
782 /* allocate space for the whole BRIN sort tuple */
783 bstup = palloc(BRINSORTTUPLE_SIZE(size));
784
785 bstup->tuplen = size;
786 memcpy(&bstup->tuple, tuple, size);
787
788 stup.tuple = bstup;
789 stup.datum1 = tuple->bt_blkno;
790 stup.isnull1 = false;
791
792 /* GetMemoryChunkSpace is not supported for bump contexts */
793 if (TupleSortUseBumpTupleCxt(base->sortopt))
794 tuplen = MAXALIGN(BRINSORTTUPLE_SIZE(size));
795 else
796 tuplen = GetMemoryChunkSpace(bstup);
797
798 tuplesort_puttuple_common(state, &stup,
799 base->sortKeys &&
800 base->sortKeys->abbrev_converter &&
801 !stup.isnull1, tuplen);
802
803 MemoryContextSwitchTo(oldcontext);
804}
Size GetMemoryChunkSpace(void *pointer)
Definition: mcxt.c:721
static pg_noinline void Size size
Definition: slab.c:607
BlockNumber bt_blkno
Definition: brin_tuple.h:66
bool isnull1
Definition: tuplesort.h:151
MemoryContext tuplecontext
Definition: tuplesort.h:221
void tuplesort_puttuple_common(Tuplesortstate *state, SortTuple *tuple, bool useAbbrev, Size tuplen)
Definition: tuplesort.c:1169
#define TupleSortUseBumpTupleCxt(opt)
Definition: tuplesort.h:108
#define BRINSORTTUPLE_SIZE(len)

References SortSupportData::abbrev_converter, BRINSORTTUPLE_SIZE, BrinTuple::bt_blkno, SortTuple::datum1, GetMemoryChunkSpace(), SortTuple::isnull1, MAXALIGN, MemoryContextSwitchTo(), palloc(), size, TuplesortPublic::sortKeys, TuplesortPublic::sortopt, BrinSortTuple::tuple, SortTuple::tuple, TuplesortPublic::tuplecontext, BrinSortTuple::tuplen, tuplesort_puttuple_common(), TuplesortstateGetPublic, and TupleSortUseBumpTupleCxt.

Referenced by form_and_spill_tuple().

◆ tuplesort_putdatum()

void tuplesort_putdatum ( Tuplesortstate state,
Datum  val,
bool  isNull 
)

Definition at line 812 of file tuplesortvariants.c.

813{
814 TuplesortPublic *base = TuplesortstateGetPublic(state);
815 TuplesortDatumArg *arg = (TuplesortDatumArg *) base->arg;
816 MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
817 SortTuple stup;
818
819 /*
820 * Pass-by-value types or null values are just stored directly in
821 * stup.datum1 (and stup.tuple is not used and set to NULL).
822 *
823 * Non-null pass-by-reference values need to be copied into memory we
824 * control, and possibly abbreviated. The copied value is pointed to by
825 * stup.tuple and is treated as the canonical copy (e.g. to return via
826 * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
827 * abbreviated value if abbreviation is happening, otherwise it's
828 * identical to stup.tuple.
829 */
830
831 if (isNull || !base->tuples)
832 {
833 /*
834 * Set datum1 to zeroed representation for NULLs (to be consistent,
835 * and to support cheap inequality tests for NULL abbreviated keys).
836 */
837 stup.datum1 = !isNull ? val : (Datum) 0;
838 stup.isnull1 = isNull;
839 stup.tuple = NULL; /* no separate storage */
840 }
841 else
842 {
843 stup.isnull1 = false;
844 stup.datum1 = datumCopy(val, false, arg->datumTypeLen);
845 stup.tuple = DatumGetPointer(stup.datum1);
846 }
847
848 tuplesort_puttuple_common(state, &stup,
849 base->tuples &&
850 base->sortKeys->abbrev_converter && !isNull, 0);
851
852 MemoryContextSwitchTo(oldcontext);
853}
uintptr_t Datum
Definition: postgres.h:64
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:312

References SortSupportData::abbrev_converter, arg, TuplesortPublic::arg, datumCopy(), DatumGetPointer(), if(), MemoryContextSwitchTo(), TuplesortPublic::sortKeys, TuplesortPublic::tuplecontext, TuplesortPublic::tuples, tuplesort_puttuple_common(), TuplesortstateGetPublic, and val.

Referenced by ExecEvalAggOrderedTransDatum(), ExecSort(), ordered_set_transition(), and validate_index_callback().
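
A minimal sketch of the single-Datum path (the ordered-set aggregate pattern), not taken from the source; the caller is assumed to supply the "<" operator OID for int8, and tuplesort_begin_datum()/tuplesort_getdatum() are declared elsewhere in tuplesort.h:

    /* Hypothetical helper; int8_lt_op is the OID of the int8 "<" operator. */
    static void
    sort_some_int8s(Oid int8_lt_op)
    {
        Tuplesortstate *ts = tuplesort_begin_datum(INT8OID, int8_lt_op,
                                                   InvalidOid, false,
                                                   work_mem, NULL, TUPLESORT_NONE);
        Datum   val;
        bool    isnull;

        tuplesort_putdatum(ts, Int64GetDatum(42), false);
        tuplesort_putdatum(ts, (Datum) 0, true);     /* a NULL input */

        tuplesort_performsort(ts);
        while (tuplesort_getdatum(ts, true, false, &val, &isnull, NULL))
        {
            /* values come back in sort order */
        }
        tuplesort_end(ts);
    }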

◆ tuplesort_putheaptuple()

void tuplesort_putheaptuple ( Tuplesortstate state,
HeapTuple  tup 
)

Definition at line 695 of file tuplesortvariants.c.

696{
697 SortTuple stup;
698 TuplesortPublic *base = TuplesortstateGetPublic(state);
699 TuplesortClusterArg *arg = (TuplesortClusterArg *) base->arg;
700 MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
701 Size tuplen;
702
703 /* copy the tuple into sort storage */
704 tup = heap_copytuple(tup);
705 stup.tuple = tup;
706
707 /*
708 * set up first-column key value, and potentially abbreviate, if it's a
709 * simple column
710 */
711 if (base->haveDatum1)
712 {
713 stup.datum1 = heap_getattr(tup,
714 arg->indexInfo->ii_IndexAttrNumbers[0],
715 arg->tupDesc,
716 &stup.isnull1);
717 }
718
719 /* GetMemoryChunkSpace is not supported for bump contexts */
720 if (TupleSortUseBumpTupleCxt(base->sortopt))
721 tuplen = MAXALIGN(HEAPTUPLESIZE + tup->t_len);
722 else
723 tuplen = GetMemoryChunkSpace(tup);
724
725 tuplesort_puttuple_common(state, &stup,
726 base->haveDatum1 &&
727 base->sortKeys->abbrev_converter &&
728 !stup.isnull1, tuplen);
729
730 MemoryContextSwitchTo(oldcontext);
731}
HeapTuple heap_copytuple(HeapTuple tuple)
Definition: heaptuple.c:778
#define HEAPTUPLESIZE
Definition: htup.h:73
static Datum heap_getattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: htup_details.h:792
uint32 t_len
Definition: htup.h:64

References SortSupportData::abbrev_converter, arg, TuplesortPublic::arg, SortTuple::datum1, GetMemoryChunkSpace(), TuplesortPublic::haveDatum1, heap_copytuple(), heap_getattr(), HEAPTUPLESIZE, SortTuple::isnull1, MAXALIGN, MemoryContextSwitchTo(), TuplesortPublic::sortKeys, TuplesortPublic::sortopt, HeapTupleData::t_len, SortTuple::tuple, TuplesortPublic::tuplecontext, tuplesort_puttuple_common(), TuplesortstateGetPublic, and TupleSortUseBumpTupleCxt.

Referenced by heapam_relation_copy_for_cluster().

◆ tuplesort_putindextuplevalues()

void tuplesort_putindextuplevalues ( Tuplesortstate state,
Relation  rel,
ItemPointer  self,
const Datum values,
const bool *  isnull 
)

Definition at line 738 of file tuplesortvariants.c.

741{
742 SortTuple stup;
743 IndexTuple tuple;
744 TuplesortPublic *base = TuplesortstateGetPublic(state);
745 TuplesortIndexArg *arg = (TuplesortIndexArg *) base->arg;
746 Size tuplen;
747
748 stup.tuple = index_form_tuple_context(RelationGetDescr(rel), values,
749 isnull, base->tuplecontext);
750 tuple = ((IndexTuple) stup.tuple);
751 tuple->t_tid = *self;
752 /* set up first-column key value */
753 stup.datum1 = index_getattr(tuple,
754 1,
755 RelationGetDescr(arg->indexRel),
756 &stup.isnull1);
757
758 /* GetMemoryChunkSpace is not supported for bump contexts */
759 if (TupleSortUseBumpTupleCxt(base->sortopt))
760 tuplen = MAXALIGN(tuple->t_info & INDEX_SIZE_MASK);
761 else
762 tuplen = GetMemoryChunkSpace(tuple);
763
764 tuplesort_puttuple_common(state, &stup,
765 base->sortKeys &&
766 base->sortKeys->abbrev_converter &&
767 !stup.isnull1, tuplen);
768}
static Datum values[MAXATTR]
Definition: bootstrap.c:151
IndexTuple index_form_tuple_context(TupleDesc tupleDescriptor, const Datum *values, const bool *isnull, MemoryContext context)
Definition: indextuple.c:65
IndexTupleData * IndexTuple
Definition: itup.h:53
static Datum index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: itup.h:117
#define INDEX_SIZE_MASK
Definition: itup.h:65
#define RelationGetDescr(relation)
Definition: rel.h:531
ItemPointerData t_tid
Definition: itup.h:37
unsigned short t_info
Definition: itup.h:49

References SortSupportData::abbrev_converter, arg, TuplesortPublic::arg, SortTuple::datum1, GetMemoryChunkSpace(), index_form_tuple_context(), index_getattr(), INDEX_SIZE_MASK, SortTuple::isnull1, MAXALIGN, RelationGetDescr, TuplesortPublic::sortKeys, TuplesortPublic::sortopt, IndexTupleData::t_info, IndexTupleData::t_tid, SortTuple::tuple, TuplesortPublic::tuplecontext, tuplesort_puttuple_common(), TuplesortstateGetPublic, TupleSortUseBumpTupleCxt, and values.

Referenced by _bt_spool(), _h_spool(), and gistSortedBuildCallback().

◆ tuplesort_puttuple_common()

void tuplesort_puttuple_common ( Tuplesortstate state,
SortTuple tuple,
bool  useAbbrev,
Size  tuplen 
)

Definition at line 1169 of file tuplesort.c.

1171{
1172 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1173
1174 Assert(!LEADER(state));
1175
1176 /* account for the memory used for this tuple */
1177 USEMEM(state, tuplen);
1178 state->tupleMem += tuplen;
1179
1180 if (!useAbbrev)
1181 {
1182 /*
1183 * Leave ordinary Datum representation, or NULL value. If there is a
1184 * converter it won't expect NULL values, and cost model is not
1185 * required to account for NULL, so in that case we avoid calling
1186 * converter and just set datum1 to zeroed representation (to be
1187 * consistent, and to support cheap inequality tests for NULL
1188 * abbreviated keys).
1189 */
1190 }
1191 else if (!consider_abort_common(state))
1192 {
1193 /* Store abbreviated key representation */
1194 tuple->datum1 = state->base.sortKeys->abbrev_converter(tuple->datum1,
1195 state->base.sortKeys);
1196 }
1197 else
1198 {
1199 /*
1200 * Set state to be consistent with never trying abbreviation.
1201 *
1202 * Alter datum1 representation in already-copied tuples, so as to
1203 * ensure a consistent representation (current tuple was just
1204 * handled). It does not matter if some dumped tuples are already
1205 * sorted on tape, since serialized tuples lack abbreviated keys
1206 * (TSS_BUILDRUNS state prevents control reaching here in any case).
1207 */
1208 REMOVEABBREV(state, state->memtuples, state->memtupcount);
1209 }
1210
1211 switch (state->status)
1212 {
1213 case TSS_INITIAL:
1214
1215 /*
1216 * Save the tuple into the unsorted array. First, grow the array
1217 * as needed. Note that we try to grow the array when there is
1218 * still one free slot remaining --- if we fail, there'll still be
1219 * room to store the incoming tuple, and then we'll switch to
1220 * tape-based operation.
1221 */
1222 if (state->memtupcount >= state->memtupsize - 1)
1223 {
1224 (void) grow_memtuples(state);
1225 Assert(state->memtupcount < state->memtupsize);
1226 }
1227 state->memtuples[state->memtupcount++] = *tuple;
1228
1229 /*
1230 * Check if it's time to switch over to a bounded heapsort. We do
1231 * so if the input tuple count exceeds twice the desired tuple
1232 * count (this is a heuristic for where heapsort becomes cheaper
1233 * than a quicksort), or if we've just filled workMem and have
1234 * enough tuples to meet the bound.
1235 *
1236 * Note that once we enter TSS_BOUNDED state we will always try to
1237 * complete the sort that way. In the worst case, if later input
1238 * tuples are larger than earlier ones, this might cause us to
1239 * exceed workMem significantly.
1240 */
1241 if (state->bounded &&
1242 (state->memtupcount > state->bound * 2 ||
1243 (state->memtupcount > state->bound && LACKMEM(state))))
1244 {
1245 if (trace_sort)
1246 elog(LOG, "switching to bounded heapsort at %d tuples: %s",
1247 state->memtupcount,
1248 pg_rusage_show(&state->ru_start));
1249 make_bounded_heap(state);
1250 MemoryContextSwitchTo(oldcontext);
1251 return;
1252 }
1253
1254 /*
1255 * Done if we still fit in available memory and have array slots.
1256 */
1257 if (state->memtupcount < state->memtupsize && !LACKMEM(state))
1258 {
1259 MemoryContextSwitchTo(oldcontext);
1260 return;
1261 }
1262
1263 /*
1264 * Nope; time to switch to tape-based operation.
1265 */
1266 inittapes(state, true);
1267
1268 /*
1269 * Dump all tuples.
1270 */
1271 dumptuples(state, false);
1272 break;
1273
1274 case TSS_BOUNDED:
1275
1276 /*
1277 * We don't want to grow the array here, so check whether the new
1278 * tuple can be discarded before putting it in. This should be a
1279 * good speed optimization, too, since when there are many more
1280 * input tuples than the bound, most input tuples can be discarded
1281 * with just this one comparison. Note that because we currently
1282 * have the sort direction reversed, we must check for <= not >=.
1283 */
1284 if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
1285 {
1286 /* new tuple <= top of the heap, so we can discard it */
1287 free_sort_tuple(state, tuple);
1288 CHECK_FOR_INTERRUPTS();
1289 }
1290 else
1291 {
1292 /* discard top of heap, replacing it with the new tuple */
1293 free_sort_tuple(state, &state->memtuples[0]);
1294 tuplesort_heap_replace_top(state, tuple);
1295 }
1296 break;
1297
1298 case TSS_BUILDRUNS:
1299
1300 /*
1301 * Save the tuple into the unsorted array (there must be space)
1302 */
1303 state->memtuples[state->memtupcount++] = *tuple;
1304
1305 /*
1306 * If we are over the memory limit, dump all tuples.
1307 */
1308 dumptuples(state, false);
1309 break;
1310
1311 default:
1312 elog(ERROR, "invalid tuplesort state");
1313 break;
1314 }
1315 MemoryContextSwitchTo(oldcontext);
1316}
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
#define COMPARETUP(state, a, b)
Definition: tuplesort.c:396
static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
Definition: tuplesort.c:3128
#define REMOVEABBREV(state, stup, count)
Definition: tuplesort.c:395
#define LACKMEM(state)
Definition: tuplesort.c:400
#define USEMEM(state, amt)
Definition: tuplesort.c:401
static bool grow_memtuples(Tuplesortstate *state)
Definition: tuplesort.c:1052
static void make_bounded_heap(Tuplesortstate *state)
Definition: tuplesort.c:2587
#define LEADER(state)
Definition: tuplesort.c:405
static bool consider_abort_common(Tuplesortstate *state)
Definition: tuplesort.c:1319

References Assert, CHECK_FOR_INTERRUPTS, COMPARETUP, consider_abort_common(), SortTuple::datum1, dumptuples(), elog, ERROR, free_sort_tuple(), grow_memtuples(), inittapes(), LACKMEM, LEADER, LOG, make_bounded_heap(), MemoryContextSwitchTo(), pg_rusage_show(), REMOVEABBREV, trace_sort, TSS_BOUNDED, TSS_BUILDRUNS, TSS_INITIAL, tuplesort_heap_replace_top(), and USEMEM.

Referenced by tuplesort_putbrintuple(), tuplesort_putdatum(), tuplesort_putheaptuple(), tuplesort_putindextuplevalues(), and tuplesort_puttupleslot().
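
To make the TSS_INITIAL to TSS_BOUNDED trigger concrete: with tuplesort_set_bound(state, 100), the switch to the bounded heap happens as soon as memtupcount exceeds 200 (twice the bound), or earlier once at least 101 tuples have been stored and LACKMEM(state) reports that workMem is exhausted; after the switch, every later tuple is either discarded immediately or replaces the current heap top.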

◆ tuplesort_puttupleslot()

void tuplesort_puttupleslot ( Tuplesortstate state,
TupleTableSlot slot 
)

Definition at line 655 of file tuplesortvariants.c.

656{
657 TuplesortPublic *base = TuplesortstateGetPublic(state);
658 MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
659 TupleDesc tupDesc = (TupleDesc) base->arg;
660 SortTuple stup;
661 MinimalTuple tuple;
662 HeapTupleData htup;
663 Size tuplen;
664
665 /* copy the tuple into sort storage */
666 tuple = ExecCopySlotMinimalTuple(slot);
667 stup.tuple = tuple;
668 /* set up first-column key value */
669 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
670 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
671 stup.datum1 = heap_getattr(&htup,
672 base->sortKeys[0].ssup_attno,
673 tupDesc,
674 &stup.isnull1);
675
676 /* GetMemoryChunkSpace is not supported for bump contexts */
677 if (TupleSortUseBumpTupleCxt(base->sortopt))
678 tuplen = MAXALIGN(tuple->t_len);
679 else
680 tuplen = GetMemoryChunkSpace(tuple);
681
682 tuplesort_puttuple_common(state, &stup,
683 base->sortKeys->abbrev_converter &&
684 !stup.isnull1, tuplen);
685
686 MemoryContextSwitchTo(oldcontext);
687}
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define MINIMAL_TUPLE_OFFSET
Definition: htup_details.h:617
struct TupleDescData * TupleDesc
Definition: tupdesc.h:137
static MinimalTuple ExecCopySlotMinimalTuple(TupleTableSlot *slot)
Definition: tuptable.h:492

References SortSupportData::abbrev_converter, TuplesortPublic::arg, ExecCopySlotMinimalTuple(), GetMemoryChunkSpace(), heap_getattr(), MAXALIGN, MemoryContextSwitchTo(), MINIMAL_TUPLE_OFFSET, TuplesortPublic::sortKeys, TuplesortPublic::sortopt, SortSupportData::ssup_attno, MinimalTupleData::t_len, TuplesortPublic::tuplecontext, tuplesort_puttuple_common(), TuplesortstateGetPublic, and TupleSortUseBumpTupleCxt.

Referenced by ExecEvalAggOrderedTransTuple(), ExecIncrementalSort(), ExecSort(), fetch_input_tuple(), hypothetical_dense_rank_final(), hypothetical_rank_common(), ordered_set_transition_multi(), and switchToPresortedPrefixMode().

◆ tuplesort_readtup_alloc()

void * tuplesort_readtup_alloc ( Tuplesortstate state,
Size  tuplen 
)

Definition at line 2883 of file tuplesort.c.

2884{
2885 SlabSlot *buf;
2886
2887 /*
2888 * We pre-allocate enough slots in the slab arena that we should never run
2889 * out.
2890 */
2891 Assert(state->slabFreeHead);
2892
2893 if (tuplen > SLAB_SLOT_SIZE || !state->slabFreeHead)
2894 return MemoryContextAlloc(state->base.sortcontext, tuplen);
2895 else
2896 {
2897 buf = state->slabFreeHead;
2898 /* Reuse this slot */
2899 state->slabFreeHead = buf->nextfree;
2900
2901 return buf;
2902 }
2903}
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1181
static char * buf
Definition: pg_test_fsync.c:72
#define SLAB_SLOT_SIZE
Definition: tuplesort.c:142

References Assert, buf, MemoryContextAlloc(), and SLAB_SLOT_SIZE.

Referenced by readtup_cluster(), readtup_datum(), readtup_heap(), readtup_index(), and readtup_index_brin().

◆ tuplesort_rescan()

void tuplesort_rescan ( Tuplesortstate state)

Definition at line 2402 of file tuplesort.c.

2403{
2404 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2405
2406 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2407
2408 switch (state->status)
2409 {
2410 case TSS_SORTEDINMEM:
2411 state->current = 0;
2412 state->eof_reached = false;
2413 state->markpos_offset = 0;
2414 state->markpos_eof = false;
2415 break;
2416 case TSS_SORTEDONTAPE:
2417 LogicalTapeRewindForRead(state->result_tape, 0);
2418 state->eof_reached = false;
2419 state->markpos_block = 0L;
2420 state->markpos_offset = 0;
2421 state->markpos_eof = false;
2422 break;
2423 default:
2424 elog(ERROR, "invalid tuplesort state");
2425 break;
2426 }
2427
2428 MemoryContextSwitchTo(oldcontext);
2429}
void LogicalTapeRewindForRead(LogicalTape *lt, size_t buffer_size)
Definition: logtape.c:846

References Assert, elog, ERROR, LogicalTapeRewindForRead(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecReScanSort(), mode_final(), percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), and percentile_disc_multi_final().
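
A short sketch of the rescan pattern, valid only for sorts begun with TUPLESORT_RANDOMACCESS (tuplesort_gettupleslot() is declared elsewhere in tuplesort.h):

    tuplesort_rescan(state);        /* back to the first tuple, no re-sort needed */
    while (tuplesort_gettupleslot(state, true, false, slot, NULL))
    {
        /* second full pass over the same sorted output */
    }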

◆ tuplesort_reset()

void tuplesort_reset ( Tuplesortstate state)

Definition at line 1019 of file tuplesort.c.

1020{
1021 tuplesort_updatemax(state);
1022 tuplesort_free(state);
1023
1024 /*
1025 * After we've freed up per-batch memory, re-setup all of the state common
1026 * to both the first batch and any subsequent batch.
1027 */
1028 tuplesort_begin_batch(state);
1029
1030 state->lastReturnedTuple = NULL;
1031 state->slabMemoryBegin = NULL;
1032 state->slabMemoryEnd = NULL;
1033 state->slabFreeHead = NULL;
1034}

References tuplesort_begin_batch(), tuplesort_free(), and tuplesort_updatemax().

Referenced by ExecIncrementalSort(), ExecReScanIncrementalSort(), and switchToPresortedPrefixMode().
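
A hypothetical sketch of the batch-reuse pattern behind incremental sort: instead of tuplesort_end() plus a fresh tuplesort_begin_*() per group, one state is reset and refilled. next_tuple_in_group(), emit(), and no_more_groups() are assumed helpers, and tuplesort_gettupleslot() is declared elsewhere in tuplesort.h.

    for (;;)
    {
        TupleTableSlot *slot;

        while ((slot = next_tuple_in_group()) != NULL)
            tuplesort_puttupleslot(state, slot);

        tuplesort_performsort(state);
        while (tuplesort_gettupleslot(state, true, false, out_slot, NULL))
            emit(out_slot);

        if (no_more_groups())
            break;
        tuplesort_reset(state);    /* keep buffers and settings, drop this batch's tuples */
    }
    tuplesort_end(state);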

◆ tuplesort_restorepos()

void tuplesort_restorepos ( Tuplesortstate state)

Definition at line 2466 of file tuplesort.c.

2467{
2468 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2469
2470 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2471
2472 switch (state->status)
2473 {
2474 case TSS_SORTEDINMEM:
2475 state->current = state->markpos_offset;
2476 state->eof_reached = state->markpos_eof;
2477 break;
2478 case TSS_SORTEDONTAPE:
2479 LogicalTapeSeek(state->result_tape,
2480 state->markpos_block,
2481 state->markpos_offset);
2482 state->eof_reached = state->markpos_eof;
2483 break;
2484 default:
2485 elog(ERROR, "invalid tuplesort state");
2486 break;
2487 }
2488
2489 MemoryContextSwitchTo(oldcontext);
2490}
void LogicalTapeSeek(LogicalTape *lt, int64 blocknum, int offset)
Definition: logtape.c:1133

References Assert, elog, ERROR, LogicalTapeSeek(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecSortRestrPos().

◆ tuplesort_set_bound()

void tuplesort_set_bound ( Tuplesortstate state,
int64  bound 
)

Definition at line 838 of file tuplesort.c.

839{
840 /* Assert we're called before loading any tuples */
841 Assert(state->status == TSS_INITIAL && state->memtupcount == 0);
842 /* Assert we allow bounded sorts */
843 Assert(state->base.sortopt & TUPLESORT_ALLOWBOUNDED);
844 /* Can't set the bound twice, either */
845 Assert(!state->bounded);
846 /* Also, this shouldn't be called in a parallel worker */
847 Assert(!WORKER(state));
848
849 /* Parallel leader allows but ignores hint */
850 if (LEADER(state))
851 return;
852
853#ifdef DEBUG_BOUNDED_SORT
854 /* Honor GUC setting that disables the feature (for easy testing) */
855 if (!optimize_bounded_sort)
856 return;
857#endif
858
859 /* We want to be able to compute bound * 2, so limit the setting */
860 if (bound > (int64) (INT_MAX / 2))
861 return;
862
863 state->bounded = true;
864 state->bound = (int) bound;
865
866 /*
867 * Bounded sorts are not an effective target for abbreviated key
868 * optimization. Disable by setting state to be consistent with no
869 * abbreviation support.
870 */
871 state->base.sortKeys->abbrev_converter = NULL;
872 if (state->base.sortKeys->abbrev_full_comparator)
873 state->base.sortKeys->comparator = state->base.sortKeys->abbrev_full_comparator;
874
875 /* Not strictly necessary, but be tidy */
876 state->base.sortKeys->abbrev_abort = NULL;
877 state->base.sortKeys->abbrev_full_comparator = NULL;
878}
#define TUPLESORT_ALLOWBOUNDED
Definition: tuplesort.h:99

References Assert, LEADER, TSS_INITIAL, TUPLESORT_ALLOWBOUNDED, and WORKER.

Referenced by ExecIncrementalSort(), ExecSort(), and switchToPresortedPrefixMode().
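
A minimal sketch of a LIMIT-driven bounded sort (not from the source); the sort must be begun with TUPLESORT_ALLOWBOUNDED, and the bound must be set before any tuples are fed. tupDesc and the key arrays are assumed to exist in the caller:

    Tuplesortstate *state =
        tuplesort_begin_heap(tupDesc, nkeys, attNums,
                             sortOperators, sortCollations, nullsFirst,
                             work_mem, NULL, TUPLESORT_ALLOWBOUNDED);

    tuplesort_set_bound(state, 100);    /* e.g. ORDER BY ... LIMIT 100 */

    /* feed tuples, call tuplesort_performsort(), read back at most 100 */

    if (tuplesort_used_bound(state))
    {
        /* the top-N heapsort path was actually taken */
    }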

◆ tuplesort_skiptuples()

bool tuplesort_skiptuples ( Tuplesortstate state,
int64  ntuples,
bool  forward 
)

Definition at line 1710 of file tuplesort.c.

1711{
1712 MemoryContext oldcontext;
1713
1714 /*
1715 * We don't actually support backwards skip yet, because no callers need
1716 * it. The API is designed to allow for that later, though.
1717 */
1718 Assert(forward);
1719 Assert(ntuples >= 0);
1720 Assert(!WORKER(state));
1721
1722 switch (state->status)
1723 {
1724 case TSS_SORTEDINMEM:
1725 if (state->memtupcount - state->current >= ntuples)
1726 {
1727 state->current += ntuples;
1728 return true;
1729 }
1730 state->current = state->memtupcount;
1731 state->eof_reached = true;
1732
1733 /*
1734 * Complain if caller tries to retrieve more tuples than
1735 * originally asked for in a bounded sort. This is because
1736 * returning EOF here might be the wrong thing.
1737 */
1738 if (state->bounded && state->current >= state->bound)
1739 elog(ERROR, "retrieved too many tuples in a bounded sort");
1740
1741 return false;
1742
1743 case TSS_SORTEDONTAPE:
1744 case TSS_FINALMERGE:
1745
1746 /*
1747 * We could probably optimize these cases better, but for now it's
1748 * not worth the trouble.
1749 */
1750 oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1751 while (ntuples-- > 0)
1752 {
1753 SortTuple stup;
1754
1755 if (!tuplesort_gettuple_common(state, forward, &stup))
1756 {
1757 MemoryContextSwitchTo(oldcontext);
1758 return false;
1759 }
1760 CHECK_FOR_INTERRUPTS();
1761 }
1762 MemoryContextSwitchTo(oldcontext);
1763 return true;
1764
1765 default:
1766 elog(ERROR, "invalid tuplesort state");
1767 return false; /* keep compiler quiet */
1768 }
1769}

References Assert, CHECK_FOR_INTERRUPTS, elog, ERROR, MemoryContextSwitchTo(), TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, tuplesort_gettuple_common(), and WORKER.

Referenced by percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), and percentile_disc_multi_final().
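
The percentile aggregates use this to jump straight to the target row rather than reading every tuple. A rough sketch only: rownum is the 0-based row to fetch, the sort has already been performed, and tuplesort_getdatum() is declared elsewhere in tuplesort.h:

    Datum   val;
    bool    isnull;

    if (rownum > 0 && !tuplesort_skiptuples(state, rownum, true))
        elog(ERROR, "missing row in percentile lookup");

    if (!tuplesort_getdatum(state, true, false, &val, &isnull, NULL))
        elog(ERROR, "missing row in percentile lookup");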

◆ tuplesort_space_type_name()

const char * tuplesort_space_type_name ( TuplesortSpaceType  t)

Definition at line 2566 of file tuplesort.c.

2567{
2568 Assert(t == SORT_SPACE_TYPE_DISK || t == SORT_SPACE_TYPE_MEMORY);
2569 return t == SORT_SPACE_TYPE_DISK ? "Disk" : "Memory";
2570}

References Assert, SORT_SPACE_TYPE_DISK, and SORT_SPACE_TYPE_MEMORY.

Referenced by show_incremental_sort_group_info(), and show_sort_info().

◆ tuplesort_used_bound()

bool tuplesort_used_bound ( Tuplesortstate state)

Definition at line 886 of file tuplesort.c.

887{
888 return state->boundUsed;
889}

Referenced by ExecIncrementalSort().