PostgreSQL Source Code git master
tuplesort.h File Reference
#include "access/itup.h"
#include "executor/instrument_node.h"
#include "executor/tuptable.h"
#include "storage/dsm.h"
#include "utils/logtape.h"
#include "utils/relcache.h"
#include "utils/sortsupport.h"
Include dependency graph for tuplesort.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  SortCoordinateData
 
struct  SortTuple
 
struct  TuplesortPublic
 

Macros

#define TUPLESORT_NONE   0
 
#define TUPLESORT_RANDOMACCESS   (1 << 0)
 
#define TUPLESORT_ALLOWBOUNDED   (1 << 1)
 
#define TupleSortUseBumpTupleCxt(opt)   (((opt) & TUPLESORT_ALLOWBOUNDED) == 0)
 
#define PARALLEL_SORT(coordinate)
 
#define TuplesortstateGetPublic(state)   ((TuplesortPublic *) state)
 
#define LogicalTapeReadExact(tape, ptr, len)
 

Typedefs

typedef struct BrinTuple BrinTuple
 
typedef struct GinTuple GinTuple
 
typedef struct Tuplesortstate Tuplesortstate
 
typedef struct Sharedsort Sharedsort
 
typedef struct SortCoordinateData SortCoordinateData
 
typedef struct SortCoordinateData * SortCoordinate
 
typedef int(* SortTupleComparator) (const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
 

Functions

Tuplesortstate * tuplesort_begin_common (int workMem, SortCoordinate coordinate, int sortopt)
 
void tuplesort_set_bound (Tuplesortstate *state, int64 bound)
 
bool tuplesort_used_bound (Tuplesortstate *state)
 
void tuplesort_puttuple_common (Tuplesortstate *state, SortTuple *tuple, bool useAbbrev, Size tuplen)
 
void tuplesort_performsort (Tuplesortstate *state)
 
bool tuplesort_gettuple_common (Tuplesortstate *state, bool forward, SortTuple *stup)
 
bool tuplesort_skiptuples (Tuplesortstate *state, int64 ntuples, bool forward)
 
void tuplesort_end (Tuplesortstate *state)
 
void tuplesort_reset (Tuplesortstate *state)
 
void tuplesort_get_stats (Tuplesortstate *state, TuplesortInstrumentation *stats)
 
const char * tuplesort_method_name (TuplesortMethod m)
 
const char * tuplesort_space_type_name (TuplesortSpaceType t)
 
int tuplesort_merge_order (int64 allowedMem)
 
Size tuplesort_estimate_shared (int nWorkers)
 
void tuplesort_initialize_shared (Sharedsort *shared, int nWorkers, dsm_segment *seg)
 
void tuplesort_attach_shared (Sharedsort *shared, dsm_segment *seg)
 
void tuplesort_rescan (Tuplesortstate *state)
 
void tuplesort_markpos (Tuplesortstate *state)
 
void tuplesort_restorepos (Tuplesortstate *state)
 
void * tuplesort_readtup_alloc (Tuplesortstate *state, Size tuplen)
 
Tuplesortstate * tuplesort_begin_heap (TupleDesc tupDesc, int nkeys, AttrNumber *attNums, Oid *sortOperators, Oid *sortCollations, bool *nullsFirstFlags, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_cluster (TupleDesc tupDesc, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_index_btree (Relation heapRel, Relation indexRel, bool enforceUnique, bool uniqueNullsNotDistinct, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_index_hash (Relation heapRel, Relation indexRel, uint32 high_mask, uint32 low_mask, uint32 max_buckets, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_index_gist (Relation heapRel, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_index_brin (int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_index_gin (Relation heapRel, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
 
Tuplesortstate * tuplesort_begin_datum (Oid datumType, Oid sortOperator, Oid sortCollation, bool nullsFirstFlag, int workMem, SortCoordinate coordinate, int sortopt)
 
void tuplesort_puttupleslot (Tuplesortstate *state, TupleTableSlot *slot)
 
void tuplesort_putheaptuple (Tuplesortstate *state, HeapTuple tup)
 
void tuplesort_putindextuplevalues (Tuplesortstate *state, Relation rel, const ItemPointerData *self, const Datum *values, const bool *isnull)
 
void tuplesort_putbrintuple (Tuplesortstate *state, BrinTuple *tuple, Size size)
 
void tuplesort_putgintuple (Tuplesortstate *state, GinTuple *tuple, Size size)
 
void tuplesort_putdatum (Tuplesortstate *state, Datum val, bool isNull)
 
bool tuplesort_gettupleslot (Tuplesortstate *state, bool forward, bool copy, TupleTableSlot *slot, Datum *abbrev)
 
HeapTuple tuplesort_getheaptuple (Tuplesortstate *state, bool forward)
 
IndexTuple tuplesort_getindextuple (Tuplesortstate *state, bool forward)
 
BrinTuple * tuplesort_getbrintuple (Tuplesortstate *state, Size *len, bool forward)
 
GinTuple * tuplesort_getgintuple (Tuplesortstate *state, Size *len, bool forward)
 
bool tuplesort_getdatum (Tuplesortstate *state, bool forward, bool copy, Datum *val, bool *isNull, Datum *abbrev)
 

Macro Definition Documentation

◆ LogicalTapeReadExact

#define LogicalTapeReadExact(tape, ptr, len)
Value:
do { \
if (LogicalTapeRead(tape, ptr, len) != (size_t) (len)) \
elog(ERROR, "unexpected end of data"); \
} while(0)
#define ERROR
Definition elog.h:39
size_t LogicalTapeRead(LogicalTape *lt, void *ptr, size_t size)
Definition logtape.c:928

Definition at line 230 of file tuplesort.h.

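As an illustrative sketch (not part of the header), a readtup routine would typically use this macro to read a length word and then the tuple body from a tape; tape is assumed to be a LogicalTape * the caller already has open:

/* Hypothetical read path: fetch a length word, then exactly that many bytes */
unsigned int tuplen;
char       *buf;

LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
buf = palloc(tuplen);
LogicalTapeReadExact(tape, buf, tuplen);
/* any short read raises elog(ERROR, "unexpected end of data") */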

◆ PARALLEL_SORT

#define PARALLEL_SORT (   coordinate)
Value:
((coordinate) == NULL || \
(coordinate)->sharedsort == NULL ? 0 : \
(coordinate)->isWorker ? 1 : 2)

Definition at line 223 of file tuplesort.h.

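The macro collapses a SortCoordinate into a small code: 0 for a serial sort, 1 for a parallel worker, 2 for the parallel leader; the tuplesort_begin_* routines use it to label the sort they are starting. A hedged sketch of reading that classification (coordinate is assumed to come from the caller):

/* Sketch only: classify the sort for a debug message */
switch (PARALLEL_SORT(coordinate))
{
    case 0:
        elog(DEBUG1, "serial sort");
        break;
    case 1:
        elog(DEBUG1, "parallel worker sort");
        break;
    case 2:
        elog(DEBUG1, "parallel leader sort");
        break;
}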

◆ TUPLESORT_ALLOWBOUNDED

#define TUPLESORT_ALLOWBOUNDED   (1 << 1)

Definition at line 73 of file tuplesort.h.

◆ TUPLESORT_NONE

#define TUPLESORT_NONE   0

Definition at line 67 of file tuplesort.h.

◆ TUPLESORT_RANDOMACCESS

#define TUPLESORT_RANDOMACCESS   (1 << 0)

Definition at line 70 of file tuplesort.h.

◆ TuplesortstateGetPublic

#define TuplesortstateGetPublic (   state)    ((TuplesortPublic *) state)

Definition at line 227 of file tuplesort.h.

◆ TupleSortUseBumpTupleCxt

#define TupleSortUseBumpTupleCxt (   opt)    (((opt) & TUPLESORT_ALLOWBOUNDED) == 0)

Definition at line 82 of file tuplesort.h.
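These flags combine into the sortopt bitmask passed to the tuplesort_begin_* routines. A minimal sketch, assuming the caller wants both random access and the option of a bounded sort:

/* sortopt is a bitmask of TUPLESORT_* options */
int         sortopt = TUPLESORT_NONE;

sortopt |= TUPLESORT_RANDOMACCESS;  /* enable rescan/markpos/backward fetches */
sortopt |= TUPLESORT_ALLOWBOUNDED;  /* caller may later call tuplesort_set_bound() */

/* bounded sorts disable the bump tuple context */
Assert(!TupleSortUseBumpTupleCxt(sortopt));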

Typedef Documentation

◆ BrinTuple

Definition at line 33 of file tuplesort.h.

◆ GinTuple

Definition at line 34 of file tuplesort.h.

◆ Sharedsort

Definition at line 41 of file tuplesort.h.

◆ SortCoordinate

Definition at line 64 of file tuplesort.h.

◆ SortCoordinateData

◆ SortTupleComparator

typedef int(* SortTupleComparator) (const SortTuple *a, const SortTuple *b, Tuplesortstate *state)

Definition at line 123 of file tuplesort.h.
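A function assigned to TuplesortPublic::comparetup must match this signature. The body below is a hypothetical illustration that compares only the leading (datum1) key using the first SortSupport entry; real comparators also handle additional keys and tie-breaks:

/* Hypothetical comparator matching SortTupleComparator */
static int
comparetup_example(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
{
    TuplesortPublic *base = TuplesortstateGetPublic(state);

    return ApplySortComparator(a->datum1, a->isnull1,
                               b->datum1, b->isnull1,
                               &base->sortKeys[0]);
}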

◆ Tuplesortstate

Definition at line 40 of file tuplesort.h.

Function Documentation

◆ tuplesort_attach_shared()

void tuplesort_attach_shared ( Sharedsort *  shared,
dsm_segment *  seg 
)
extern

Definition at line 3233 of file tuplesort.c.

3234{
3235 /* Attach to SharedFileSet */
3236 SharedFileSetAttach(&shared->fileset, seg);
3237}
void SharedFileSetAttach(SharedFileSet *fileset, dsm_segment *seg)
SharedFileSet fileset
Definition tuplesort.c:357

References Sharedsort::fileset, and SharedFileSetAttach().

Referenced by _brin_parallel_build_main(), _bt_parallel_build_main(), and _gin_parallel_build_main().

◆ tuplesort_begin_cluster()

Tuplesortstate * tuplesort_begin_cluster ( TupleDesc  tupDesc,
Relation  indexRel,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)
extern

Definition at line 254 of file tuplesortvariants.c.

258{
260 sortopt);
263 MemoryContext oldcontext;
265 int i;
266
267 Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
268
269 oldcontext = MemoryContextSwitchTo(base->maincontext);
271
272 if (trace_sort)
273 elog(LOG,
274 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
276 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
277
279
281 false, /* no unique check */
282 base->nKeys,
283 workMem,
284 sortopt & TUPLESORT_RANDOMACCESS,
286
291 base->readtup = readtup_cluster;
293 base->arg = arg;
294
295 arg->indexInfo = BuildIndexInfo(indexRel);
296
297 /*
298 * If we don't have a simple leading attribute, we don't currently
299 * initialize datum1, so disable optimizations that require it.
300 */
301 if (arg->indexInfo->ii_IndexAttrNumbers[0] == 0)
302 base->haveDatum1 = false;
303 else
304 base->haveDatum1 = true;
305
306 arg->tupDesc = tupDesc; /* assume we need not copy tupDesc */
307
308 indexScanKey = _bt_mkscankey(indexRel, NULL);
309
310 if (arg->indexInfo->ii_Expressions != NULL)
311 {
312 TupleTableSlot *slot;
313 ExprContext *econtext;
314
315 /*
316 * We will need to use FormIndexDatum to evaluate the index
317 * expressions. To do that, we need an EState, as well as a
318 * TupleTableSlot to put the table tuples into. The econtext's
319 * scantuple has to point to that slot, too.
320 */
321 arg->estate = CreateExecutorState();
323 econtext = GetPerTupleExprContext(arg->estate);
324 econtext->ecxt_scantuple = slot;
325 }
326
327 /* Prepare SortSupport data for each column */
328 base->sortKeys = (SortSupport) palloc0(base->nKeys *
329 sizeof(SortSupportData));
330
331 for (i = 0; i < base->nKeys; i++)
332 {
333 SortSupport sortKey = base->sortKeys + i;
334 ScanKey scanKey = indexScanKey->scankeys + i;
335 bool reverse;
336
337 sortKey->ssup_cxt = CurrentMemoryContext;
338 sortKey->ssup_collation = scanKey->sk_collation;
339 sortKey->ssup_nulls_first =
340 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
341 sortKey->ssup_attno = scanKey->sk_attno;
342 /* Convey if abbreviation optimization is applicable in principle */
343 sortKey->abbreviate = (i == 0 && base->haveDatum1);
344
345 Assert(sortKey->ssup_attno != 0);
346
347 reverse = (scanKey->sk_flags & SK_BT_DESC) != 0;
348
349 PrepareSortSupportFromIndexRel(indexRel, reverse, sortKey);
350 }
351
353
354 MemoryContextSwitchTo(oldcontext);
355
356 return state;
357}
#define Assert(condition)
Definition c.h:885
#define LOG
Definition elog.h:31
#define elog(elevel,...)
Definition elog.h:226
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
const TupleTableSlotOps TTSOpsHeapTuple
Definition execTuples.c:85
EState * CreateExecutorState(void)
Definition execUtils.c:88
#define GetPerTupleExprContext(estate)
Definition executor.h:656
#define palloc0_object(type)
Definition fe_memutils.h:75
IndexInfo * BuildIndexInfo(Relation index)
Definition index.c:2426
int i
Definition isn.c:77
void pfree(void *pointer)
Definition mcxt.c:1616
void * palloc0(Size size)
Definition mcxt.c:1417
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
#define SK_BT_NULLS_FIRST
Definition nbtree.h:1117
#define SK_BT_DESC
Definition nbtree.h:1116
BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup)
Definition nbtutils.c:59
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
void * arg
#define RelationGetNumberOfAttributes(relation)
Definition rel.h:520
#define IndexRelationGetNumberOfKeyAttributes(relation)
Definition rel.h:533
void PrepareSortSupportFromIndexRel(Relation indexRel, bool reverse, SortSupport ssup)
struct SortSupportData * SortSupport
Definition sortsupport.h:58
TupleTableSlot * ecxt_scantuple
Definition execnodes.h:275
Form_pg_class rd_rel
Definition rel.h:111
MemoryContext maincontext
Definition tuplesort.h:186
void(* writetup)(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
Definition tuplesort.h:162
void(* removeabbrev)(Tuplesortstate *state, SortTuple *stups, int count)
Definition tuplesort.h:155
void(* freestate)(Tuplesortstate *state)
Definition tuplesort.h:180
void(* readtup)(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
Definition tuplesort.h:171
SortTupleComparator comparetup
Definition tuplesort.h:142
SortSupport sortKeys
Definition tuplesort.h:203
SortTupleComparator comparetup_tiebreak
Definition tuplesort.h:149
Tuplesortstate * tuplesort_begin_common(int workMem, SortCoordinate coordinate, int sortopt)
Definition tuplesort.c:546
bool trace_sort
Definition tuplesort.c:122
#define PARALLEL_SORT(coordinate)
Definition tuplesort.h:223
#define TUPLESORT_RANDOMACCESS
Definition tuplesort.h:70
#define TuplesortstateGetPublic(state)
Definition tuplesort.h:227
static int comparetup_cluster_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void readtup_cluster(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int tuplen)
#define CLUSTER_SORT
static void freestate_cluster(Tuplesortstate *state)
static int comparetup_cluster(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void writetup_cluster(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void removeabbrev_cluster(Tuplesortstate *state, SortTuple *stups, int count)

References _bt_mkscankey(), arg, TuplesortPublic::arg, Assert, BuildIndexInfo(), CLUSTER_SORT, TuplesortPublic::comparetup, comparetup_cluster(), comparetup_cluster_tiebreak(), TuplesortPublic::comparetup_tiebreak, CreateExecutorState(), CurrentMemoryContext, ExprContext::ecxt_scantuple, elog, fb(), TuplesortPublic::freestate, freestate_cluster(), GetPerTupleExprContext, TuplesortPublic::haveDatum1, i, IndexRelationGetNumberOfKeyAttributes, LOG, TuplesortPublic::maincontext, MakeSingleTupleTableSlot(), MemoryContextSwitchTo(), TuplesortPublic::nKeys, palloc0(), palloc0_object, PARALLEL_SORT, pfree(), PrepareSortSupportFromIndexRel(), RelationData::rd_rel, TuplesortPublic::readtup, readtup_cluster(), RelationGetNumberOfAttributes, TuplesortPublic::removeabbrev, removeabbrev_cluster(), SK_BT_DESC, SK_BT_NULLS_FIRST, TuplesortPublic::sortKeys, trace_sort, TTSOpsHeapTuple, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_cluster().

Referenced by heapam_relation_copy_for_cluster().
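A hedged usage sketch (heapRel, indexRel, and the heap scan loop are assumed to be set up by the caller; the index must be a btree, as the Assert in the body requires): the CLUSTER code feeds whole heap tuples in and reads them back in index order.

/* Sketch: CLUSTER-style sort of heap tuples by an existing btree index */
Tuplesortstate *sortstate;
HeapTuple   tup;

sortstate = tuplesort_begin_cluster(RelationGetDescr(heapRel), indexRel,
                                    maintenance_work_mem, NULL, TUPLESORT_NONE);

/* for each tuple read from the old heap (scan omitted): */
tuplesort_putheaptuple(sortstate, tup);

tuplesort_performsort(sortstate);

while ((tup = tuplesort_getheaptuple(sortstate, true)) != NULL)
{
    /* write tup to the new heap */
}

tuplesort_end(sortstate);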

◆ tuplesort_begin_common()

Tuplesortstate * tuplesort_begin_common ( int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)
extern

Definition at line 546 of file tuplesort.c.

547{
549 MemoryContext maincontext;
550 MemoryContext sortcontext;
551 MemoryContext oldcontext;
552
553 /* See leader_takeover_tapes() remarks on random access support */
554 if (coordinate && (sortopt & TUPLESORT_RANDOMACCESS))
555 elog(ERROR, "random access disallowed under parallel sort");
556
557 /*
558 * Memory context surviving tuplesort_reset. This memory context holds
559 * data which is useful to keep while sorting multiple similar batches.
560 */
562 "TupleSort main",
564
565 /*
566 * Create a working memory context for one sort operation. The content of
567 * this context is deleted by tuplesort_reset.
568 */
569 sortcontext = AllocSetContextCreate(maincontext,
570 "TupleSort sort",
572
573 /*
574 * Additionally a working memory context for tuples is setup in
575 * tuplesort_begin_batch.
576 */
577
578 /*
579 * Make the Tuplesortstate within the per-sortstate context. This way, we
580 * don't need a separate pfree() operation for it at shutdown.
581 */
582 oldcontext = MemoryContextSwitchTo(maincontext);
583
585
586 if (trace_sort)
587 pg_rusage_init(&state->ru_start);
588
589 state->base.sortopt = sortopt;
590 state->base.tuples = true;
591 state->abbrevNext = 10;
592
593 /*
594 * workMem is forced to be at least 64KB, the current minimum valid value
595 * for the work_mem GUC. This is a defense against parallel sort callers
596 * that divide out memory among many workers in a way that leaves each
597 * with very little memory.
598 */
599 state->allowedMem = Max(workMem, 64) * (int64) 1024;
600 state->base.sortcontext = sortcontext;
601 state->base.maincontext = maincontext;
602
603 state->memtupsize = INITIAL_MEMTUPSIZE;
604 state->memtuples = NULL;
605
606 /*
607 * After all of the other non-parallel-related state, we setup all of the
608 * state needed for each batch.
609 */
611
612 /*
613 * Initialize parallel-related state based on coordination information
614 * from caller
615 */
616 if (!coordinate)
617 {
618 /* Serial sort */
619 state->shared = NULL;
620 state->worker = -1;
621 state->nParticipants = -1;
622 }
623 else if (coordinate->isWorker)
624 {
625 /* Parallel worker produces exactly one final run from all input */
626 state->shared = coordinate->sharedsort;
628 state->nParticipants = -1;
629 }
630 else
631 {
632 /* Parallel leader state only used for final merge */
633 state->shared = coordinate->sharedsort;
634 state->worker = -1;
635 state->nParticipants = coordinate->nParticipants;
636 Assert(state->nParticipants >= 1);
637 }
638
639 MemoryContextSwitchTo(oldcontext);
640
641 return state;
642}
#define Max(x, y)
Definition c.h:1013
int64_t int64
Definition c.h:555
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
void pg_rusage_init(PGRUsage *ru0)
Definition pg_rusage.c:27
#define INITIAL_MEMTUPSIZE
Definition tuplesort.c:118
static int worker_get_identifier(Tuplesortstate *state)
Definition tuplesort.c:3253
static void tuplesort_begin_batch(Tuplesortstate *state)
Definition tuplesort.c:652

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert, CurrentMemoryContext, elog, ERROR, fb(), INITIAL_MEMTUPSIZE, Max, MemoryContextSwitchTo(), palloc0_object, pg_rusage_init(), trace_sort, tuplesort_begin_batch(), TUPLESORT_RANDOMACCESS, and worker_get_identifier().

Referenced by tuplesort_begin_cluster(), tuplesort_begin_datum(), tuplesort_begin_heap(), tuplesort_begin_index_brin(), tuplesort_begin_index_btree(), tuplesort_begin_index_gin(), tuplesort_begin_index_gist(), and tuplesort_begin_index_hash().

◆ tuplesort_begin_datum()

Tuplesortstate * tuplesort_begin_datum ( Oid  datumType,
Oid  sortOperator,
Oid  sortCollation,
bool  nullsFirstFlag,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)
extern

Definition at line 669 of file tuplesortvariants.c.

672{
674 sortopt);
677 MemoryContext oldcontext;
678 int16 typlen;
679 bool typbyval;
680
681 oldcontext = MemoryContextSwitchTo(base->maincontext);
683
684 if (trace_sort)
685 elog(LOG,
686 "begin datum sort: workMem = %d, randomAccess = %c",
687 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
688
689 base->nKeys = 1; /* always a one-column sort */
690
692 false, /* no unique check */
693 1,
694 workMem,
695 sortopt & TUPLESORT_RANDOMACCESS,
697
701 base->writetup = writetup_datum;
702 base->readtup = readtup_datum;
703 base->haveDatum1 = true;
704 base->arg = arg;
705
706 arg->datumType = datumType;
707
708 /* lookup necessary attributes of the datum type */
709 get_typlenbyval(datumType, &typlen, &typbyval);
710 arg->datumTypeLen = typlen;
711 base->tuples = !typbyval;
712
713 /* Prepare SortSupport data */
715
717 base->sortKeys->ssup_collation = sortCollation;
719
720 /*
721 * Abbreviation is possible here only for by-reference types. In theory,
722 * a pass-by-value datatype could have an abbreviated form that is cheaper
723 * to compare. In a tuple sort, we could support that, because we can
724 * always extract the original datum from the tuple as needed. Here, we
725 * can't, because a datum sort only stores a single copy of the datum; the
726 * "tuple" field of each SortTuple is NULL.
727 */
728 base->sortKeys->abbreviate = !typbyval;
729
730 PrepareSortSupportFromOrderingOp(sortOperator, base->sortKeys);
731
732 /*
733 * The "onlyKey" optimization cannot be used with abbreviated keys, since
734 * tie-breaker comparisons may be required. Typically, the optimization
735 * is only of value to pass-by-value types anyway, whereas abbreviated
736 * keys are typically only of value to pass-by-reference types.
737 */
738 if (!base->sortKeys->abbrev_converter)
739 base->onlyKey = base->sortKeys;
740
741 MemoryContextSwitchTo(oldcontext);
742
743 return state;
744}
int16_t int16
Definition c.h:553
#define palloc_object(type)
Definition fe_memutils.h:74
void get_typlenbyval(Oid typid, int16 *typlen, bool *typbyval)
Definition lsyscache.c:2401
void PrepareSortSupportFromOrderingOp(Oid orderingOp, SortSupport ssup)
Datum(* abbrev_converter)(Datum original, SortSupport ssup)
MemoryContext ssup_cxt
Definition sortsupport.h:66
SortSupport onlyKey
Definition tuplesort.h:213
static void removeabbrev_datum(Tuplesortstate *state, SortTuple *stups, int count)
static int comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_datum_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void readtup_datum(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static void writetup_datum(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
#define DATUM_SORT

References SortSupportData::abbrev_converter, SortSupportData::abbreviate, arg, TuplesortPublic::arg, TuplesortPublic::comparetup, comparetup_datum(), comparetup_datum_tiebreak(), TuplesortPublic::comparetup_tiebreak, CurrentMemoryContext, DATUM_SORT, elog, fb(), get_typlenbyval(), TuplesortPublic::haveDatum1, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, TuplesortPublic::onlyKey, palloc0_object, palloc_object, PARALLEL_SORT, PrepareSortSupportFromOrderingOp(), TuplesortPublic::readtup, readtup_datum(), TuplesortPublic::removeabbrev, removeabbrev_datum(), TuplesortPublic::sortKeys, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, trace_sort, TuplesortPublic::tuples, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_datum().

Referenced by array_sort_internal(), ExecSort(), initialize_aggregate(), ordered_set_startup(), and validate_index().
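An end-to-end sketch of a serial single-column Datum sort (the int4 input values and the Int4LessOperator constant are assumptions made for illustration):

/* Sketch: sort a stream of int4 Datums, spilling to tape if needed */
Tuplesortstate *sortstate;
Datum       value;
bool        isnull;

sortstate = tuplesort_begin_datum(INT4OID,
                                  Int4LessOperator, /* "<" for int4, assumed */
                                  InvalidOid,       /* no collation */
                                  false,            /* nulls last */
                                  work_mem, NULL, TUPLESORT_NONE);

for (int i = 0; i < 1000; i++)      /* hypothetical input */
    tuplesort_putdatum(sortstate, Int32GetDatum(i % 37), false);

tuplesort_performsort(sortstate);

while (tuplesort_getdatum(sortstate, true, false, &value, &isnull, NULL))
{
    /* with copy = false the value is only valid until the next fetch */
}

tuplesort_end(sortstate);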

◆ tuplesort_begin_heap()

Tuplesortstate * tuplesort_begin_heap ( TupleDesc  tupDesc,
int  nkeys,
AttrNumber *  attNums,
Oid *  sortOperators,
Oid *  sortCollations,
bool *  nullsFirstFlags,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)
extern

Definition at line 181 of file tuplesortvariants.c.

186{
188 sortopt);
190 MemoryContext oldcontext;
191 int i;
192
193 oldcontext = MemoryContextSwitchTo(base->maincontext);
194
195 Assert(nkeys > 0);
196
197 if (trace_sort)
198 elog(LOG,
199 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
200 nkeys, workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
201
202 base->nKeys = nkeys;
203
205 false, /* no unique check */
206 nkeys,
207 workMem,
208 sortopt & TUPLESORT_RANDOMACCESS,
210
214 base->writetup = writetup_heap;
215 base->readtup = readtup_heap;
216 base->haveDatum1 = true;
217 base->arg = tupDesc; /* assume we need not copy tupDesc */
218
219 /* Prepare SortSupport data for each column */
220 base->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
221
222 for (i = 0; i < nkeys; i++)
223 {
224 SortSupport sortKey = base->sortKeys + i;
225
226 Assert(attNums[i] != 0);
227 Assert(sortOperators[i] != 0);
228
229 sortKey->ssup_cxt = CurrentMemoryContext;
230 sortKey->ssup_collation = sortCollations[i];
231 sortKey->ssup_nulls_first = nullsFirstFlags[i];
232 sortKey->ssup_attno = attNums[i];
233 /* Convey if abbreviation optimization is applicable in principle */
234 sortKey->abbreviate = (i == 0 && base->haveDatum1);
235
237 }
238
239 /*
240 * The "onlyKey" optimization cannot be used with abbreviated keys, since
241 * tie-breaker comparisons may be required. Typically, the optimization
242 * is only of value to pass-by-value types anyway, whereas abbreviated
243 * keys are typically only of value to pass-by-reference types.
244 */
245 if (nkeys == 1 && !base->sortKeys->abbrev_converter)
246 base->onlyKey = base->sortKeys;
247
248 MemoryContextSwitchTo(oldcontext);
249
250 return state;
251}
static void readtup_heap(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static int comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void writetup_heap(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static int comparetup_heap_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void removeabbrev_heap(Tuplesortstate *state, SortTuple *stups, int count)
#define HEAP_SORT

References SortSupportData::abbrev_converter, TuplesortPublic::arg, Assert, TuplesortPublic::comparetup, comparetup_heap(), comparetup_heap_tiebreak(), TuplesortPublic::comparetup_tiebreak, CurrentMemoryContext, elog, fb(), TuplesortPublic::haveDatum1, HEAP_SORT, i, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, TuplesortPublic::onlyKey, palloc0(), PARALLEL_SORT, PrepareSortSupportFromOrderingOp(), TuplesortPublic::readtup, readtup_heap(), TuplesortPublic::removeabbrev, removeabbrev_heap(), TuplesortPublic::sortKeys, trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_heap().

Referenced by ExecIncrementalSort(), ExecSort(), initialize_aggregate(), initialize_phase(), ordered_set_startup(), and switchToPresortedPrefixMode().
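A hedged sketch of the executor-style pattern (tupDesc and slot are assumed to exist; here the sort is on attribute 1, an int4 column, ascending with nulls last):

/* Sketch: single-key heap tuple sort fed from a TupleTableSlot */
AttrNumber  attNums[1] = {1};
Oid         sortOperators[1] = {Int4LessOperator};  /* assumed int4 key */
Oid         collations[1] = {InvalidOid};
bool        nullsFirst[1] = {false};
Tuplesortstate *sortstate;

sortstate = tuplesort_begin_heap(tupDesc, 1, attNums, sortOperators,
                                 collations, nullsFirst,
                                 work_mem, NULL, TUPLESORT_NONE);

/* for each input tuple (loop omitted): */
tuplesort_puttupleslot(sortstate, slot);

tuplesort_performsort(sortstate);

while (tuplesort_gettupleslot(sortstate, true, false, slot, NULL))
{
    /* consume the sorted tuple from slot */
}

tuplesort_end(sortstate);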

◆ tuplesort_begin_index_brin()

Tuplesortstate * tuplesort_begin_index_brin ( int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)
extern

Definition at line 556 of file tuplesortvariants.c.

559{
561 sortopt);
563
564 if (trace_sort)
565 elog(LOG,
566 "begin index sort: workMem = %d, randomAccess = %c",
567 workMem,
568 sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
569
570 base->nKeys = 1; /* Only one sort column, the block number */
571
576 base->haveDatum1 = true;
577 base->arg = NULL;
578
579 return state;
580}
static void writetup_index_brin(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void removeabbrev_index_brin(Tuplesortstate *state, SortTuple *stups, int count)
static void readtup_index_brin(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static int comparetup_index_brin(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)

References TuplesortPublic::arg, TuplesortPublic::comparetup, comparetup_index_brin(), elog, fb(), TuplesortPublic::haveDatum1, LOG, TuplesortPublic::nKeys, TuplesortPublic::readtup, readtup_index_brin(), TuplesortPublic::removeabbrev, removeabbrev_index_brin(), trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_index_brin().

Referenced by _brin_parallel_scan_and_build(), and brinbuild().

◆ tuplesort_begin_index_btree()

Tuplesortstate * tuplesort_begin_index_btree ( Relation  heapRel,
Relation  indexRel,
bool  enforceUnique,
bool  uniqueNullsNotDistinct,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)
extern

Definition at line 360 of file tuplesortvariants.c.

367{
369 sortopt);
373 MemoryContext oldcontext;
374 int i;
375
376 oldcontext = MemoryContextSwitchTo(base->maincontext);
378
379 if (trace_sort)
380 elog(LOG,
381 "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
382 enforceUnique ? 't' : 'f',
383 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
384
386
388 enforceUnique,
389 base->nKeys,
390 workMem,
391 sortopt & TUPLESORT_RANDOMACCESS,
393
397 base->writetup = writetup_index;
398 base->readtup = readtup_index;
399 base->haveDatum1 = true;
400 base->arg = arg;
401
402 arg->index.heapRel = heapRel;
403 arg->index.indexRel = indexRel;
404 arg->enforceUnique = enforceUnique;
405 arg->uniqueNullsNotDistinct = uniqueNullsNotDistinct;
406
407 indexScanKey = _bt_mkscankey(indexRel, NULL);
408
409 /* Prepare SortSupport data for each column */
410 base->sortKeys = (SortSupport) palloc0(base->nKeys *
411 sizeof(SortSupportData));
412
413 for (i = 0; i < base->nKeys; i++)
414 {
415 SortSupport sortKey = base->sortKeys + i;
416 ScanKey scanKey = indexScanKey->scankeys + i;
417 bool reverse;
418
419 sortKey->ssup_cxt = CurrentMemoryContext;
420 sortKey->ssup_collation = scanKey->sk_collation;
421 sortKey->ssup_nulls_first =
422 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
423 sortKey->ssup_attno = scanKey->sk_attno;
424 /* Convey if abbreviation optimization is applicable in principle */
425 sortKey->abbreviate = (i == 0 && base->haveDatum1);
426
427 Assert(sortKey->ssup_attno != 0);
428
429 reverse = (scanKey->sk_flags & SK_BT_DESC) != 0;
430
431 PrepareSortSupportFromIndexRel(indexRel, reverse, sortKey);
432 }
433
435
436 MemoryContextSwitchTo(oldcontext);
437
438 return state;
439}
static int comparetup_index_btree_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_index_btree(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void readtup_index(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static void removeabbrev_index(Tuplesortstate *state, SortTuple *stups, int count)
#define INDEX_SORT
static void writetup_index(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)

References _bt_mkscankey(), arg, TuplesortPublic::arg, Assert, TuplesortPublic::comparetup, comparetup_index_btree(), comparetup_index_btree_tiebreak(), TuplesortPublic::comparetup_tiebreak, CurrentMemoryContext, elog, fb(), TuplesortPublic::haveDatum1, i, INDEX_SORT, IndexRelationGetNumberOfKeyAttributes, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, palloc0(), palloc_object, PARALLEL_SORT, pfree(), PrepareSortSupportFromIndexRel(), TuplesortPublic::readtup, readtup_index(), TuplesortPublic::removeabbrev, removeabbrev_index(), SK_BT_DESC, SK_BT_NULLS_FIRST, TuplesortPublic::sortKeys, trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_index().

Referenced by _bt_parallel_scan_and_sort(), and _bt_spools_heapscan().
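A hedged sketch of the serial btree-build pattern (heapRel, indexRel, and the per-tuple values/isnull arrays and tid produced by the heap scan are assumptions):

/* Sketch: sort index entries for a non-unique btree build */
Tuplesortstate *sortstate;
IndexTuple  itup;

sortstate = tuplesort_begin_index_btree(heapRel, indexRel,
                                        false,  /* enforceUnique */
                                        false,  /* uniqueNullsNotDistinct */
                                        maintenance_work_mem,
                                        NULL, TUPLESORT_NONE);

/* for each heap tuple scanned (scan omitted); tid is the tuple's TID */
tuplesort_putindextuplevalues(sortstate, indexRel, tid, values, isnull);

tuplesort_performsort(sortstate);

while ((itup = tuplesort_getindextuple(sortstate, true)) != NULL)
{
    /* pass itup to the btree load phase */
}

tuplesort_end(sortstate);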

◆ tuplesort_begin_index_gin()

Tuplesortstate * tuplesort_begin_index_gin ( Relation  heapRel,
Relation  indexRel,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)
extern

Definition at line 583 of file tuplesortvariants.c.

587{
589 sortopt);
591 MemoryContext oldcontext;
592 int i;
593 TupleDesc desc = RelationGetDescr(indexRel);
594
595 oldcontext = MemoryContextSwitchTo(base->maincontext);
596
597#ifdef TRACE_SORT
598 if (trace_sort)
599 elog(LOG,
600 "begin index sort: workMem = %d, randomAccess = %c",
601 workMem,
602 sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
603#endif
604
605 /*
606 * Multi-column GIN indexes expand the row into a separate index entry for
607 * attribute, and that's what we write into the tuplesort. But we still
608 * need to initialize sortsupport for all the attributes.
609 */
611
612 /* Prepare SortSupport data for each column */
613 base->sortKeys = (SortSupport) palloc0(base->nKeys *
614 sizeof(SortSupportData));
615
616 for (i = 0; i < base->nKeys; i++)
617 {
618 SortSupport sortKey = base->sortKeys + i;
620 Oid cmpFunc;
621
622 sortKey->ssup_cxt = CurrentMemoryContext;
623 sortKey->ssup_collation = indexRel->rd_indcollation[i];
624 sortKey->ssup_nulls_first = false;
625 sortKey->ssup_attno = i + 1;
626 sortKey->abbreviate = false;
627
628 Assert(sortKey->ssup_attno != 0);
629
630 if (!OidIsValid(sortKey->ssup_collation))
631 sortKey->ssup_collation = DEFAULT_COLLATION_OID;
632
633 /*
634 * If the compare proc isn't specified in the opclass definition, look
635 * up the index key type's default btree comparator.
636 */
637 cmpFunc = index_getprocid(indexRel, i + 1, GIN_COMPARE_PROC);
638 if (cmpFunc == InvalidOid)
639 {
640 TypeCacheEntry *typentry;
641
642 typentry = lookup_type_cache(att->atttypid,
644 if (!OidIsValid(typentry->cmp_proc_finfo.fn_oid))
647 errmsg("could not identify a comparison function for type %s",
648 format_type_be(att->atttypid))));
649
650 cmpFunc = typentry->cmp_proc_finfo.fn_oid;
651 }
652
654 }
655
660 base->haveDatum1 = false;
661 base->arg = NULL;
662
663 MemoryContextSwitchTo(oldcontext);
664
665 return state;
666}
#define OidIsValid(objectId)
Definition c.h:800
int errcode(int sqlerrcode)
Definition elog.c:864
int errmsg(const char *fmt,...)
Definition elog.c:1081
#define ereport(elevel,...)
Definition elog.h:150
char * format_type_be(Oid type_oid)
#define GIN_COMPARE_PROC
Definition gin.h:24
RegProcedure index_getprocid(Relation irel, AttrNumber attnum, uint16 procnum)
Definition indexam.c:883
FormData_pg_attribute * Form_pg_attribute
#define InvalidOid
unsigned int Oid
#define RelationGetDescr(relation)
Definition rel.h:540
void PrepareSortSupportComparisonShim(Oid cmpFunc, SortSupport ssup)
Definition sortsupport.c:68
Oid fn_oid
Definition fmgr.h:59
Oid * rd_indcollation
Definition rel.h:217
FmgrInfo cmp_proc_finfo
Definition typcache.h:77
static FormData_pg_attribute * TupleDescAttr(TupleDesc tupdesc, int i)
Definition tupdesc.h:160
static void writetup_index_gin(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void readtup_index_gin(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static void removeabbrev_index_gin(Tuplesortstate *state, SortTuple *stups, int count)
static int comparetup_index_gin(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition typcache.c:389
#define TYPECACHE_CMP_PROC_FINFO
Definition typcache.h:144

References TuplesortPublic::arg, Assert, TypeCacheEntry::cmp_proc_finfo, TuplesortPublic::comparetup, comparetup_index_gin(), CurrentMemoryContext, elog, ereport, errcode(), errmsg(), ERROR, fb(), FmgrInfo::fn_oid, format_type_be(), GIN_COMPARE_PROC, TuplesortPublic::haveDatum1, i, index_getprocid(), IndexRelationGetNumberOfKeyAttributes, InvalidOid, LOG, lookup_type_cache(), TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, OidIsValid, palloc0(), PrepareSortSupportComparisonShim(), RelationData::rd_indcollation, TuplesortPublic::readtup, readtup_index_gin(), RelationGetDescr, TuplesortPublic::removeabbrev, removeabbrev_index_gin(), TuplesortPublic::sortKeys, trace_sort, TupleDescAttr(), tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TYPECACHE_CMP_PROC_FINFO, TuplesortPublic::writetup, and writetup_index_gin().

Referenced by _gin_parallel_scan_and_build(), and ginbuild().

◆ tuplesort_begin_index_gist()

Tuplesortstate * tuplesort_begin_index_gist ( Relation  heapRel,
Relation  indexRel,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)
extern

Definition at line 493 of file tuplesortvariants.c.

498{
500 sortopt);
502 MemoryContext oldcontext;
504 int i;
505
506 oldcontext = MemoryContextSwitchTo(base->maincontext);
508
509 if (trace_sort)
510 elog(LOG,
511 "begin index sort: workMem = %d, randomAccess = %c",
512 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
513
515
519 base->writetup = writetup_index;
520 base->readtup = readtup_index;
521 base->haveDatum1 = true;
522 base->arg = arg;
523
524 arg->index.heapRel = heapRel;
525 arg->index.indexRel = indexRel;
526 arg->enforceUnique = false;
527 arg->uniqueNullsNotDistinct = false;
528
529 /* Prepare SortSupport data for each column */
530 base->sortKeys = (SortSupport) palloc0(base->nKeys *
531 sizeof(SortSupportData));
532
533 for (i = 0; i < base->nKeys; i++)
534 {
535 SortSupport sortKey = base->sortKeys + i;
536
538 sortKey->ssup_collation = indexRel->rd_indcollation[i];
539 sortKey->ssup_nulls_first = false;
540 sortKey->ssup_attno = i + 1;
541 /* Convey if abbreviation optimization is applicable in principle */
542 sortKey->abbreviate = (i == 0 && base->haveDatum1);
543
544 Assert(sortKey->ssup_attno != 0);
545
546 /* Look for a sort support function */
548 }
549
550 MemoryContextSwitchTo(oldcontext);
551
552 return state;
553}
void PrepareSortSupportFromGistIndexRel(Relation indexRel, SortSupport ssup)

References arg, TuplesortPublic::arg, Assert, TuplesortPublic::comparetup, comparetup_index_btree(), comparetup_index_btree_tiebreak(), TuplesortPublic::comparetup_tiebreak, CurrentMemoryContext, elog, fb(), TuplesortPublic::haveDatum1, i, IndexRelationGetNumberOfKeyAttributes, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, palloc0(), palloc_object, PrepareSortSupportFromGistIndexRel(), RelationData::rd_indcollation, TuplesortPublic::readtup, readtup_index(), TuplesortPublic::removeabbrev, removeabbrev_index(), TuplesortPublic::sortKeys, SortSupportData::ssup_cxt, trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_index().

Referenced by gistbuild().

◆ tuplesort_begin_index_hash()

Tuplesortstate * tuplesort_begin_index_hash ( Relation  heapRel,
Relation  indexRel,
uint32  high_mask,
uint32  low_mask,
uint32  max_buckets,
int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)
extern

Definition at line 442 of file tuplesortvariants.c.

450{
452 sortopt);
454 MemoryContext oldcontext;
456
457 oldcontext = MemoryContextSwitchTo(base->maincontext);
459
460 if (trace_sort)
461 elog(LOG,
462 "begin index sort: high_mask = 0x%x, low_mask = 0x%x, "
463 "max_buckets = 0x%x, workMem = %d, randomAccess = %c",
464 high_mask,
465 low_mask,
466 max_buckets,
467 workMem,
468 sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
469
470 base->nKeys = 1; /* Only one sort column, the hash code */
471
475 base->writetup = writetup_index;
476 base->readtup = readtup_index;
477 base->haveDatum1 = true;
478 base->arg = arg;
479
480 arg->index.heapRel = heapRel;
481 arg->index.indexRel = indexRel;
482
483 arg->high_mask = high_mask;
484 arg->low_mask = low_mask;
485 arg->max_buckets = max_buckets;
486
487 MemoryContextSwitchTo(oldcontext);
488
489 return state;
490}
static int comparetup_index_hash(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_index_hash_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)

References arg, TuplesortPublic::arg, TuplesortPublic::comparetup, comparetup_index_hash(), comparetup_index_hash_tiebreak(), TuplesortPublic::comparetup_tiebreak, elog, fb(), TuplesortPublic::haveDatum1, LOG, TuplesortPublic::maincontext, MemoryContextSwitchTo(), TuplesortPublic::nKeys, palloc_object, TuplesortPublic::readtup, readtup_index(), TuplesortPublic::removeabbrev, removeabbrev_index(), trace_sort, tuplesort_begin_common(), TUPLESORT_RANDOMACCESS, TuplesortstateGetPublic, TuplesortPublic::writetup, and writetup_index().

Referenced by _h_spoolinit().

◆ tuplesort_end()

◆ tuplesort_estimate_shared()

Size tuplesort_estimate_shared ( int  nWorkers)
extern

Definition at line 3189 of file tuplesort.c.

3190{
3192
3193 Assert(nWorkers > 0);
3194
3195 /* Make sure that BufFile shared state is MAXALIGN'd */
3198
3199 return tapesSize;
3200}
#define MAXALIGN(LEN)
Definition c.h:838
size_t Size
Definition c.h:631
Size add_size(Size s1, Size s2)
Definition shmem.c:482
Size mul_size(Size s1, Size s2)
Definition shmem.c:497

References add_size(), Assert, fb(), MAXALIGN, and mul_size().

Referenced by _brin_begin_parallel(), _bt_begin_parallel(), and _gin_begin_parallel().
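A hedged sketch of the leader/worker handshake around a dynamic shared memory TOC (toc, seg, nworkers, and the KEY_SHAREDSORT constant are assumptions; the real callers listed above do the equivalent):

/* Leader: size, allocate, and initialize the shared sort state */
Size        estsort = tuplesort_estimate_shared(nworkers);
Sharedsort *sharedsort = (Sharedsort *) shm_toc_allocate(toc, estsort);

tuplesort_initialize_shared(sharedsort, nworkers, seg);
shm_toc_insert(toc, KEY_SHAREDSORT, sharedsort);    /* KEY_SHAREDSORT is hypothetical */

/* Worker (separate backend): attach and build a SortCoordinate */
Sharedsort *wsort = (Sharedsort *) shm_toc_lookup(toc, KEY_SHAREDSORT, false);
SortCoordinate coordinate = palloc0(sizeof(SortCoordinateData));

tuplesort_attach_shared(wsort, seg);
coordinate->isWorker = true;
coordinate->sharedsort = wsort;
coordinate->nParticipants = -1;
/* coordinate is then passed to the appropriate tuplesort_begin_* routine */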

◆ tuplesort_get_stats()

void tuplesort_get_stats ( Tuplesortstate *  state,
TuplesortInstrumentation *  stats 
)
extern

Definition at line 2395 of file tuplesort.c.

2397{
2398 /*
2399 * Note: it might seem we should provide both memory and disk usage for a
2400 * disk-based sort. However, the current code doesn't track memory space
2401 * accurately once we have begun to return tuples to the caller (since we
2402 * don't account for pfree's the caller is expected to do), so we cannot
2403 * rely on availMem in a disk sort. This does not seem worth the overhead
2404 * to fix. Is it worth creating an API for the memory context code to
2405 * tell us how much is actually used in sortcontext?
2406 */
2408
2409 if (state->isMaxSpaceDisk)
2411 else
2413 stats->spaceUsed = (state->maxSpace + 1023) / 1024;
2414
2415 switch (state->maxSpaceStatus)
2416 {
2417 case TSS_SORTEDINMEM:
2418 if (state->boundUsed)
2420 else
2422 break;
2423 case TSS_SORTEDONTAPE:
2425 break;
2426 case TSS_FINALMERGE:
2428 break;
2429 default:
2431 break;
2432 }
2433}
@ SORT_SPACE_TYPE_DISK
@ SORT_SPACE_TYPE_MEMORY
@ SORT_TYPE_EXTERNAL_SORT
@ SORT_TYPE_TOP_N_HEAPSORT
@ SORT_TYPE_QUICKSORT
@ SORT_TYPE_STILL_IN_PROGRESS
@ SORT_TYPE_EXTERNAL_MERGE
TuplesortSpaceType spaceType
@ TSS_SORTEDONTAPE
Definition tuplesort.c:158
@ TSS_SORTEDINMEM
Definition tuplesort.c:157
@ TSS_FINALMERGE
Definition tuplesort.c:159
static void tuplesort_updatemax(Tuplesortstate *state)
Definition tuplesort.c:864

References SORT_SPACE_TYPE_DISK, SORT_SPACE_TYPE_MEMORY, SORT_TYPE_EXTERNAL_MERGE, SORT_TYPE_EXTERNAL_SORT, SORT_TYPE_QUICKSORT, SORT_TYPE_STILL_IN_PROGRESS, SORT_TYPE_TOP_N_HEAPSORT, TuplesortInstrumentation::sortMethod, TuplesortInstrumentation::spaceType, TuplesortInstrumentation::spaceUsed, TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and tuplesort_updatemax().

Referenced by ExecSort(), instrumentSortedGroup(), and show_sort_info().
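A short sketch of instrumentation use after the sort completes (sortstate is assumed; the message format is illustrative):

/* Sketch: report how the finished sort was performed */
TuplesortInstrumentation stats;

tuplesort_get_stats(sortstate, &stats);
elog(LOG, "sort method: %s  space used: %lld kB (%s)",
     tuplesort_method_name(stats.sortMethod),
     (long long) stats.spaceUsed,
     tuplesort_space_type_name(stats.spaceType));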

◆ tuplesort_getbrintuple()

BrinTuple * tuplesort_getbrintuple ( Tuplesortstate *  state,
Size *  len,
bool  forward 
)
extern

Definition at line 1084 of file tuplesortvariants.c.

1085{
1090
1092 stup.tuple = NULL;
1093
1094 MemoryContextSwitchTo(oldcontext);
1095
1096 if (!stup.tuple)
1097 return NULL;
1098
1099 btup = (BrinSortTuple *) stup.tuple;
1100
1101 *len = btup->tuplen;
1102
1103 return &btup->tuple;
1104}
MemoryContext sortcontext
Definition tuplesort.h:188
bool tuplesort_gettuple_common(Tuplesortstate *state, bool forward, SortTuple *stup)
Definition tuplesort.c:1366

References fb(), len, MemoryContextSwitchTo(), TuplesortPublic::sortcontext, BrinSortTuple::tuple, tuplesort_gettuple_common(), and TuplesortstateGetPublic.

Referenced by _brin_parallel_merge().

◆ tuplesort_getdatum()

bool tuplesort_getdatum ( Tuplesortstate *  state,
bool  forward,
bool  copy,
Datum *  val,
bool *  isNull,
Datum *  abbrev 
)
extern

Definition at line 1154 of file tuplesortvariants.c.

1156{
1161
1163 {
1164 MemoryContextSwitchTo(oldcontext);
1165 return false;
1166 }
1167
1168 /* Ensure we copy into caller's memory context */
1169 MemoryContextSwitchTo(oldcontext);
1170
1171 /* Record abbreviated key for caller */
1172 if (base->sortKeys->abbrev_converter && abbrev)
1173 *abbrev = stup.datum1;
1174
1175 if (stup.isnull1 || !base->tuples)
1176 {
1177 *val = stup.datum1;
1178 *isNull = stup.isnull1;
1179 }
1180 else
1181 {
1182 /* use stup.tuple because stup.datum1 may be an abbreviation */
1183 if (copy)
1184 *val = datumCopy(PointerGetDatum(stup.tuple), false,
1185 arg->datumTypeLen);
1186 else
1187 *val = PointerGetDatum(stup.tuple);
1188 *isNull = false;
1189 }
1190
1191 return true;
1192}
Datum datumCopy(Datum value, bool typByVal, int typLen)
Definition datum.c:132
long val
Definition informix.c:689
static Datum PointerGetDatum(const void *X)
Definition postgres.h:352

References SortSupportData::abbrev_converter, arg, TuplesortPublic::arg, datumCopy(), fb(), MemoryContextSwitchTo(), PointerGetDatum(), TuplesortPublic::sortcontext, TuplesortPublic::sortKeys, TuplesortPublic::tuples, tuplesort_gettuple_common(), TuplesortstateGetPublic, and val.

Referenced by array_sort_internal(), ExecSort(), heapam_index_validate_scan(), mode_final(), percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), percentile_disc_multi_final(), and process_ordered_aggregate_single().

◆ tuplesort_getgintuple()

GinTuple * tuplesort_getgintuple ( Tuplesortstate *  state,
Size *  len,
bool  forward 
)
extern

Definition at line 1107 of file tuplesortvariants.c.

1108{
1112 GinTuple *tup;
1113
1115 stup.tuple = NULL;
1116
1117 MemoryContextSwitchTo(oldcontext);
1118
1119 if (!stup.tuple)
1120 return NULL;
1121
1122 tup = (GinTuple *) stup.tuple;
1123
1124 *len = tup->tuplen;
1125
1126 return tup;
1127}

References fb(), len, MemoryContextSwitchTo(), TuplesortPublic::sortcontext, tuplesort_gettuple_common(), and TuplesortstateGetPublic.

Referenced by _gin_parallel_merge(), and _gin_process_worker_data().

◆ tuplesort_getheaptuple()

HeapTuple tuplesort_getheaptuple ( Tuplesortstate *  state,
bool  forward 
)
extern

◆ tuplesort_getindextuple()

IndexTuple tuplesort_getindextuple ( Tuplesortstate *  state,
bool  forward 
)
extern

◆ tuplesort_gettuple_common()

bool tuplesort_gettuple_common ( Tuplesortstate *  state,
bool  forward,
SortTuple *  stup 
)
extern

Definition at line 1366 of file tuplesort.c.

1368{
1369 unsigned int tuplen;
1370 size_t nmoved;
1371
1372 Assert(!WORKER(state));
1373
1374 switch (state->status)
1375 {
1376 case TSS_SORTEDINMEM:
1377 Assert(forward || state->base.sortopt & TUPLESORT_RANDOMACCESS);
1378 Assert(!state->slabAllocatorUsed);
1379 if (forward)
1380 {
1381 if (state->current < state->memtupcount)
1382 {
1383 *stup = state->memtuples[state->current++];
1384 return true;
1385 }
1386 state->eof_reached = true;
1387
1388 /*
1389 * Complain if caller tries to retrieve more tuples than
1390 * originally asked for in a bounded sort. This is because
1391 * returning EOF here might be the wrong thing.
1392 */
1393 if (state->bounded && state->current >= state->bound)
1394 elog(ERROR, "retrieved too many tuples in a bounded sort");
1395
1396 return false;
1397 }
1398 else
1399 {
1400 if (state->current <= 0)
1401 return false;
1402
1403 /*
1404 * if all tuples are fetched already then we return last
1405 * tuple, else - tuple before last returned.
1406 */
1407 if (state->eof_reached)
1408 state->eof_reached = false;
1409 else
1410 {
1411 state->current--; /* last returned tuple */
1412 if (state->current <= 0)
1413 return false;
1414 }
1415 *stup = state->memtuples[state->current - 1];
1416 return true;
1417 }
1418 break;
1419
1420 case TSS_SORTEDONTAPE:
1421 Assert(forward || state->base.sortopt & TUPLESORT_RANDOMACCESS);
1422 Assert(state->slabAllocatorUsed);
1423
1424 /*
1425 * The slot that held the tuple that we returned in previous
1426 * gettuple call can now be reused.
1427 */
1428 if (state->lastReturnedTuple)
1429 {
1430 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
1431 state->lastReturnedTuple = NULL;
1432 }
1433
1434 if (forward)
1435 {
1436 if (state->eof_reached)
1437 return false;
1438
1439 if ((tuplen = getlen(state->result_tape, true)) != 0)
1440 {
1441 READTUP(state, stup, state->result_tape, tuplen);
1442
1443 /*
1444 * Remember the tuple we return, so that we can recycle
1445 * its memory on next call. (This can be NULL, in the
1446 * !state->tuples case).
1447 */
1448 state->lastReturnedTuple = stup->tuple;
1449
1450 return true;
1451 }
1452 else
1453 {
1454 state->eof_reached = true;
1455 return false;
1456 }
1457 }
1458
1459 /*
1460 * Backward.
1461 *
1462 * if all tuples are fetched already then we return last tuple,
1463 * else - tuple before last returned.
1464 */
1465 if (state->eof_reached)
1466 {
1467 /*
1468 * Seek position is pointing just past the zero tuplen at the
1469 * end of file; back up to fetch last tuple's ending length
1470 * word. If seek fails we must have a completely empty file.
1471 */
1472 nmoved = LogicalTapeBackspace(state->result_tape,
1473 2 * sizeof(unsigned int));
1474 if (nmoved == 0)
1475 return false;
1476 else if (nmoved != 2 * sizeof(unsigned int))
1477 elog(ERROR, "unexpected tape position");
1478 state->eof_reached = false;
1479 }
1480 else
1481 {
1482 /*
1483 * Back up and fetch previously-returned tuple's ending length
1484 * word. If seek fails, assume we are at start of file.
1485 */
1486 nmoved = LogicalTapeBackspace(state->result_tape,
1487 sizeof(unsigned int));
1488 if (nmoved == 0)
1489 return false;
1490 else if (nmoved != sizeof(unsigned int))
1491 elog(ERROR, "unexpected tape position");
1492 tuplen = getlen(state->result_tape, false);
1493
1494 /*
1495 * Back up to get ending length word of tuple before it.
1496 */
1497 nmoved = LogicalTapeBackspace(state->result_tape,
1498 tuplen + 2 * sizeof(unsigned int));
1499 if (nmoved == tuplen + sizeof(unsigned int))
1500 {
1501 /*
1502 * We backed up over the previous tuple, but there was no
1503 * ending length word before it. That means that the prev
1504 * tuple is the first tuple in the file. It is now the
1505 * next to read in forward direction (not obviously right,
1506 * but that is what in-memory case does).
1507 */
1508 return false;
1509 }
1510 else if (nmoved != tuplen + 2 * sizeof(unsigned int))
1511 elog(ERROR, "bogus tuple length in backward scan");
1512 }
1513
1514 tuplen = getlen(state->result_tape, false);
1515
1516 /*
1517 * Now we have the length of the prior tuple, back up and read it.
1518 * Note: READTUP expects we are positioned after the initial
1519 * length word of the tuple, so back up to that point.
1520 */
1521 nmoved = LogicalTapeBackspace(state->result_tape,
1522 tuplen);
1523 if (nmoved != tuplen)
1524 elog(ERROR, "bogus tuple length in backward scan");
1525 READTUP(state, stup, state->result_tape, tuplen);
1526
1527 /*
1528 * Remember the tuple we return, so that we can recycle its memory
1529 * on next call. (This can be NULL, in the Datum case).
1530 */
1531 state->lastReturnedTuple = stup->tuple;
1532
1533 return true;
1534
1535 case TSS_FINALMERGE:
1536 Assert(forward);
1537 /* We are managing memory ourselves, with the slab allocator. */
1538 Assert(state->slabAllocatorUsed);
1539
1540 /*
1541 * The slab slot holding the tuple that we returned in previous
1542 * gettuple call can now be reused.
1543 */
1544 if (state->lastReturnedTuple)
1545 {
1546 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
1547 state->lastReturnedTuple = NULL;
1548 }
1549
1550 /*
1551 * This code should match the inner loop of mergeonerun().
1552 */
1553 if (state->memtupcount > 0)
1554 {
1555 int srcTapeIndex = state->memtuples[0].srctape;
1556 LogicalTape *srcTape = state->inputTapes[srcTapeIndex];
1558
1559 *stup = state->memtuples[0];
1560
1561 /*
1562 * Remember the tuple we return, so that we can recycle its
1563 * memory on next call. (This can be NULL, in the Datum case).
1564 */
1565 state->lastReturnedTuple = stup->tuple;
1566
1567 /*
1568 * Pull next tuple from tape, and replace the returned tuple
1569 * at top of the heap with it.
1570 */
1572 {
1573 /*
1574 * If no more data, we've reached end of run on this tape.
1575 * Remove the top node from the heap.
1576 */
1578 state->nInputRuns--;
1579
1580 /*
1581 * Close the tape. It'd go away at the end of the sort
1582 * anyway, but better to release the memory early.
1583 */
1585 return true;
1586 }
1587 newtup.srctape = srcTapeIndex;
1589 return true;
1590 }
1591 return false;
1592
1593 default:
1594 elog(ERROR, "invalid tuplesort state");
1595 return false; /* keep compiler quiet */
1596 }
1597}
size_t LogicalTapeBackspace(LogicalTape *lt, size_t size)
Definition logtape.c:1062
void LogicalTapeClose(LogicalTape *lt)
Definition logtape.c:733
static void tuplesort_heap_delete_top(Tuplesortstate *state)
Definition tuplesort.c:3046
static unsigned int getlen(LogicalTape *tape, bool eofOK)
Definition tuplesort.c:3128
#define READTUP(state, stup, tape, len)
Definition tuplesort.c:395
#define WORKER(state)
Definition tuplesort.c:401
static bool mergereadnext(Tuplesortstate *state, LogicalTape *srcTape, SortTuple *stup)
Definition tuplesort.c:2184
#define RELEASE_SLAB_SLOT(state, tuple)
Definition tuplesort.c:380
static void tuplesort_heap_replace_top(Tuplesortstate *state, SortTuple *tuple)
Definition tuplesort.c:3070

References Assert, elog, ERROR, fb(), getlen(), LogicalTapeBackspace(), LogicalTapeClose(), mergereadnext(), READTUP, RELEASE_SLAB_SLOT, TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, tuplesort_heap_delete_top(), tuplesort_heap_replace_top(), TUPLESORT_RANDOMACCESS, and WORKER.

Referenced by tuplesort_getbrintuple(), tuplesort_getdatum(), tuplesort_getgintuple(), tuplesort_getheaptuple(), tuplesort_getindextuple(), tuplesort_gettupleslot(), and tuplesort_skiptuples().

◆ tuplesort_gettupleslot()

bool tuplesort_gettupleslot ( Tuplesortstate *  state,
bool  forward,
bool  copy,
TupleTableSlot *  slot,
Datum *  abbrev 
)
extern

Definition at line 1004 of file tuplesortvariants.c.

1006{
1010
1012 stup.tuple = NULL;
1013
1014 MemoryContextSwitchTo(oldcontext);
1015
1016 if (stup.tuple)
1017 {
1018 /* Record abbreviated key for caller */
1019 if (base->sortKeys->abbrev_converter && abbrev)
1020 *abbrev = stup.datum1;
1021
1022 if (copy)
1023 stup.tuple = heap_copy_minimal_tuple((MinimalTuple) stup.tuple, 0);
1024
1026 return true;
1027 }
1028 else
1029 {
1030 ExecClearTuple(slot);
1031 return false;
1032 }
1033}
TupleTableSlot * ExecStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree)
MinimalTuple heap_copy_minimal_tuple(MinimalTuple mtup, Size extra)
Definition heaptuple.c:1541
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition tuptable.h:457

References SortSupportData::abbrev_converter, ExecClearTuple(), ExecStoreMinimalTuple(), fb(), heap_copy_minimal_tuple(), MemoryContextSwitchTo(), TuplesortPublic::sortcontext, TuplesortPublic::sortKeys, SortTuple::tuple, tuplesort_gettuple_common(), and TuplesortstateGetPublic.

Referenced by ExecIncrementalSort(), ExecSort(), fetch_input_tuple(), hypothetical_dense_rank_final(), hypothetical_rank_common(), process_ordered_aggregate_multi(), and switchToPresortedPrefixMode().

◆ tuplesort_initialize_shared()

void tuplesort_initialize_shared ( Sharedsort *  shared,
int  nWorkers,
dsm_segment *  seg 
)
extern

Definition at line 3210 of file tuplesort.c.

3211{
3212 int i;
3213
3214 Assert(nWorkers > 0);
3215
3216 SpinLockInit(&shared->mutex);
3217 shared->currentWorker = 0;
3218 shared->workersFinished = 0;
3219 SharedFileSetInit(&shared->fileset, seg);
3220 shared->nTapes = nWorkers;
3221 for (i = 0; i < nWorkers; i++)
3222 {
3223 shared->tapes[i].firstblocknumber = 0L;
3224 }
3225}
void SharedFileSetInit(SharedFileSet *fileset, dsm_segment *seg)
#define SpinLockInit(lock)
Definition spin.h:57
TapeShare tapes[FLEXIBLE_ARRAY_MEMBER]
Definition tuplesort.c:366
int workersFinished
Definition tuplesort.c:354
slock_t mutex
Definition tuplesort.c:343
int currentWorker
Definition tuplesort.c:353
int64 firstblocknumber
Definition logtape.h:54

References Assert, Sharedsort::currentWorker, fb(), Sharedsort::fileset, TapeShare::firstblocknumber, i, Sharedsort::mutex, Sharedsort::nTapes, SharedFileSetInit(), SpinLockInit, Sharedsort::tapes, and Sharedsort::workersFinished.

Referenced by _brin_begin_parallel(), _bt_begin_parallel(), and _gin_begin_parallel().

◆ tuplesort_markpos()

void tuplesort_markpos ( Tuplesortstate *  state)
extern

Definition at line 2331 of file tuplesort.c.

2332{
2333 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2334
2335 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2336
2337 switch (state->status)
2338 {
2339 case TSS_SORTEDINMEM:
2340 state->markpos_offset = state->current;
2341 state->markpos_eof = state->eof_reached;
2342 break;
2343 case TSS_SORTEDONTAPE:
2344 LogicalTapeTell(state->result_tape,
2345 &state->markpos_block,
2346 &state->markpos_offset);
2347 state->markpos_eof = state->eof_reached;
2348 break;
2349 default:
2350 elog(ERROR, "invalid tuplesort state");
2351 break;
2352 }
2353
2354 MemoryContextSwitchTo(oldcontext);
2355}
void LogicalTapeTell(LogicalTape *lt, int64 *blocknum, int *offset)
Definition logtape.c:1162

References Assert, elog, ERROR, LogicalTapeTell(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecSortMarkPos().
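Mark/restore only works when the sort was started with TUPLESORT_RANDOMACCESS. A hedged sketch (sortstate and slot are assumed to be set up as in the earlier examples):

/* Sketch: remember a read position and come back to it */
tuplesort_markpos(sortstate);       /* requires TUPLESORT_RANDOMACCESS */

while (tuplesort_gettupleslot(sortstate, true, false, slot, NULL))
{
    /* scan forward from the mark */
}

tuplesort_restorepos(sortstate);    /* next fetch resumes at the marked position */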

◆ tuplesort_merge_order()

int tuplesort_merge_order ( int64  allowedMem)
extern

Definition at line 1674 of file tuplesort.c.

1675{
1676 int mOrder;
1677
1678 /*----------
1679 * In the merge phase, we need buffer space for each input and output tape.
1680 * Each pass in the balanced merge algorithm reads from M input tapes, and
1681 * writes to N output tapes. Each tape consumes TAPE_BUFFER_OVERHEAD bytes
1682 * of memory. In addition to that, we want MERGE_BUFFER_SIZE workspace per
1683 * input tape.
1684 *
1685 * totalMem = M * (TAPE_BUFFER_OVERHEAD + MERGE_BUFFER_SIZE) +
1686 * N * TAPE_BUFFER_OVERHEAD
1687 *
1688 * Except for the last and next-to-last merge passes, where there can be
1689 * fewer tapes left to process, M = N. We choose M so that we have the
1690 * desired amount of memory available for the input buffers
1691 * (TAPE_BUFFER_OVERHEAD + MERGE_BUFFER_SIZE), given the total memory
1692 * available for the tape buffers (allowedMem).
1693 *
1694 * Note: you might be thinking we need to account for the memtuples[]
1695 * array in this calculation, but we effectively treat that as part of the
1696 * MERGE_BUFFER_SIZE workspace.
1697 *----------
1698 */
1699 mOrder = allowedMem /
1701
1702 /*
1703 * Even in minimum memory, use at least a MINORDER merge. On the other
1704 * hand, even when we have lots of memory, do not use more than a MAXORDER
1705 * merge. Tapes are pretty cheap, but they're not entirely free. Each
1706 * additional tape reduces the amount of memory available to build runs,
1707 * which in turn can cause the same sort to need more runs, which makes
1708 * merging slower even if it can still be done in a single pass. Also,
1709 * high order merges are quite slow due to CPU cache effects; it can be
1710 * faster to pay the I/O cost of a multi-pass merge than to perform a
1711 * single merge pass across many hundreds of tapes.
1712 */
1713 mOrder = Max(mOrder, MINORDER);
1714 mOrder = Min(mOrder, MAXORDER);
1715
1716 return mOrder;
1717}
#define Min(x, y)
Definition c.h:1019
#define TAPE_BUFFER_OVERHEAD
Definition tuplesort.c:176
#define MAXORDER
Definition tuplesort.c:175
#define MERGE_BUFFER_SIZE
Definition tuplesort.c:177
#define MINORDER
Definition tuplesort.c:174

References fb(), Max, MAXORDER, MERGE_BUFFER_SIZE, Min, MINORDER, and TAPE_BUFFER_OVERHEAD.

Referenced by cost_tuplesort(), and inittapes().
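
For costing, the merge order M bounds how many merge passes an external sort needs: with R initial runs, roughly ceil(log_M(R)) passes. A hedged sketch of that arithmetic (the helper name and the way R is obtained are illustrative, not the planner's cost_tuplesort() code):

#include "postgres.h"

#include <math.h>

#include "utils/tuplesort.h"

static double
sketch_estimate_merge_passes(int64 allowed_mem_bytes, double num_runs)
{
	int			merge_order = tuplesort_merge_order(allowed_mem_bytes);

	if (num_runs <= 1.0)
		return 0.0;				/* a single run needs no merging */

	/* each pass reduces the run count by a factor of merge_order */
	return ceil(log(num_runs) / log((double) merge_order));
}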

◆ tuplesort_method_name()

const char * tuplesort_method_name ( TuplesortMethod  m)
extern

Definition at line 2439 of file tuplesort.c.

2440{
2441 switch (m)
2442 {
2444 return "still in progress";
2446 return "top-N heapsort";
2448 return "quicksort";
2450 return "external sort";
2452 return "external merge";
2453 }
2454
2455 return "unknown";
2456}

References SORT_TYPE_EXTERNAL_MERGE, SORT_TYPE_EXTERNAL_SORT, SORT_TYPE_QUICKSORT, SORT_TYPE_STILL_IN_PROGRESS, and SORT_TYPE_TOP_N_HEAPSORT.

Referenced by show_incremental_sort_group_info(), and show_sort_info().
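
These name lookups are normally paired with tuplesort_get_stats() when reporting how a sort was performed, as show_sort_info() does for EXPLAIN. A minimal sketch (the elog-based output is illustrative; the TuplesortInstrumentation field names are assumed from this header):

#include "postgres.h"
#include "utils/tuplesort.h"

static void
sketch_report_sort(Tuplesortstate *ts)
{
	TuplesortInstrumentation stats;

	tuplesort_get_stats(ts, &stats);

	/* e.g. "sort method: quicksort  space: 1024 kB (Memory)" */
	elog(LOG, "sort method: %s  space: " INT64_FORMAT " kB (%s)",
		 tuplesort_method_name(stats.sortMethod),
		 stats.spaceUsed,
		 tuplesort_space_type_name(stats.spaceType));
}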

◆ tuplesort_performsort()

void tuplesort_performsort ( Tuplesortstate *  state)
extern

Definition at line 1259 of file tuplesort.c.

1260{
1261 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1262
1263 if (trace_sort)
1264 elog(LOG, "performsort of worker %d starting: %s",
1265 state->worker, pg_rusage_show(&state->ru_start));
1266
1267 switch (state->status)
1268 {
1269 case TSS_INITIAL:
1270
1271 /*
1272 * We were able to accumulate all the tuples within the allowed
1273 * amount of memory, or leader to take over worker tapes
1274 */
1275 if (SERIAL(state))
1276 {
1277 /* Sort in memory and we're done */
1278 tuplesort_sort_memtuples(state);
1279 state->status = TSS_SORTEDINMEM;
1280 }
1281 else if (WORKER(state))
1282 {
1283 /*
1284 * Parallel workers must still dump out tuples to tape. No
1285 * merge is required to produce single output run, though.
1286 */
1287 inittapes(state, false);
1288 dumptuples(state, true);
1289 worker_nomergeruns(state);
1290 state->status = TSS_SORTEDONTAPE;
1291 }
1292 else
1293 {
1294 /*
1295 * Leader will take over worker tapes and merge worker runs.
1296 * Note that mergeruns sets the correct state->status.
1297 */
1298 leader_takeover_tapes(state);
1299 mergeruns(state);
1300 }
1301 state->current = 0;
1302 state->eof_reached = false;
1303 state->markpos_block = 0L;
1304 state->markpos_offset = 0;
1305 state->markpos_eof = false;
1306 break;
1307
1308 case TSS_BOUNDED:
1309
1310 /*
1311 * We were able to accumulate all the tuples required for output
1312 * in memory, using a heap to eliminate excess tuples. Now we
1313 * have to transform the heap to a properly-sorted array. Note
1314 * that sort_bounded_heap sets the correct state->status.
1315 */
1316 sort_bounded_heap(state);
1317 state->current = 0;
1318 state->eof_reached = false;
1319 state->markpos_offset = 0;
1320 state->markpos_eof = false;
1321 break;
1322
1323 case TSS_BUILDRUNS:
1324
1325 /*
1326 * Finish tape-based sort. First, flush all tuples remaining in
1327 * memory out to tape; then merge until we have a single remaining
1328 * run (or, if !randomAccess and !WORKER(), one run per tape).
1329 * Note that mergeruns sets the correct state->status.
1330 */
1331 dumptuples(state, true);
1332 mergeruns(state);
1333 state->eof_reached = false;
1334 state->markpos_block = 0L;
1335 state->markpos_offset = 0;
1336 state->markpos_eof = false;
1337 break;
1338
1339 default:
1340 elog(ERROR, "invalid tuplesort state");
1341 break;
1342 }
1343
1344 if (trace_sort)
1345 {
1346 if (state->status == TSS_FINALMERGE)
1347 elog(LOG, "performsort of worker %d done (except %d-way final merge): %s",
1348 state->worker, state->nInputTapes,
1349 pg_rusage_show(&state->ru_start));
1350 else
1351 elog(LOG, "performsort of worker %d done: %s",
1352 state->worker, pg_rusage_show(&state->ru_start));
1353 }
1354
1355 MemoryContextSwitchTo(oldcontext);
1356}
const char * pg_rusage_show(const PGRUsage *ru0)
Definition pg_rusage.c:40
#define SERIAL(state)
Definition tuplesort.c:400
static void sort_bounded_heap(Tuplesortstate *state)
Definition tuplesort.c:2532
@ TSS_INITIAL
Definition tuplesort.c:154
@ TSS_BUILDRUNS
Definition tuplesort.c:156
@ TSS_BOUNDED
Definition tuplesort.c:155
static void leader_takeover_tapes(Tuplesortstate *state)
Definition tuplesort.c:3341
static void tuplesort_sort_memtuples(Tuplesortstate *state)
Definition tuplesort.c:2958
static void inittapes(Tuplesortstate *state, bool mergeruns)
Definition tuplesort.c:1761
static void worker_nomergeruns(Tuplesortstate *state)
Definition tuplesort.c:3319
static void mergeruns(Tuplesortstate *state)
Definition tuplesort.c:1913
static void dumptuples(Tuplesortstate *state, bool alltuples)
Definition tuplesort.c:2203

References dumptuples(), elog, ERROR, fb(), inittapes(), leader_takeover_tapes(), LOG, MemoryContextSwitchTo(), mergeruns(), pg_rusage_show(), SERIAL, sort_bounded_heap(), trace_sort, TSS_BOUNDED, TSS_BUILDRUNS, TSS_FINALMERGE, TSS_INITIAL, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, tuplesort_sort_memtuples(), WORKER, and worker_nomergeruns().

Referenced by _brin_parallel_merge(), _brin_parallel_scan_and_build(), _bt_leafbuild(), _bt_parallel_scan_and_sort(), _gin_parallel_merge(), _gin_parallel_scan_and_build(), _gin_process_worker_data(), _h_indexbuild(), array_sort_internal(), ExecIncrementalSort(), ExecSort(), gistbuild(), heapam_relation_copy_for_cluster(), hypothetical_dense_rank_final(), hypothetical_rank_common(), initialize_phase(), mode_final(), percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), percentile_disc_multi_final(), process_ordered_aggregate_multi(), process_ordered_aggregate_single(), switchToPresortedPrefixMode(), and validate_index().
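
tuplesort_performsort() is the pivot of the standard usage cycle: load tuples, sort, then fetch. A condensed sketch of that cycle for heap tuples (error handling and the pull from a child plan are omitted; the tuplesort_gettupleslot() signature is assumed as in the earlier note):

#include "postgres.h"
#include "miscadmin.h"
#include "utils/tuplesort.h"

static void
sketch_sort_slots(TupleDesc tupDesc, int nkeys, AttrNumber *attNums,
				  Oid *sortOperators, Oid *collations, bool *nullsFirst,
				  TupleTableSlot *inslot, TupleTableSlot *outslot)
{
	Tuplesortstate *ts = tuplesort_begin_heap(tupDesc, nkeys, attNums,
											  sortOperators, collations,
											  nullsFirst, work_mem,
											  NULL, TUPLESORT_NONE);

	/* load phase: normally one call per input tuple */
	tuplesort_puttupleslot(ts, inslot);

	/* no more input: sort everything accumulated so far */
	tuplesort_performsort(ts);

	/* fetch phase: returns false once the sorted stream is exhausted */
	while (tuplesort_gettupleslot(ts, true, false, outslot, NULL))
	{
		/* consume outslot */
	}

	tuplesort_end(ts);
}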

◆ tuplesort_putbrintuple()

void tuplesort_putbrintuple ( Tuplesortstate *  state,
BrinTuple *  tuple,
Size  size 
)
extern

Definition at line 871 of file tuplesortvariants.c.

872{
873 SortTuple stup;
874 BrinSortTuple *bstup;
875 TuplesortPublic *base = TuplesortstateGetPublic(state);
876 MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
877 Size tuplen;
878
879 /* allocate space for the whole BRIN sort tuple */
880 bstup = palloc(BRINSORTTUPLE_SIZE(size));
881
882 bstup->tuplen = size;
883 memcpy(&bstup->tuple, tuple, size);
884
885 stup.tuple = bstup;
886 stup.datum1 = UInt32GetDatum(tuple->bt_blkno);
887 stup.isnull1 = false;
888
889 /* GetMemoryChunkSpace is not supported for bump contexts */
890 if (TupleSortUseBumpTupleCxt(base->sortopt))
891 tuplen = MAXALIGN(BRINSORTTUPLE_SIZE(size));
892 else
893 tuplen = GetMemoryChunkSpace(bstup);
894
895 tuplesort_puttuple_common(state, &stup,
896 base->sortKeys &&
897 base->sortKeys->abbrev_converter &&
898 !stup.isnull1, tuplen);
899
900 MemoryContextSwitchTo(oldcontext);
901}
Size GetMemoryChunkSpace(void *pointer)
Definition mcxt.c:770
void * palloc(Size size)
Definition mcxt.c:1387
static Datum UInt32GetDatum(uint32 X)
Definition postgres.h:242
BlockNumber bt_blkno
Definition brin_tuple.h:66
MemoryContext tuplecontext
Definition tuplesort.h:189
void tuplesort_puttuple_common(Tuplesortstate *state, SortTuple *tuple, bool useAbbrev, Size tuplen)
Definition tuplesort.c:1065
#define TupleSortUseBumpTupleCxt(opt)
Definition tuplesort.h:82
#define BRINSORTTUPLE_SIZE(len)

References SortSupportData::abbrev_converter, BRINSORTTUPLE_SIZE, BrinTuple::bt_blkno, fb(), GetMemoryChunkSpace(), MAXALIGN, MemoryContextSwitchTo(), palloc(), TuplesortPublic::sortKeys, TuplesortPublic::sortopt, TuplesortPublic::tuplecontext, tuplesort_puttuple_common(), TuplesortstateGetPublic, TupleSortUseBumpTupleCxt, and UInt32GetDatum().

Referenced by form_and_spill_tuple().

◆ tuplesort_putdatum()

void tuplesort_putdatum ( Tuplesortstate *  state,
Datum  val,
bool  isNull 
)
extern

Definition at line 940 of file tuplesortvariants.c.

941{
942 TuplesortPublic *base = TuplesortstateGetPublic(state);
943 TuplesortDatumArg *arg = (TuplesortDatumArg *) base->arg;
944 MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
945 SortTuple stup;
946
947 /*
948 * Pass-by-value types or null values are just stored directly in
949 * stup.datum1 (and stup.tuple is not used and set to NULL).
950 *
951 * Non-null pass-by-reference values need to be copied into memory we
952 * control, and possibly abbreviated. The copied value is pointed to by
953 * stup.tuple and is treated as the canonical copy (e.g. to return via
954 * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
955 * abbreviated value if abbreviation is happening, otherwise it's
956 * identical to stup.tuple.
957 */
958
959 if (isNull || !base->tuples)
960 {
961 /*
962 * Set datum1 to zeroed representation for NULLs (to be consistent,
963 * and to support cheap inequality tests for NULL abbreviated keys).
964 */
965 stup.datum1 = !isNull ? val : (Datum) 0;
966 stup.isnull1 = isNull;
967 stup.tuple = NULL; /* no separate storage */
968 }
969 else
970 {
971 stup.isnull1 = false;
972 stup.datum1 = datumCopy(val, false, arg->datumTypeLen);
973 stup.tuple = DatumGetPointer(stup.datum1);
974 }
975
976 tuplesort_puttuple_common(state, &stup,
977 base->tuples &&
978 base->sortKeys->abbrev_converter && !isNull, 0);
979
980 MemoryContextSwitchTo(oldcontext);
981}
uint64_t Datum
Definition postgres.h:70
static Pointer DatumGetPointer(Datum X)
Definition postgres.h:342
Datum datum1
Definition tuplesort.h:117

References SortSupportData::abbrev_converter, arg, TuplesortPublic::arg, SortTuple::datum1, datumCopy(), DatumGetPointer(), fb(), MemoryContextSwitchTo(), TuplesortPublic::sortKeys, TuplesortPublic::tuplecontext, TuplesortPublic::tuples, tuplesort_puttuple_common(), TuplesortstateGetPublic, and val.

Referenced by array_sort_internal(), ExecEvalAggOrderedTransDatum(), ExecSort(), ordered_set_transition(), and validate_index_callback().
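
The same cycle applies to single-Datum sorts via the datum variants. A hedged sketch sorting int4 values follows; tuplesort_begin_datum() and tuplesort_getdatum() belong to the same API but are not reproduced in this excerpt, so their parameter lists here are assumptions (in particular the "copy" argument of tuplesort_getdatum() is version-dependent).

#include "postgres.h"
#include "catalog/pg_operator_d.h"
#include "catalog/pg_type_d.h"
#include "miscadmin.h"
#include "utils/tuplesort.h"

static void
sketch_sort_int4_datums(Datum *values, int nvalues)
{
	Tuplesortstate *ts;
	Datum		val;
	bool		isnull;

	/* Int4LessOperator is the btree "<" operator for int4 */
	ts = tuplesort_begin_datum(INT4OID, Int4LessOperator, InvalidOid,
							   false, work_mem, NULL, TUPLESORT_NONE);

	for (int i = 0; i < nvalues; i++)
		tuplesort_putdatum(ts, values[i], false);

	tuplesort_performsort(ts);

	/* last argument receives the abbreviated key, if any */
	while (tuplesort_getdatum(ts, true, false, &val, &isnull, NULL))
	{
		/* consume val */
	}

	tuplesort_end(ts);
}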

◆ tuplesort_putgintuple()

void tuplesort_putgintuple ( Tuplesortstate *  state,
GinTuple *  tuple,
Size  size 
)
extern

Definition at line 904 of file tuplesortvariants.c.

905{
906 SortTuple stup;
907 GinTuple *ctup;
908 TuplesortPublic *base = TuplesortstateGetPublic(state);
909 MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
910 Size tuplen;
911
912 /* copy the GinTuple into the right memory context */
913 ctup = palloc(size);
914 memcpy(ctup, tuple, size);
915
916 stup.tuple = ctup;
917 stup.datum1 = (Datum) 0;
918 stup.isnull1 = false;
919
920 /* GetMemoryChunkSpace is not supported for bump contexts */
921 if (TupleSortUseBumpTupleCxt(base->sortopt))
922 tuplen = MAXALIGN(size);
923 else
924 tuplen = GetMemoryChunkSpace(ctup);
925
926 tuplesort_puttuple_common(state, &stup,
927 base->sortKeys &&
928 base->sortKeys->abbrev_converter &&
929 !stup.isnull1, tuplen);
930
931 MemoryContextSwitchTo(oldcontext);
932}

References SortSupportData::abbrev_converter, fb(), GetMemoryChunkSpace(), MAXALIGN, MemoryContextSwitchTo(), palloc(), TuplesortPublic::sortKeys, TuplesortPublic::sortopt, TuplesortPublic::tuplecontext, tuplesort_puttuple_common(), TuplesortstateGetPublic, and TupleSortUseBumpTupleCxt.

Referenced by _gin_process_worker_data(), and ginFlushBuildState().

◆ tuplesort_putheaptuple()

void tuplesort_putheaptuple ( Tuplesortstate *  state,
HeapTuple  tup 
)
extern

Definition at line 792 of file tuplesortvariants.c.

793{
794 SortTuple stup;
795 TuplesortPublic *base = TuplesortstateGetPublic(state);
796 TuplesortClusterArg *arg = (TuplesortClusterArg *) base->arg;
797 MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
798 Size tuplen;
799
800 /* copy the tuple into sort storage */
801 tup = heap_copytuple(tup);
802 stup.tuple = tup;
803
804 /*
805 * set up first-column key value, and potentially abbreviate, if it's a
806 * simple column
807 */
808 if (base->haveDatum1)
809 {
810 stup.datum1 = heap_getattr(tup,
811 arg->indexInfo->ii_IndexAttrNumbers[0],
812 arg->tupDesc,
813 &stup.isnull1);
814 }
815
816 /* GetMemoryChunkSpace is not supported for bump contexts */
817 if (TupleSortUseBumpTupleCxt(base->sortopt))
818 tuplen = MAXALIGN(HEAPTUPLESIZE + tup->t_len);
819 else
820 tuplen = GetMemoryChunkSpace(tup);
821
823 base->haveDatum1 &&
824 base->sortKeys->abbrev_converter &&
825 !stup.isnull1, tuplen);
826
827 MemoryContextSwitchTo(oldcontext);
828}
HeapTuple heap_copytuple(HeapTuple tuple)
Definition heaptuple.c:778
#define HEAPTUPLESIZE
Definition htup.h:73
static Datum heap_getattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)

References SortSupportData::abbrev_converter, arg, TuplesortPublic::arg, fb(), GetMemoryChunkSpace(), TuplesortPublic::haveDatum1, heap_copytuple(), heap_getattr(), HEAPTUPLESIZE, MAXALIGN, MemoryContextSwitchTo(), TuplesortPublic::sortKeys, TuplesortPublic::sortopt, TuplesortPublic::tuplecontext, tuplesort_puttuple_common(), TuplesortstateGetPublic, and TupleSortUseBumpTupleCxt.

Referenced by heapam_relation_copy_for_cluster().

◆ tuplesort_putindextuplevalues()

void tuplesort_putindextuplevalues ( Tuplesortstate *  state,
Relation  rel,
const ItemPointerData *  self,
const Datum *  values,
const bool *  isnull 
)
extern

Definition at line 835 of file tuplesortvariants.c.

838{
839 SortTuple stup;
840 IndexTuple tuple;
841 TuplesortPublic *base = TuplesortstateGetPublic(state);
842 TuplesortIndexArg *arg = (TuplesortIndexArg *) base->arg;
843 Size tuplen;
844
845 stup.tuple = index_form_tuple_context(RelationGetDescr(rel), values,
846 isnull, base->tuplecontext);
847 tuple = ((IndexTuple) stup.tuple);
848 tuple->t_tid = *self;
849 /* set up first-column key value */
850 stup.datum1 = index_getattr(tuple,
851 1,
852 RelationGetDescr(arg->indexRel),
853 &stup.isnull1);
854
855 /* GetMemoryChunkSpace is not supported for bump contexts */
856 if (TupleSortUseBumpTupleCxt(base->sortopt))
857 tuplen = MAXALIGN(tuple->t_info & INDEX_SIZE_MASK);
858 else
859 tuplen = GetMemoryChunkSpace(tuple);
860
861 tuplesort_puttuple_common(state, &stup,
862 base->sortKeys &&
863 base->sortKeys->abbrev_converter &&
864 !stup.isnull1, tuplen);
865}
static Datum values[MAXATTR]
Definition bootstrap.c:147
IndexTuple index_form_tuple_context(TupleDesc tupleDescriptor, const Datum *values, const bool *isnull, MemoryContext context)
Definition indextuple.c:65
IndexTupleData * IndexTuple
Definition itup.h:53
static Datum index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition itup.h:131
#define INDEX_SIZE_MASK
Definition itup.h:65

References SortSupportData::abbrev_converter, arg, TuplesortPublic::arg, fb(), GetMemoryChunkSpace(), index_form_tuple_context(), index_getattr(), INDEX_SIZE_MASK, MAXALIGN, RelationGetDescr, TuplesortPublic::sortKeys, TuplesortPublic::sortopt, IndexTupleData::t_info, IndexTupleData::t_tid, TuplesortPublic::tuplecontext, tuplesort_puttuple_common(), TuplesortstateGetPublic, TupleSortUseBumpTupleCxt, and values.

Referenced by _bt_spool(), _h_spool(), and gistSortedBuildCallback().
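
This is the call an index-build spool uses to hand one index entry to the sort, as in _bt_spool(). A minimal sketch of such an ambuild callback (the SketchSpool struct and the callback wiring are illustrative assumptions; the parameter list follows the IndexBuildCallback convention):

#include "postgres.h"
#include "utils/tuplesort.h"

typedef struct SketchSpool
{
	Relation	indexrel;
	Tuplesortstate *sortstate;
} SketchSpool;

static void
sketch_build_callback(Relation index, ItemPointer tid,
					  Datum *values, bool *isnull,
					  bool tupleIsAlive, void *state)
{
	SketchSpool *spool = (SketchSpool *) state;

	/* forms the IndexTuple in the sort's tuple context and spools it */
	tuplesort_putindextuplevalues(spool->sortstate, spool->indexrel,
								  tid, values, isnull);
}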

◆ tuplesort_puttuple_common()

void tuplesort_puttuple_common ( Tuplesortstate *  state,
SortTuple *  tuple,
bool  useAbbrev,
Size  tuplen 
)
extern

Definition at line 1065 of file tuplesort.c.

1067{
1068 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1069
1070 Assert(!LEADER(state));
1071
1072 /* account for the memory used for this tuple */
1073 USEMEM(state, tuplen);
1074 state->tupleMem += tuplen;
1075
1076 if (!useAbbrev)
1077 {
1078 /*
1079 * Leave ordinary Datum representation, or NULL value. If there is a
1080 * converter it won't expect NULL values, and cost model is not
1081 * required to account for NULL, so in that case we avoid calling
1082 * converter and just set datum1 to zeroed representation (to be
1083 * consistent, and to support cheap inequality tests for NULL
1084 * abbreviated keys).
1085 */
1086 }
1087 else if (!consider_abort_common(state))
1088 {
1089 /* Store abbreviated key representation */
1090 tuple->datum1 = state->base.sortKeys->abbrev_converter(tuple->datum1,
1091 state->base.sortKeys);
1092 }
1093 else
1094 {
1095 /*
1096 * Set state to be consistent with never trying abbreviation.
1097 *
1098 * Alter datum1 representation in already-copied tuples, so as to
1099 * ensure a consistent representation (current tuple was just
1100 * handled). It does not matter if some dumped tuples are already
1101 * sorted on tape, since serialized tuples lack abbreviated keys
1102 * (TSS_BUILDRUNS state prevents control reaching here in any case).
1103 */
1104 REMOVEABBREV(state, state->memtuples, state->memtupcount);
1105 }
1106
1107 switch (state->status)
1108 {
1109 case TSS_INITIAL:
1110
1111 /*
1112 * Save the tuple into the unsorted array. First, grow the array
1113 * as needed. Note that we try to grow the array when there is
1114 * still one free slot remaining --- if we fail, there'll still be
1115 * room to store the incoming tuple, and then we'll switch to
1116 * tape-based operation.
1117 */
1118 if (state->memtupcount >= state->memtupsize - 1)
1119 {
1120 (void) grow_memtuples(state);
1121 Assert(state->memtupcount < state->memtupsize);
1122 }
1123 state->memtuples[state->memtupcount++] = *tuple;
1124
1125 /*
1126 * Check if it's time to switch over to a bounded heapsort. We do
1127 * so if the input tuple count exceeds twice the desired tuple
1128 * count (this is a heuristic for where heapsort becomes cheaper
1129 * than a quicksort), or if we've just filled workMem and have
1130 * enough tuples to meet the bound.
1131 *
1132 * Note that once we enter TSS_BOUNDED state we will always try to
1133 * complete the sort that way. In the worst case, if later input
1134 * tuples are larger than earlier ones, this might cause us to
1135 * exceed workMem significantly.
1136 */
1137 if (state->bounded &&
1138 (state->memtupcount > state->bound * 2 ||
1139 (state->memtupcount > state->bound && LACKMEM(state))))
1140 {
1141 if (trace_sort)
1142 elog(LOG, "switching to bounded heapsort at %d tuples: %s",
1143 state->memtupcount,
1144 pg_rusage_show(&state->ru_start));
1145 make_bounded_heap(state);
1146 MemoryContextSwitchTo(oldcontext);
1147 return;
1148 }
1149
1150 /*
1151 * Done if we still fit in available memory and have array slots.
1152 */
1153 if (state->memtupcount < state->memtupsize && !LACKMEM(state))
1154 {
1155 MemoryContextSwitchTo(oldcontext);
1156 return;
1157 }
1158
1159 /*
1160 * Nope; time to switch to tape-based operation.
1161 */
1162 inittapes(state, true);
1163
1164 /*
1165 * Dump all tuples.
1166 */
1167 dumptuples(state, false);
1168 break;
1169
1170 case TSS_BOUNDED:
1171
1172 /*
1173 * We don't want to grow the array here, so check whether the new
1174 * tuple can be discarded before putting it in. This should be a
1175 * good speed optimization, too, since when there are many more
1176 * input tuples than the bound, most input tuples can be discarded
1177 * with just this one comparison. Note that because we currently
1178 * have the sort direction reversed, we must check for <= not >=.
1179 */
1180 if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
1181 {
1182 /* new tuple <= top of the heap, so we can discard it */
1183 free_sort_tuple(state, tuple);
1184 CHECK_FOR_INTERRUPTS();
1185 }
1186 else
1187 {
1188 /* discard top of heap, replacing it with the new tuple */
1189 free_sort_tuple(state, &state->memtuples[0]);
1190 tuplesort_heap_replace_top(state, tuple);
1191 }
1192 break;
1193
1194 case TSS_BUILDRUNS:
1195
1196 /*
1197 * Save the tuple into the unsorted array (there must be space)
1198 */
1199 state->memtuples[state->memtupcount++] = *tuple;
1200
1201 /*
1202 * If we are over the memory limit, dump all tuples.
1203 */
1204 dumptuples(state, false);
1205 break;
1206
1207 default:
1208 elog(ERROR, "invalid tuplesort state");
1209 break;
1210 }
1211 MemoryContextSwitchTo(oldcontext);
1212}
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:123
#define COMPARETUP(state, a, b)
Definition tuplesort.c:393
static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
Definition tuplesort.c:3400
#define REMOVEABBREV(state, stup, count)
Definition tuplesort.c:392
#define LACKMEM(state)
Definition tuplesort.c:397
#define USEMEM(state, amt)
Definition tuplesort.c:398
static bool grow_memtuples(Tuplesortstate *state)
Definition tuplesort.c:948
static void make_bounded_heap(Tuplesortstate *state)
Definition tuplesort.c:2483
#define LEADER(state)
Definition tuplesort.c:402
static bool consider_abort_common(Tuplesortstate *state)
Definition tuplesort.c:1215

References Assert, CHECK_FOR_INTERRUPTS, COMPARETUP, consider_abort_common(), SortTuple::datum1, dumptuples(), elog, ERROR, fb(), free_sort_tuple(), grow_memtuples(), inittapes(), LACKMEM, LEADER, LOG, make_bounded_heap(), MemoryContextSwitchTo(), pg_rusage_show(), REMOVEABBREV, trace_sort, TSS_BOUNDED, TSS_BUILDRUNS, TSS_INITIAL, tuplesort_heap_replace_top(), and USEMEM.

Referenced by tuplesort_putbrintuple(), tuplesort_putdatum(), tuplesort_putgintuple(), tuplesort_putheaptuple(), tuplesort_putindextuplevalues(), and tuplesort_puttupleslot().

◆ tuplesort_puttupleslot()

void tuplesort_puttupleslot ( Tuplesortstate *  state,
TupleTableSlot *  slot 
)
extern

Definition at line 752 of file tuplesortvariants.c.

753{
754 TuplesortPublic *base = TuplesortstateGetPublic(state);
755 MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
756 TupleDesc tupDesc = (TupleDesc) base->arg;
757 SortTuple stup;
758 MinimalTuple tuple;
759 HeapTupleData htup;
760 Size tuplen;
761
762 /* copy the tuple into sort storage */
763 tuple = ExecCopySlotMinimalTuple(slot);
764 stup.tuple = tuple;
765 /* set up first-column key value */
766 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
767 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
768 stup.datum1 = heap_getattr(&htup,
769 base->sortKeys[0].ssup_attno,
770 tupDesc,
771 &stup.isnull1);
772
773 /* GetMemoryChunkSpace is not supported for bump contexts */
774 if (TupleSortUseBumpTupleCxt(base->sortopt))
775 tuplen = MAXALIGN(tuple->t_len);
776 else
777 tuplen = GetMemoryChunkSpace(tuple);
778
779 tuplesort_puttuple_common(state, &stup,
780 base->sortKeys->abbrev_converter &&
781 !stup.isnull1, tuplen);
782
783 MemoryContextSwitchTo(oldcontext);
784}
HeapTupleHeaderData * HeapTupleHeader
Definition htup.h:23
#define MINIMAL_TUPLE_OFFSET
uint32 t_len
Definition htup.h:64
HeapTupleHeader t_data
Definition htup.h:68
struct TupleDescData * TupleDesc
Definition tupdesc.h:145
static MinimalTuple ExecCopySlotMinimalTuple(TupleTableSlot *slot)
Definition tuptable.h:495

References SortSupportData::abbrev_converter, TuplesortPublic::arg, ExecCopySlotMinimalTuple(), fb(), GetMemoryChunkSpace(), heap_getattr(), MAXALIGN, MemoryContextSwitchTo(), MINIMAL_TUPLE_OFFSET, TuplesortPublic::sortKeys, TuplesortPublic::sortopt, SortSupportData::ssup_attno, HeapTupleData::t_data, HeapTupleData::t_len, MinimalTupleData::t_len, TuplesortPublic::tuplecontext, tuplesort_puttuple_common(), TuplesortstateGetPublic, and TupleSortUseBumpTupleCxt.

Referenced by ExecEvalAggOrderedTransTuple(), ExecIncrementalSort(), ExecSort(), fetch_input_tuple(), hypothetical_dense_rank_final(), hypothetical_rank_common(), ordered_set_transition_multi(), and switchToPresortedPrefixMode().

◆ tuplesort_readtup_alloc()

void * tuplesort_readtup_alloc ( Tuplesortstate *  state,
Size  tuplen 
)
extern

Definition at line 3155 of file tuplesort.c.

3156{
3157 SlabSlot *buf;
3158
3159 /*
3160 * We pre-allocate enough slots in the slab arena that we should never run
3161 * out.
3162 */
3163 Assert(state->slabFreeHead);
3164
3165 if (tuplen > SLAB_SLOT_SIZE || !state->slabFreeHead)
3166 return MemoryContextAlloc(state->base.sortcontext, tuplen);
3167 else
3168 {
3169 buf = state->slabFreeHead;
3170 /* Reuse this slot */
3171 state->slabFreeHead = buf->nextfree;
3172
3173 return buf;
3174 }
3175}
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
static char buf[DEFAULT_XLOG_SEG_SIZE]
#define SLAB_SLOT_SIZE
Definition tuplesort.c:140

References Assert, buf, MemoryContextAlloc(), and SLAB_SLOT_SIZE.

Referenced by readtup_cluster(), readtup_datum(), readtup_heap(), readtup_index(), readtup_index_brin(), and readtup_index_gin().

◆ tuplesort_rescan()

void tuplesort_rescan ( Tuplesortstate *  state)
extern

Definition at line 2298 of file tuplesort.c.

2299{
2300 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2301
2302 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2303
2304 switch (state->status)
2305 {
2306 case TSS_SORTEDINMEM:
2307 state->current = 0;
2308 state->eof_reached = false;
2309 state->markpos_offset = 0;
2310 state->markpos_eof = false;
2311 break;
2312 case TSS_SORTEDONTAPE:
2313 LogicalTapeRewindForRead(state->result_tape, 0);
2314 state->eof_reached = false;
2315 state->markpos_block = 0L;
2316 state->markpos_offset = 0;
2317 state->markpos_eof = false;
2318 break;
2319 default:
2320 elog(ERROR, "invalid tuplesort state");
2321 break;
2322 }
2323
2324 MemoryContextSwitchTo(oldcontext);
2325}
void LogicalTapeRewindForRead(LogicalTape *lt, size_t buffer_size)
Definition logtape.c:846

References Assert, elog, ERROR, fb(), LogicalTapeRewindForRead(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecReScanSort(), mode_final(), percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), and percentile_disc_multi_final().
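
Like mark/restore, rescanning requires TUPLESORT_RANDOMACCESS: it rewinds the already-performed sort so the result can be read again, which is how the ordered-set aggregates make multiple passes. A brief sketch (tuplesort_gettupleslot() hedged as before):

#include "postgres.h"
#include "utils/tuplesort.h"

static void
sketch_two_passes(Tuplesortstate *ts, TupleTableSlot *slot)
{
	/* first pass over the sorted output */
	while (tuplesort_gettupleslot(ts, true, false, slot, NULL))
		;

	/* rewind to the beginning; no re-sort happens */
	tuplesort_rescan(ts);

	/* second pass sees the same sorted sequence again */
	while (tuplesort_gettupleslot(ts, true, false, slot, NULL))
		;
}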

◆ tuplesort_reset()

void tuplesort_reset ( Tuplesortstate *  state)
extern

Definition at line 915 of file tuplesort.c.

916{
917 tuplesort_updatemax(state);
918 tuplesort_free(state);
919
920 /*
921 * After we've freed up per-batch memory, re-setup all of the state common
922 * to both the first batch and any subsequent batch.
923 */
924 tuplesort_begin_batch(state);
925
926 state->lastReturnedTuple = NULL;
927 state->slabMemoryBegin = NULL;
928 state->slabMemoryEnd = NULL;
929 state->slabFreeHead = NULL;
930}

References fb(), tuplesort_begin_batch(), tuplesort_free(), and tuplesort_updatemax().

Referenced by ExecIncrementalSort(), ExecReScanIncrementalSort(), and switchToPresortedPrefixMode().
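
tuplesort_reset() is what lets incremental sort reuse one Tuplesortstate across many small batches instead of paying tuplesort_begin/tuplesort_end per group. A simplified sketch of that shape (batch boundaries and slot handling are hand-waved; tuplesort_gettupleslot() hedged as before):

#include "postgres.h"
#include "utils/tuplesort.h"

static void
sketch_sort_batches(Tuplesortstate *ts, TupleTableSlot *slot, int nbatches)
{
	for (int batch = 0; batch < nbatches; batch++)
	{
		/* load this batch's tuples (one shown) */
		tuplesort_puttupleslot(ts, slot);

		tuplesort_performsort(ts);

		/* drain the sorted batch */
		while (tuplesort_gettupleslot(ts, true, false, slot, NULL))
			;

		/* release per-batch memory; the state is ready for new input */
		tuplesort_reset(ts);
	}

	tuplesort_end(ts);
}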

◆ tuplesort_restorepos()

void tuplesort_restorepos ( Tuplesortstate *  state)
extern

Definition at line 2362 of file tuplesort.c.

2363{
2364 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2365
2366 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2367
2368 switch (state->status)
2369 {
2370 case TSS_SORTEDINMEM:
2371 state->current = state->markpos_offset;
2372 state->eof_reached = state->markpos_eof;
2373 break;
2374 case TSS_SORTEDONTAPE:
2375 LogicalTapeSeek(state->result_tape,
2376 state->markpos_block,
2377 state->markpos_offset);
2378 state->eof_reached = state->markpos_eof;
2379 break;
2380 default:
2381 elog(ERROR, "invalid tuplesort state");
2382 break;
2383 }
2384
2385 MemoryContextSwitchTo(oldcontext);
2386}
void LogicalTapeSeek(LogicalTape *lt, int64 blocknum, int offset)
Definition logtape.c:1133

References Assert, elog, ERROR, LogicalTapeSeek(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecSortRestrPos().

◆ tuplesort_set_bound()

void tuplesort_set_bound ( Tuplesortstate *  state,
int64  bound 
)
extern

Definition at line 734 of file tuplesort.c.

735{
736 /* Assert we're called before loading any tuples */
737 Assert(state->status == TSS_INITIAL && state->memtupcount == 0);
738 /* Assert we allow bounded sorts */
739 Assert(state->base.sortopt & TUPLESORT_ALLOWBOUNDED);
740 /* Can't set the bound twice, either */
741 Assert(!state->bounded);
742 /* Also, this shouldn't be called in a parallel worker */
743 Assert(!WORKER(state));
744
745 /* Parallel leader allows but ignores hint */
746 if (LEADER(state))
747 return;
748
749#ifdef DEBUG_BOUNDED_SORT
750 /* Honor GUC setting that disables the feature (for easy testing) */
751 if (!optimize_bounded_sort)
752 return;
753#endif
754
755 /* We want to be able to compute bound * 2, so limit the setting */
756 if (bound > (int64) (INT_MAX / 2))
757 return;
758
759 state->bounded = true;
760 state->bound = (int) bound;
761
762 /*
763 * Bounded sorts are not an effective target for abbreviated key
764 * optimization. Disable by setting state to be consistent with no
765 * abbreviation support.
766 */
767 state->base.sortKeys->abbrev_converter = NULL;
768 if (state->base.sortKeys->abbrev_full_comparator)
769 state->base.sortKeys->comparator = state->base.sortKeys->abbrev_full_comparator;
770
771 /* Not strictly necessary, but be tidy */
772 state->base.sortKeys->abbrev_abort = NULL;
773 state->base.sortKeys->abbrev_full_comparator = NULL;
774}
#define TUPLESORT_ALLOWBOUNDED
Definition tuplesort.h:73

References Assert, fb(), LEADER, TSS_INITIAL, TUPLESORT_ALLOWBOUNDED, and WORKER.

Referenced by ExecIncrementalSort(), ExecSort(), and switchToPresortedPrefixMode().
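
The bound is a hint for LIMIT-style queries: it must be installed right after the tuplesort_begin_* call, before any tuple is loaded, and the sort must have been created with TUPLESORT_ALLOWBOUNDED. A condensed sketch:

#include "postgres.h"
#include "miscadmin.h"
#include "utils/tuplesort.h"

static Tuplesortstate *
sketch_begin_top_n(TupleDesc tupDesc, int nkeys, AttrNumber *attNums,
				   Oid *sortOperators, Oid *collations, bool *nullsFirst,
				   int64 limit)
{
	Tuplesortstate *ts = tuplesort_begin_heap(tupDesc, nkeys, attNums,
											  sortOperators, collations,
											  nullsFirst, work_mem, NULL,
											  TUPLESORT_ALLOWBOUNDED);

	/* must precede the first tuplesort_puttupleslot() call */
	tuplesort_set_bound(ts, limit);

	/* caller loads tuples, calls tuplesort_performsort(), fetches at most 'limit' */
	return ts;
}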

◆ tuplesort_skiptuples()

bool tuplesort_skiptuples ( Tuplesortstate *  state,
int64  ntuples,
bool  forward 
)
extern

Definition at line 1606 of file tuplesort.c.

1607{
1608 MemoryContext oldcontext;
1609
1610 /*
1611 * We don't actually support backwards skip yet, because no callers need
1612 * it. The API is designed to allow for that later, though.
1613 */
1614 Assert(forward);
1615 Assert(ntuples >= 0);
1616 Assert(!WORKER(state));
1617
1618 switch (state->status)
1619 {
1620 case TSS_SORTEDINMEM:
1621 if (state->memtupcount - state->current >= ntuples)
1622 {
1623 state->current += ntuples;
1624 return true;
1625 }
1626 state->current = state->memtupcount;
1627 state->eof_reached = true;
1628
1629 /*
1630 * Complain if caller tries to retrieve more tuples than
1631 * originally asked for in a bounded sort. This is because
1632 * returning EOF here might be the wrong thing.
1633 */
1634 if (state->bounded && state->current >= state->bound)
1635 elog(ERROR, "retrieved too many tuples in a bounded sort");
1636
1637 return false;
1638
1639 case TSS_SORTEDONTAPE:
1640 case TSS_FINALMERGE:
1641
1642 /*
1643 * We could probably optimize these cases better, but for now it's
1644 * not worth the trouble.
1645 */
1646 oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1647 while (ntuples-- > 0)
1648 {
1649 SortTuple stup;
1650
1651 if (!tuplesort_gettuple_common(state, forward, &stup))
1652 {
1653 MemoryContextSwitchTo(oldcontext);
1654 return false;
1655 }
1656 CHECK_FOR_INTERRUPTS();
1657 }
1658 MemoryContextSwitchTo(oldcontext);
1659 return true;
1660
1661 default:
1662 elog(ERROR, "invalid tuplesort state");
1663 return false; /* keep compiler quiet */
1664 }
1665}

References Assert, CHECK_FOR_INTERRUPTS, elog, ERROR, fb(), MemoryContextSwitchTo(), TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, tuplesort_gettuple_common(), and WORKER.

Referenced by percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), and percentile_disc_multi_final().
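
tuplesort_skiptuples() lets the percentile aggregates jump directly to the k-th tuple of the sorted output without fetching the ones before it. A hedged sketch (tuplesort_getdatum() assumed as in the earlier datum example):

#include "postgres.h"
#include "utils/tuplesort.h"

static bool
sketch_fetch_kth(Tuplesortstate *ts, int64 k, Datum *result, bool *isnull)
{
	/* skip the first k tuples; false means the sort ran out early */
	if (k > 0 && !tuplesort_skiptuples(ts, k, true))
		return false;

	/* the next forward fetch is the k-th (0-based) tuple */
	return tuplesort_getdatum(ts, true, true, result, isnull, NULL);
}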

◆ tuplesort_space_type_name()

const char * tuplesort_space_type_name ( TuplesortSpaceType  t)
extern

Definition at line 2462 of file tuplesort.c.

2463{
2465 return t == SORT_SPACE_TYPE_DISK ? "Disk" : "Memory";
2466}

References Assert, SORT_SPACE_TYPE_DISK, and SORT_SPACE_TYPE_MEMORY.

Referenced by show_incremental_sort_group_info(), and show_sort_info().

◆ tuplesort_used_bound()

bool tuplesort_used_bound ( Tuplesortstate *  state)
extern

Definition at line 782 of file tuplesort.c.

783{
784 return state->boundUsed;
785}

Referenced by ExecIncrementalSort().