PostgreSQL Source Code git master
tuplesort.c File Reference
#include "postgres.h"
#include <limits.h>
#include "commands/tablespace.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "storage/shmem.h"
#include "utils/guc.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/tuplesort.h"
#include "lib/sort_template.h"

Data Structures

union  SlabSlot
 
struct  Tuplesortstate
 
struct  Sharedsort
 

Macros

#define INITIAL_MEMTUPSIZE
 
#define SLAB_SLOT_SIZE   1024
 
#define MINORDER   6 /* minimum merge order */
 
#define MAXORDER   500 /* maximum merge order */
 
#define TAPE_BUFFER_OVERHEAD   BLCKSZ
 
#define MERGE_BUFFER_SIZE   (BLCKSZ * 32)
 
#define IS_SLAB_SLOT(state, tuple)
 
#define RELEASE_SLAB_SLOT(state, tuple)
 
#define REMOVEABBREV(state, stup, count)   ((*(state)->base.removeabbrev) (state, stup, count))
 
#define COMPARETUP(state, a, b)   ((*(state)->base.comparetup) (a, b, state))
 
#define WRITETUP(state, tape, stup)   ((*(state)->base.writetup) (state, tape, stup))
 
#define READTUP(state, stup, tape, len)   ((*(state)->base.readtup) (state, stup, tape, len))
 
#define FREESTATE(state)   ((state)->base.freestate ? (*(state)->base.freestate) (state) : (void) 0)
 
#define LACKMEM(state)   ((state)->availMem < 0 && !(state)->slabAllocatorUsed)
 
#define USEMEM(state, amt)   ((state)->availMem -= (amt))
 
#define FREEMEM(state, amt)   ((state)->availMem += (amt))
 
#define SERIAL(state)   ((state)->shared == NULL)
 
#define WORKER(state)   ((state)->shared && (state)->worker != -1)
 
#define LEADER(state)   ((state)->shared && (state)->worker == -1)
 
#define ST_SORT   qsort_tuple_unsigned
 
#define ST_ELEMENT_TYPE   SortTuple
 
#define ST_COMPARE(a, b, state)   qsort_tuple_unsigned_compare(a, b, state)
 
#define ST_COMPARE_ARG_TYPE   Tuplesortstate
 
#define ST_CHECK_FOR_INTERRUPTS
 
#define ST_SCOPE   static
 
#define ST_DEFINE
 
#define ST_SORT   qsort_tuple_signed
 
#define ST_ELEMENT_TYPE   SortTuple
 
#define ST_COMPARE(a, b, state)   qsort_tuple_signed_compare(a, b, state)
 
#define ST_COMPARE_ARG_TYPE   Tuplesortstate
 
#define ST_CHECK_FOR_INTERRUPTS
 
#define ST_SCOPE   static
 
#define ST_DEFINE
 
#define ST_SORT   qsort_tuple_int32
 
#define ST_ELEMENT_TYPE   SortTuple
 
#define ST_COMPARE(a, b, state)   qsort_tuple_int32_compare(a, b, state)
 
#define ST_COMPARE_ARG_TYPE   Tuplesortstate
 
#define ST_CHECK_FOR_INTERRUPTS
 
#define ST_SCOPE   static
 
#define ST_DEFINE
 
#define ST_SORT   qsort_tuple
 
#define ST_ELEMENT_TYPE   SortTuple
 
#define ST_COMPARE_RUNTIME_POINTER
 
#define ST_COMPARE_ARG_TYPE   Tuplesortstate
 
#define ST_CHECK_FOR_INTERRUPTS
 
#define ST_SCOPE   static
 
#define ST_DECLARE
 
#define ST_DEFINE
 
#define ST_SORT   qsort_ssup
 
#define ST_ELEMENT_TYPE   SortTuple
 
#define ST_COMPARE(a, b, ssup)
 
#define ST_COMPARE_ARG_TYPE   SortSupportData
 
#define ST_CHECK_FOR_INTERRUPTS
 
#define ST_SCOPE   static
 
#define ST_DEFINE
 

Typedefs

typedef union SlabSlot SlabSlot
 

Enumerations

enum  TupSortStatus {
  TSS_INITIAL , TSS_BOUNDED , TSS_BUILDRUNS , TSS_SORTEDINMEM ,
  TSS_SORTEDONTAPE , TSS_FINALMERGE
}
 

Functions

static void tuplesort_begin_batch (Tuplesortstate *state)
 
static bool consider_abort_common (Tuplesortstate *state)
 
static void inittapes (Tuplesortstate *state, bool mergeruns)
 
static void inittapestate (Tuplesortstate *state, int maxTapes)
 
static void selectnewtape (Tuplesortstate *state)
 
static void init_slab_allocator (Tuplesortstate *state, int numSlots)
 
static void mergeruns (Tuplesortstate *state)
 
static void mergeonerun (Tuplesortstate *state)
 
static void beginmerge (Tuplesortstate *state)
 
static bool mergereadnext (Tuplesortstate *state, LogicalTape *srcTape, SortTuple *stup)
 
static void dumptuples (Tuplesortstate *state, bool alltuples)
 
static void make_bounded_heap (Tuplesortstate *state)
 
static void sort_bounded_heap (Tuplesortstate *state)
 
static void tuplesort_sort_memtuples (Tuplesortstate *state)
 
static void tuplesort_heap_insert (Tuplesortstate *state, SortTuple *tuple)
 
static void tuplesort_heap_replace_top (Tuplesortstate *state, SortTuple *tuple)
 
static void tuplesort_heap_delete_top (Tuplesortstate *state)
 
static void reversedirection (Tuplesortstate *state)
 
static unsigned int getlen (LogicalTape *tape, bool eofOK)
 
static void markrunend (LogicalTape *tape)
 
static int worker_get_identifier (Tuplesortstate *state)
 
static void worker_freeze_result_tape (Tuplesortstate *state)
 
static void worker_nomergeruns (Tuplesortstate *state)
 
static void leader_takeover_tapes (Tuplesortstate *state)
 
static void free_sort_tuple (Tuplesortstate *state, SortTuple *stup)
 
static void tuplesort_free (Tuplesortstate *state)
 
static void tuplesort_updatemax (Tuplesortstate *state)
 
static pg_attribute_always_inline int qsort_tuple_unsigned_compare (SortTuple *a, SortTuple *b, Tuplesortstate *state)
 
static pg_attribute_always_inline int qsort_tuple_signed_compare (SortTuple *a, SortTuple *b, Tuplesortstate *state)
 
static pg_attribute_always_inline int qsort_tuple_int32_compare (SortTuple *a, SortTuple *b, Tuplesortstate *state)
 
Tuplesortstate * tuplesort_begin_common (int workMem, SortCoordinate coordinate, int sortopt)
 
void tuplesort_set_bound (Tuplesortstate *state, int64 bound)
 
bool tuplesort_used_bound (Tuplesortstate *state)
 
void tuplesort_end (Tuplesortstate *state)
 
void tuplesort_reset (Tuplesortstate *state)
 
static bool grow_memtuples (Tuplesortstate *state)
 
void tuplesort_puttuple_common (Tuplesortstate *state, SortTuple *tuple, bool useAbbrev, Size tuplen)
 
void tuplesort_performsort (Tuplesortstate *state)
 
bool tuplesort_gettuple_common (Tuplesortstate *state, bool forward, SortTuple *stup)
 
bool tuplesort_skiptuples (Tuplesortstate *state, int64 ntuples, bool forward)
 
int tuplesort_merge_order (int64 allowedMem)
 
static int64 merge_read_buffer_size (int64 avail_mem, int nInputTapes, int nInputRuns, int maxOutputTapes)
 
void tuplesort_rescan (Tuplesortstate *state)
 
void tuplesort_markpos (Tuplesortstate *state)
 
void tuplesort_restorepos (Tuplesortstate *state)
 
void tuplesort_get_stats (Tuplesortstate *state, TuplesortInstrumentation *stats)
 
const char * tuplesort_method_name (TuplesortMethod m)
 
const char * tuplesort_space_type_name (TuplesortSpaceType t)
 
void * tuplesort_readtup_alloc (Tuplesortstate *state, Size tuplen)
 
Size tuplesort_estimate_shared (int nWorkers)
 
void tuplesort_initialize_shared (Sharedsort *shared, int nWorkers, dsm_segment *seg)
 
void tuplesort_attach_shared (Sharedsort *shared, dsm_segment *seg)
 
int ssup_datum_unsigned_cmp (Datum x, Datum y, SortSupport ssup)
 
int ssup_datum_signed_cmp (Datum x, Datum y, SortSupport ssup)
 
int ssup_datum_int32_cmp (Datum x, Datum y, SortSupport ssup)
 

Variables

bool trace_sort = false
 

Macro Definition Documentation

◆ COMPARETUP

#define COMPARETUP(state, a, b)   ((*(state)->base.comparetup) (a, b, state))

Definition at line 394 of file tuplesort.c.

◆ FREEMEM

#define FREEMEM(state, amt)   ((state)->availMem += (amt))

Definition at line 400 of file tuplesort.c.

◆ FREESTATE

#define FREESTATE (   state)    ((state)->base.freestate ? (*(state)->base.freestate) (state) : (void) 0)

Definition at line 397 of file tuplesort.c.

◆ INITIAL_MEMTUPSIZE

#define INITIAL_MEMTUPSIZE
Value:
Max(1024, \
    ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1)

Definition at line 118 of file tuplesort.c.

◆ IS_SLAB_SLOT

#define IS_SLAB_SLOT(state, tuple)
Value:
((char *) (tuple) >= (state)->slabMemoryBegin && \
(char *) (tuple) < (state)->slabMemoryEnd)

Definition at line 373 of file tuplesort.c.

◆ LACKMEM

#define LACKMEM (   state)    ((state)->availMem < 0 && !(state)->slabAllocatorUsed)

Definition at line 398 of file tuplesort.c.

◆ LEADER

#define LEADER (   state)    ((state)->shared && (state)->worker == -1)

Definition at line 403 of file tuplesort.c.

◆ MAXORDER

#define MAXORDER   500 /* maximum merge order */

Definition at line 175 of file tuplesort.c.

◆ MERGE_BUFFER_SIZE

#define MERGE_BUFFER_SIZE   (BLCKSZ * 32)

Definition at line 177 of file tuplesort.c.

◆ MINORDER

#define MINORDER   6 /* minimum merge order */

Definition at line 174 of file tuplesort.c.

◆ READTUP

#define READTUP(state, stup, tape, len)   ((*(state)->base.readtup) (state, stup, tape, len))

Definition at line 396 of file tuplesort.c.

◆ RELEASE_SLAB_SLOT

#define RELEASE_SLAB_SLOT(state, tuple)
Value:
do { \
    SlabSlot   *buf = (SlabSlot *) tuple; \
    \
    if (IS_SLAB_SLOT((state), buf)) \
    { \
        buf->nextfree = (state)->slabFreeHead; \
        (state)->slabFreeHead = buf; \
    } \
    else \
        pfree(buf); \
} while(0)

Definition at line 381 of file tuplesort.c.

◆ REMOVEABBREV

#define REMOVEABBREV(state, stup, count)   ((*(state)->base.removeabbrev) (state, stup, count))

Definition at line 393 of file tuplesort.c.

◆ SERIAL

#define SERIAL (   state)    ((state)->shared == NULL)

Definition at line 401 of file tuplesort.c.

◆ SLAB_SLOT_SIZE

#define SLAB_SLOT_SIZE   1024

Definition at line 140 of file tuplesort.c.

◆ ST_CHECK_FOR_INTERRUPTS [1/5]

#define ST_CHECK_FOR_INTERRUPTS

Definition at line 611 of file tuplesort.c.

◆ ST_CHECK_FOR_INTERRUPTS [2/5]

#define ST_CHECK_FOR_INTERRUPTS

Definition at line 611 of file tuplesort.c.

◆ ST_CHECK_FOR_INTERRUPTS [3/5]

#define ST_CHECK_FOR_INTERRUPTS

Definition at line 611 of file tuplesort.c.

◆ ST_CHECK_FOR_INTERRUPTS [4/5]

#define ST_CHECK_FOR_INTERRUPTS

Definition at line 611 of file tuplesort.c.

◆ ST_CHECK_FOR_INTERRUPTS [5/5]

#define ST_CHECK_FOR_INTERRUPTS

Definition at line 611 of file tuplesort.c.

◆ ST_COMPARE [1/4]

#define ST_COMPARE(a, b, ssup)
Value:
ApplySortComparator((a)->datum1, (a)->isnull1, \
(b)->datum1, (b)->isnull1, (ssup))

Definition at line 607 of file tuplesort.c.

◆ ST_COMPARE [2/4]

#define ST_COMPARE(a, b, state)   qsort_tuple_unsigned_compare(a, b, state)

Definition at line 607 of file tuplesort.c.

◆ ST_COMPARE [3/4]

#define ST_COMPARE(a, b, state)   qsort_tuple_signed_compare(a, b, state)

Definition at line 607 of file tuplesort.c.

◆ ST_COMPARE [4/4]

#define ST_COMPARE(a, b, state)   qsort_tuple_int32_compare(a, b, state)

Definition at line 607 of file tuplesort.c.

◆ ST_COMPARE_ARG_TYPE [1/5]

#define ST_COMPARE_ARG_TYPE   Tuplesortstate

Definition at line 610 of file tuplesort.c.

◆ ST_COMPARE_ARG_TYPE [2/5]

#define ST_COMPARE_ARG_TYPE   Tuplesortstate

Definition at line 610 of file tuplesort.c.

◆ ST_COMPARE_ARG_TYPE [3/5]

#define ST_COMPARE_ARG_TYPE   Tuplesortstate

Definition at line 610 of file tuplesort.c.

◆ ST_COMPARE_ARG_TYPE [4/5]

#define ST_COMPARE_ARG_TYPE   Tuplesortstate

Definition at line 610 of file tuplesort.c.

◆ ST_COMPARE_ARG_TYPE [5/5]

#define ST_COMPARE_ARG_TYPE   SortSupportData

Definition at line 610 of file tuplesort.c.

◆ ST_COMPARE_RUNTIME_POINTER

#define ST_COMPARE_RUNTIME_POINTER

Definition at line 597 of file tuplesort.c.

◆ ST_DECLARE

#define ST_DECLARE

Definition at line 601 of file tuplesort.c.

◆ ST_DEFINE [1/5]

#define ST_DEFINE

Definition at line 613 of file tuplesort.c.

◆ ST_DEFINE [2/5]

#define ST_DEFINE

Definition at line 613 of file tuplesort.c.

◆ ST_DEFINE [3/5]

#define ST_DEFINE

Definition at line 613 of file tuplesort.c.

◆ ST_DEFINE [4/5]

#define ST_DEFINE

Definition at line 613 of file tuplesort.c.

◆ ST_DEFINE [5/5]

#define ST_DEFINE

Definition at line 613 of file tuplesort.c.

◆ ST_ELEMENT_TYPE [1/5]

#define ST_ELEMENT_TYPE   SortTuple

Definition at line 606 of file tuplesort.c.

◆ ST_ELEMENT_TYPE [2/5]

#define ST_ELEMENT_TYPE   SortTuple

Definition at line 606 of file tuplesort.c.

◆ ST_ELEMENT_TYPE [3/5]

#define ST_ELEMENT_TYPE   SortTuple

Definition at line 606 of file tuplesort.c.

◆ ST_ELEMENT_TYPE [4/5]

#define ST_ELEMENT_TYPE   SortTuple

Definition at line 606 of file tuplesort.c.

◆ ST_ELEMENT_TYPE [5/5]

#define ST_ELEMENT_TYPE   SortTuple

Definition at line 606 of file tuplesort.c.

◆ ST_SCOPE [1/5]

#define ST_SCOPE   static

Definition at line 612 of file tuplesort.c.

◆ ST_SCOPE [2/5]

#define ST_SCOPE   static

Definition at line 612 of file tuplesort.c.

◆ ST_SCOPE [3/5]

#define ST_SCOPE   static

Definition at line 612 of file tuplesort.c.

◆ ST_SCOPE [4/5]

#define ST_SCOPE   static

Definition at line 612 of file tuplesort.c.

◆ ST_SCOPE [5/5]

#define ST_SCOPE   static

Definition at line 612 of file tuplesort.c.

◆ ST_SORT [1/5]

#define ST_SORT   qsort_tuple_unsigned

Definition at line 605 of file tuplesort.c.

◆ ST_SORT [2/5]

#define ST_SORT   qsort_tuple_signed

Definition at line 605 of file tuplesort.c.

◆ ST_SORT [3/5]

#define ST_SORT   qsort_tuple_int32

Definition at line 605 of file tuplesort.c.

◆ ST_SORT [4/5]

#define ST_SORT   qsort_tuple

Definition at line 605 of file tuplesort.c.

◆ ST_SORT [5/5]

#define ST_SORT   qsort_ssup

Definition at line 605 of file tuplesort.c.
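
These ST_* macro batches parameterize lib/sort_template.h, which is #included once per specialization to stamp out a dedicated qsort variant with the comparator inlined. A minimal sketch of the same instantiation pattern, using a hypothetical element type (only compilable inside the PostgreSQL source tree):

typedef struct { int key; int payload; } IntPair;   /* hypothetical element */

#define ST_SORT sort_int_pairs          /* name of the generated function */
#define ST_ELEMENT_TYPE IntPair
#define ST_COMPARE(a, b) (((a)->key > (b)->key) - ((a)->key < (b)->key))
#define ST_SCOPE static
#define ST_DEFINE
#include "lib/sort_template.h"
/* The include emits: static void sort_int_pairs(IntPair *first, size_t n) */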

◆ TAPE_BUFFER_OVERHEAD

#define TAPE_BUFFER_OVERHEAD   BLCKSZ

Definition at line 176 of file tuplesort.c.

◆ USEMEM

#define USEMEM(state, amt)   ((state)->availMem -= (amt))

Definition at line 399 of file tuplesort.c.
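
USEMEM, FREEMEM, and LACKMEM implement a simple byte budget against availMem: allocating charges the budget, freeing refunds it, and a negative balance signals that tuples must be spilled to tape. A standalone sketch of the same bookkeeping pattern (illustrative names, not tuplesort's API):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct { int64_t availMem; } Budget;

static void *budget_alloc(Budget *b, size_t sz)
{
    void *p = malloc(sz);
    if (p)
        b->availMem -= (int64_t) sz;     /* USEMEM */
    return p;
}

static void budget_free(Budget *b, void *p, size_t sz)
{
    free(p);
    b->availMem += (int64_t) sz;         /* FREEMEM */
}

static bool budget_exceeded(const Budget *b)
{
    return b->availMem < 0;              /* LACKMEM (ignoring slab mode) */
}

int main(void)
{
    Budget b = { 1024 };                 /* 1 KB budget (illustrative) */
    void  *p = budget_alloc(&b, 800);
    void  *q = budget_alloc(&b, 800);    /* drives availMem negative */

    /* budget_exceeded(&b) is now true: time to dump tuples to tape */
    budget_free(&b, q, 800);
    budget_free(&b, p, 800);
    return budget_exceeded(&b);          /* 0: balanced again */
}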

◆ WORKER

#define WORKER (   state)    ((state)->shared && (state)->worker != -1)

Definition at line 402 of file tuplesort.c.

◆ WRITETUP

#define WRITETUP(state, tape, stup)   ((*(state)->base.writetup) (state, tape, stup))

Definition at line 395 of file tuplesort.c.

Typedef Documentation

◆ SlabSlot

typedef union SlabSlot SlabSlot

Enumeration Type Documentation

◆ TupSortStatus

Enumerator
TSS_INITIAL 
TSS_BOUNDED 
TSS_BUILDRUNS 
TSS_SORTEDINMEM 
TSS_SORTEDONTAPE 
TSS_FINALMERGE 

Definition at line 152 of file tuplesort.c.

153{
154 TSS_INITIAL, /* Loading tuples; still within memory limit */
155 TSS_BOUNDED, /* Loading tuples into bounded-size heap */
156 TSS_BUILDRUNS, /* Loading tuples; writing to tape */
157 TSS_SORTEDINMEM, /* Sort completed entirely in memory */
158 TSS_SORTEDONTAPE, /* Sort completed, final run is on tape */
159 TSS_FINALMERGE, /* Performing final merge on-the-fly */
 160} TupSortStatus;

Function Documentation

◆ beginmerge()

static void beginmerge (Tuplesortstate *state)

Definition at line 2246 of file tuplesort.c.

2247{
2248 int activeTapes;
2249 int srcTapeIndex;
2250
2251 /* Heap should be empty here */
2252 Assert(state->memtupcount == 0);
2253
2254 activeTapes = Min(state->nInputTapes, state->nInputRuns);
2255
2256 for (srcTapeIndex = 0; srcTapeIndex < activeTapes; srcTapeIndex++)
2257 {
2258 SortTuple tup;
2259
2260 if (mergereadnext(state, state->inputTapes[srcTapeIndex], &tup))
2261 {
2262 tup.srctape = srcTapeIndex;
2263 tuplesort_heap_insert(state, &tup);
2264 }
2265 }
2266}

References Assert(), mergereadnext(), Min, SortTuple::srctape, and tuplesort_heap_insert().

Referenced by mergeonerun(), and mergeruns().

◆ consider_abort_common()

static bool consider_abort_common (Tuplesortstate *state)

Definition at line 1305 of file tuplesort.c.

1306{
1307 Assert(state->base.sortKeys[0].abbrev_converter != NULL);
1308 Assert(state->base.sortKeys[0].abbrev_abort != NULL);
1309 Assert(state->base.sortKeys[0].abbrev_full_comparator != NULL);
1310
1311 /*
1312 * Check effectiveness of abbreviation optimization. Consider aborting
1313 * when still within memory limit.
1314 */
1315 if (state->status == TSS_INITIAL &&
1316 state->memtupcount >= state->abbrevNext)
1317 {
1318 state->abbrevNext *= 2;
1319
1320 /*
1321 * Check opclass-supplied abbreviation abort routine. It may indicate
1322 * that abbreviation should not proceed.
1323 */
1324 if (!state->base.sortKeys->abbrev_abort(state->memtupcount,
1325 state->base.sortKeys))
1326 return false;
1327
1328 /*
1329 * Finally, restore authoritative comparator, and indicate that
1330 * abbreviation is not in play by setting abbrev_converter to NULL
1331 */
1332 state->base.sortKeys[0].comparator = state->base.sortKeys[0].abbrev_full_comparator;
1333 state->base.sortKeys[0].abbrev_converter = NULL;
1334 /* Not strictly necessary, but be tidy */
1335 state->base.sortKeys[0].abbrev_abort = NULL;
1336 state->base.sortKeys[0].abbrev_full_comparator = NULL;
1337
1338 /* Give up - expect original pass-by-value representation */
1339 return true;
1340 }
1341
1342 return false;
1343}

References Assert(), and TSS_INITIAL.

Referenced by tuplesort_puttuple_common().

◆ dumptuples()

static void dumptuples (Tuplesortstate *state, bool alltuples)

Definition at line 2293 of file tuplesort.c.

2294{
2295 int memtupwrite;
2296 int i;
2297
2298 /*
2299 * Nothing to do if we still fit in available memory and have array slots,
2300 * unless this is the final call during initial run generation.
2301 */
2302 if (state->memtupcount < state->memtupsize && !LACKMEM(state) &&
2303 !alltuples)
2304 return;
2305
2306 /*
2307 * Final call might require no sorting, in rare cases where we just so
2308 * happen to have previously LACKMEM()'d at the point where exactly all
2309 * remaining tuples are loaded into memory, just before input was
2310 * exhausted. In general, short final runs are quite possible, but avoid
2311 * creating a completely empty run. In a worker, though, we must produce
2312 * at least one tape, even if it's empty.
2313 */
2314 if (state->memtupcount == 0 && state->currentRun > 0)
2315 return;
2316
2317 Assert(state->status == TSS_BUILDRUNS);
2318
2319 /*
2320 * It seems unlikely that this limit will ever be exceeded, but take no
2321 * chances
2322 */
2323 if (state->currentRun == INT_MAX)
2324 ereport(ERROR,
2325 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
2326 errmsg("cannot have more than %d runs for an external sort",
2327 INT_MAX)));
2328
2329 if (state->currentRun > 0)
2330 selectnewtape(state);
2331
2332 state->currentRun++;
2333
2334 if (trace_sort)
2335 elog(LOG, "worker %d starting quicksort of run %d: %s",
2336 state->worker, state->currentRun,
2337 pg_rusage_show(&state->ru_start));
2338
2339 /*
2340 * Sort all tuples accumulated within the allowed amount of memory for
2341 * this run using quicksort
2342 */
2343 tuplesort_sort_memtuples(state);
2344
2345 if (trace_sort)
2346 elog(LOG, "worker %d finished quicksort of run %d: %s",
2347 state->worker, state->currentRun,
2348 pg_rusage_show(&state->ru_start));
2349
2350 memtupwrite = state->memtupcount;
2351 for (i = 0; i < memtupwrite; i++)
2352 {
2353 SortTuple *stup = &state->memtuples[i];
2354
2355 WRITETUP(state, state->destTape, stup);
2356 }
2357
2358 state->memtupcount = 0;
2359
2360 /*
2361 * Reset tuple memory. We've freed all of the tuples that we previously
2362 * allocated. It's important to avoid fragmentation when there is a stark
2363 * change in the sizes of incoming tuples. In bounded sorts,
2364 * fragmentation due to AllocSetFree's bucketing by size class might be
2365 * particularly bad if this step wasn't taken.
2366 */
2367 MemoryContextReset(state->base.tuplecontext);
2368
2369 /*
2370 * Now update the memory accounting to subtract the memory used by the
2371 * tuple.
2372 */
2373 FREEMEM(state, state->tupleMem);
2374 state->tupleMem = 0;
2375
2376 markrunend(state->destTape);
2377
2378 if (trace_sort)
2379 elog(LOG, "worker %d finished writing run %d to tape %d: %s",
2380 state->worker, state->currentRun, (state->currentRun - 1) % state->nOutputTapes + 1,
2381 pg_rusage_show(&state->ru_start));
2382}

References Assert(), elog, ereport, errcode(), errmsg(), ERROR, FREEMEM, i, LACKMEM, LOG, markrunend(), MemoryContextReset(), pg_rusage_show(), selectnewtape(), trace_sort, TSS_BUILDRUNS, tuplesort_sort_memtuples(), and WRITETUP.

Referenced by tuplesort_performsort(), and tuplesort_puttuple_common().

◆ free_sort_tuple()

static void free_sort_tuple (Tuplesortstate *state, SortTuple *stup)

Definition at line 3112 of file tuplesort.c.

3113{
3114 if (stup->tuple)
3115 {
3116 FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
3117 pfree(stup->tuple);
3118 stup->tuple = NULL;
3119 }
3120}

References FREEMEM, GetMemoryChunkSpace(), pfree(), and SortTuple::tuple.

Referenced by make_bounded_heap(), and tuplesort_puttuple_common().

◆ getlen()

static unsigned int getlen (LogicalTape *tape, bool eofOK)

Definition at line 2840 of file tuplesort.c.

2841{
2842 unsigned int len;
2843
2844 if (LogicalTapeRead(tape,
2845 &len, sizeof(len)) != sizeof(len))
2846 elog(ERROR, "unexpected end of tape");
2847 if (len == 0 && !eofOK)
2848 elog(ERROR, "unexpected end of data");
2849 return len;
2850}

References elog, ERROR, len, and LogicalTapeRead().

Referenced by mergereadnext(), and tuplesort_gettuple_common().

◆ grow_memtuples()

static bool grow_memtuples (Tuplesortstate *state)

Definition at line 1038 of file tuplesort.c.

1039{
1040 int newmemtupsize;
1041 int memtupsize = state->memtupsize;
1042 int64 memNowUsed = state->allowedMem - state->availMem;
1043
1044 /* Forget it if we've already maxed out memtuples, per comment above */
1045 if (!state->growmemtuples)
1046 return false;
1047
1048 /* Select new value of memtupsize */
1049 if (memNowUsed <= state->availMem)
1050 {
1051 /*
1052 * We've used no more than half of allowedMem; double our usage,
1053 * clamping at INT_MAX tuples.
1054 */
1055 if (memtupsize < INT_MAX / 2)
1056 newmemtupsize = memtupsize * 2;
1057 else
1058 {
1059 newmemtupsize = INT_MAX;
1060 state->growmemtuples = false;
1061 }
1062 }
1063 else
1064 {
1065 /*
1066 * This will be the last increment of memtupsize. Abandon doubling
1067 * strategy and instead increase as much as we safely can.
1068 *
1069 * To stay within allowedMem, we can't increase memtupsize by more
1070 * than availMem / sizeof(SortTuple) elements. In practice, we want
1071 * to increase it by considerably less, because we need to leave some
1072 * space for the tuples to which the new array slots will refer. We
1073 * assume the new tuples will be about the same size as the tuples
1074 * we've already seen, and thus we can extrapolate from the space
1075 * consumption so far to estimate an appropriate new size for the
1076 * memtuples array. The optimal value might be higher or lower than
1077 * this estimate, but it's hard to know that in advance. We again
1078 * clamp at INT_MAX tuples.
1079 *
1080 * This calculation is safe against enlarging the array so much that
1081 * LACKMEM becomes true, because the memory currently used includes
1082 * the present array; thus, there would be enough allowedMem for the
1083 * new array elements even if no other memory were currently used.
1084 *
1085 * We do the arithmetic in float8, because otherwise the product of
1086 * memtupsize and allowedMem could overflow. Any inaccuracy in the
1087 * result should be insignificant; but even if we computed a
1088 * completely insane result, the checks below will prevent anything
1089 * really bad from happening.
1090 */
1091 double grow_ratio;
1092
1093 grow_ratio = (double) state->allowedMem / (double) memNowUsed;
1094 if (memtupsize * grow_ratio < INT_MAX)
1095 newmemtupsize = (int) (memtupsize * grow_ratio);
1096 else
1097 newmemtupsize = INT_MAX;
1098
1099 /* We won't make any further enlargement attempts */
1100 state->growmemtuples = false;
1101 }
1102
1103 /* Must enlarge array by at least one element, else report failure */
1104 if (newmemtupsize <= memtupsize)
1105 goto noalloc;
1106
1107 /*
1108 * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize. Clamp
1109 * to ensure our request won't be rejected. Note that we can easily
1110 * exhaust address space before facing this outcome. (This is presently
1111 * impossible due to guc.c's MAX_KILOBYTES limitation on work_mem, but
1112 * don't rely on that at this distance.)
1113 */
1114 if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
1115 {
1116 newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
1117 state->growmemtuples = false; /* can't grow any more */
1118 }
1119
1120 /*
1121 * We need to be sure that we do not cause LACKMEM to become true, else
1122 * the space management algorithm will go nuts. The code above should
1123 * never generate a dangerous request, but to be safe, check explicitly
1124 * that the array growth fits within availMem. (We could still cause
1125 * LACKMEM if the memory chunk overhead associated with the memtuples
1126 * array were to increase. That shouldn't happen because we chose the
1127 * initial array size large enough to ensure that palloc will be treating
1128 * both old and new arrays as separate chunks. But we'll check LACKMEM
1129 * explicitly below just in case.)
1130 */
1131 if (state->availMem < (int64) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
1132 goto noalloc;
1133
1134 /* OK, do it */
1135 FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
1136 state->memtupsize = newmemtupsize;
1137 state->memtuples = (SortTuple *)
1138 repalloc_huge(state->memtuples,
1139 state->memtupsize * sizeof(SortTuple));
1140 USEMEM(state, GetMemoryChunkSpace(state->memtuples));
1141 if (LACKMEM(state))
1142 elog(ERROR, "unexpected out-of-memory situation in tuplesort");
1143 return true;
1144
1145noalloc:
1146 /* If for any reason we didn't realloc, shut off future attempts */
1147 state->growmemtuples = false;
1148 return false;
1149}

References elog, ERROR, FREEMEM, GetMemoryChunkSpace(), LACKMEM, MaxAllocHugeSize, repalloc_huge(), and USEMEM.

Referenced by tuplesort_puttuple_common().
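
The sizing policy is: double memtupsize while the tuple memory used so far is no more than half of allowedMem, then perform one final enlargement extrapolated from observed space consumption. A standalone sketch of just that arithmetic (constants are illustrative):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static int
next_memtupsize(int memtupsize, int64_t allowedMem, int64_t availMem)
{
    int64_t memNowUsed = allowedMem - availMem;

    if (memNowUsed <= availMem)          /* under half the budget: double */
        return (memtupsize < INT_MAX / 2) ? memtupsize * 2 : INT_MAX;

    /* Final enlargement: extrapolate from the space consumed so far. */
    double grow_ratio = (double) allowedMem / (double) memNowUsed;

    return (memtupsize * grow_ratio < INT_MAX)
        ? (int) (memtupsize * grow_ratio) : INT_MAX;
}

int main(void)
{
    /* 1024 slots, 64 MB budget, 40 MB still available: doubles to 2048 */
    printf("%d\n", next_memtupsize(1024, 64 << 20, 40 << 20));
    /* 8192 slots, 64 MB budget, 16 MB available (48 MB used): ~10922 */
    printf("%d\n", next_memtupsize(8192, 64 << 20, 16 << 20));
    return 0;
}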

◆ init_slab_allocator()

static void init_slab_allocator (Tuplesortstate *state, int numSlots)

Definition at line 1967 of file tuplesort.c.

1968{
1969 if (numSlots > 0)
1970 {
1971 char *p;
1972 int i;
1973
1974 state->slabMemoryBegin = palloc(numSlots * SLAB_SLOT_SIZE);
1975 state->slabMemoryEnd = state->slabMemoryBegin +
1976 numSlots * SLAB_SLOT_SIZE;
1977 state->slabFreeHead = (SlabSlot *) state->slabMemoryBegin;
1978 USEMEM(state, numSlots * SLAB_SLOT_SIZE);
1979
1980 p = state->slabMemoryBegin;
1981 for (i = 0; i < numSlots - 1; i++)
1982 {
1983 ((SlabSlot *) p)->nextfree = (SlabSlot *) (p + SLAB_SLOT_SIZE);
1984 p += SLAB_SLOT_SIZE;
1985 }
1986 ((SlabSlot *) p)->nextfree = NULL;
1987 }
1988 else
1989 {
1990 state->slabMemoryBegin = state->slabMemoryEnd = NULL;
1991 state->slabFreeHead = NULL;
1992 }
1993 state->slabAllocatorUsed = true;
1994}

References i, palloc(), SLAB_SLOT_SIZE, and USEMEM.

Referenced by mergeruns().
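
The allocator carves one arena into fixed SLAB_SLOT_SIZE slots and threads every slot onto a singly linked free list through the slot's own first bytes (the SlabSlot union). A standalone sketch of that threading, plus a pop/push cycle mirroring RELEASE_SLAB_SLOT (slot size and count are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define SLOT_SIZE 1024

typedef union Slot
{
    union Slot *nextfree;
    char        buffer[SLOT_SIZE];
} Slot;

int main(void)
{
    int   nslots = 4;
    char *arena = malloc((size_t) nslots * SLOT_SIZE);
    Slot *freehead = (Slot *) arena;
    char *p = arena;

    /* Thread each slot onto the free list, as the real code does. */
    for (int i = 0; i < nslots - 1; i++)
    {
        ((Slot *) p)->nextfree = (Slot *) (p + SLOT_SIZE);
        p += SLOT_SIZE;
    }
    ((Slot *) p)->nextfree = NULL;

    /* Pop two slots, then push one back (cf. RELEASE_SLAB_SLOT). */
    Slot *a = freehead; freehead = a->nextfree;
    Slot *b = freehead; freehead = b->nextfree;
    a->nextfree = freehead; freehead = a;

    printf("free head is slot %ld\n",
           (long) (((char *) freehead - arena) / SLOT_SIZE));   /* slot 0 */
    free(arena);
    return 0;
}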

◆ inittapes()

static void inittapes (Tuplesortstate *state, bool mergeruns)

Definition at line 1851 of file tuplesort.c.

1852{
1853 Assert(!LEADER(state));
1854
1855 if (mergeruns)
1856 {
1857 /* Compute number of input tapes to use when merging */
1858 state->maxTapes = tuplesort_merge_order(state->allowedMem);
1859 }
1860 else
1861 {
1862 /* Workers can sometimes produce single run, output without merge */
1863 Assert(WORKER(state));
1864 state->maxTapes = MINORDER;
1865 }
1866
1867 if (trace_sort)
1868 elog(LOG, "worker %d switching to external sort with %d tapes: %s",
1869 state->worker, state->maxTapes, pg_rusage_show(&state->ru_start));
1870
1871 /* Create the tape set */
1872 inittapestate(state, state->maxTapes);
1873 state->tapeset =
1874 LogicalTapeSetCreate(false,
1875 state->shared ? &state->shared->fileset : NULL,
1876 state->worker);
1877
1878 state->currentRun = 0;
1879
1880 /*
1881 * Initialize logical tape arrays.
1882 */
1883 state->inputTapes = NULL;
1884 state->nInputTapes = 0;
1885 state->nInputRuns = 0;
1886
1887 state->outputTapes = palloc0(state->maxTapes * sizeof(LogicalTape *));
1888 state->nOutputTapes = 0;
1889 state->nOutputRuns = 0;
1890
1891 state->status = TSS_BUILDRUNS;
1892
1893 selectnewtape(state);
1894}

References Assert(), elog, inittapestate(), LEADER, LOG, LogicalTapeSetCreate(), mergeruns(), MINORDER, palloc0(), pg_rusage_show(), selectnewtape(), trace_sort, TSS_BUILDRUNS, tuplesort_merge_order(), and WORKER.

Referenced by tuplesort_performsort(), and tuplesort_puttuple_common().

◆ inittapestate()

static void inittapestate (Tuplesortstate *state, int maxTapes)

Definition at line 1900 of file tuplesort.c.

1901{
1902 int64 tapeSpace;
1903
1904 /*
1905 * Decrease availMem to reflect the space needed for tape buffers; but
1906 * don't decrease it to the point that we have no room for tuples. (That
1907 * case is only likely to occur if sorting pass-by-value Datums; in all
1908 * other scenarios the memtuples[] array is unlikely to occupy more than
1909 * half of allowedMem. In the pass-by-value case it's not important to
1910 * account for tuple space, so we don't care if LACKMEM becomes
1911 * inaccurate.)
1912 */
1913 tapeSpace = (int64) maxTapes * TAPE_BUFFER_OVERHEAD;
1914
1915 if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
1916 USEMEM(state, tapeSpace);
1917
1918 /*
1919 * Make sure that the temp file(s) underlying the tape set are created in
1920 * suitable temp tablespaces. For parallel sorts, this should have been
1921 * called already, but it doesn't matter if it is called a second time.
1922 */
1923 PrepareTempTablespaces();
1924}

References GetMemoryChunkSpace(), PrepareTempTablespaces(), TAPE_BUFFER_OVERHEAD, and USEMEM.

Referenced by inittapes(), and leader_takeover_tapes().

◆ leader_takeover_tapes()

static void leader_takeover_tapes (Tuplesortstate *state)

Definition at line 3053 of file tuplesort.c.

3054{
3055 Sharedsort *shared = state->shared;
3056 int nParticipants = state->nParticipants;
3057 int workersFinished;
3058 int j;
3059
3060 Assert(LEADER(state));
3061 Assert(nParticipants >= 1);
3062
3063 SpinLockAcquire(&shared->mutex);
3064 workersFinished = shared->workersFinished;
3065 SpinLockRelease(&shared->mutex);
3066
3067 if (nParticipants != workersFinished)
3068 elog(ERROR, "cannot take over tapes before all workers finish");
3069
3070 /*
3071 * Create the tapeset from worker tapes, including a leader-owned tape at
3072 * the end. Parallel workers are far more expensive than logical tapes,
3073 * so the number of tapes allocated here should never be excessive.
3074 */
3075 inittapestate(state, nParticipants);
3076 state->tapeset = LogicalTapeSetCreate(false, &shared->fileset, -1);
3077
3078 /*
3079 * Set currentRun to reflect the number of runs we will merge (it's not
3080 * used for anything, this is just pro forma)
3081 */
3082 state->currentRun = nParticipants;
3083
3084 /*
3085 * Initialize the state to look the same as after building the initial
3086 * runs.
3087 *
3088 * There will always be exactly 1 run per worker, and exactly one input
3089 * tape per run, because workers always output exactly 1 run, even when
3090 * there were no input tuples for workers to sort.
3091 */
3092 state->inputTapes = NULL;
3093 state->nInputTapes = 0;
3094 state->nInputRuns = 0;
3095
3096 state->outputTapes = palloc0(nParticipants * sizeof(LogicalTape *));
3097 state->nOutputTapes = nParticipants;
3098 state->nOutputRuns = nParticipants;
3099
3100 for (j = 0; j < nParticipants; j++)
3101 {
3102 state->outputTapes[j] = LogicalTapeImport(state->tapeset, j, &shared->tapes[j]);
3103 }
3104
3105 state->status = TSS_BUILDRUNS;
3106}

References Assert(), elog, ERROR, Sharedsort::fileset, inittapestate(), j, LEADER, LogicalTapeImport(), LogicalTapeSetCreate(), Sharedsort::mutex, palloc0(), SpinLockAcquire, SpinLockRelease, Sharedsort::tapes, TSS_BUILDRUNS, and Sharedsort::workersFinished.

Referenced by tuplesort_performsort().

◆ make_bounded_heap()

static void make_bounded_heap (Tuplesortstate *state)

Definition at line 2573 of file tuplesort.c.

2574{
2575 int tupcount = state->memtupcount;
2576 int i;
2577
2578 Assert(state->status == TSS_INITIAL);
2579 Assert(state->bounded);
2580 Assert(tupcount >= state->bound);
2581 Assert(SERIAL(state));
2582
2583 /* Reverse sort direction so largest entry will be at root */
2584 reversedirection(state);
2585
2586 state->memtupcount = 0; /* make the heap empty */
2587 for (i = 0; i < tupcount; i++)
2588 {
2589 if (state->memtupcount < state->bound)
2590 {
2591 /* Insert next tuple into heap */
2592 /* Must copy source tuple to avoid possible overwrite */
2593 SortTuple stup = state->memtuples[i];
2594
2595 tuplesort_heap_insert(state, &stup);
2596 }
2597 else
2598 {
2599 /*
2600 * The heap is full. Replace the largest entry with the new
2601 * tuple, or just discard it, if it's larger than anything already
2602 * in the heap.
2603 */
2604 if (COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
2605 {
2606 free_sort_tuple(state, &state->memtuples[i]);
2607 CHECK_FOR_INTERRUPTS();
2608 }
2609 else
2610 tuplesort_heap_replace_top(state, &state->memtuples[i]);
2611 }
2612 }
2613
2614 Assert(state->memtupcount == state->bound);
2615 state->status = TSS_BOUNDED;
2616}

References Assert(), CHECK_FOR_INTERRUPTS, COMPARETUP, free_sort_tuple(), i, reversedirection(), SERIAL, TSS_BOUNDED, TSS_INITIAL, tuplesort_heap_insert(), and tuplesort_heap_replace_top().

Referenced by tuplesort_puttuple_common().
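
The bounded-sort trick: to keep only the N smallest tuples, maintain a max-heap of size N (hence the reversed sort direction) and replace the root whenever a smaller input arrives. A standalone sketch with plain ints in place of SortTuples:

#include <stdio.h>

#define BOUND 3

static int heap[BOUND];
static int heapcount = 0;

static void
sift_down(int i)                        /* restore heap below the root */
{
    for (;;)
    {
        int largest = i, l = 2 * i + 1, r = 2 * i + 2;

        if (l < heapcount && heap[l] > heap[largest]) largest = l;
        if (r < heapcount && heap[r] > heap[largest]) largest = r;
        if (largest == i) return;
        int tmp = heap[i]; heap[i] = heap[largest]; heap[largest] = tmp;
        i = largest;
    }
}

static void
heap_insert(int v)                      /* cf. tuplesort_heap_insert */
{
    int i = heapcount++;

    heap[i] = v;
    while (i > 0 && heap[(i - 1) / 2] < heap[i])
    {
        int tmp = heap[i]; heap[i] = heap[(i - 1) / 2]; heap[(i - 1) / 2] = tmp;
        i = (i - 1) / 2;
    }
}

int main(void)
{
    int input[] = {42, 7, 19, 3, 88, 5};

    for (int i = 0; i < 6; i++)
    {
        if (heapcount < BOUND)
            heap_insert(input[i]);
        else if (input[i] < heap[0])    /* cf. tuplesort_heap_replace_top */
        {
            heap[0] = input[i];
            sift_down(0);
        }                               /* else discard, cf. free_sort_tuple */
    }
    /* heap now holds the 3 smallest values (3, 5, 7); root is their max */
    printf("root=%d\n", heap[0]);       /* prints 7 */
    return 0;
}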

◆ markrunend()

static void markrunend (LogicalTape *tape)

Definition at line 2853 of file tuplesort.c.

2854{
2855 unsigned int len = 0;
2856
2857 LogicalTapeWrite(tape, &len, sizeof(len));
2858}

References len, and LogicalTapeWrite().

Referenced by dumptuples(), and mergeonerun().

◆ merge_read_buffer_size()

static int64 merge_read_buffer_size (int64 avail_mem, int nInputTapes, int nInputRuns, int maxOutputTapes)

Definition at line 1819 of file tuplesort.c.

1821{
1822 int nOutputRuns;
1823 int nOutputTapes;
1824
1825 /*
1826 * How many output tapes will we produce in this pass?
1827 *
1828 * This is nInputRuns / nInputTapes, rounded up.
1829 */
1830 nOutputRuns = (nInputRuns + nInputTapes - 1) / nInputTapes;
1831
1832 nOutputTapes = Min(nOutputRuns, maxOutputTapes);
1833
1834 /*
1835 * Each output tape consumes TAPE_BUFFER_OVERHEAD bytes of memory. All
1836 * remaining memory is divided evenly between the input tapes.
1837 *
1838 * This also follows from the formula in tuplesort_merge_order, but here
1839 * we derive the input buffer size from the amount of memory available,
1840 * and M and N.
1841 */
1842 return Max((avail_mem - TAPE_BUFFER_OVERHEAD * nOutputTapes) / nInputTapes, 0);
1843}

References Max, Min, and TAPE_BUFFER_OVERHEAD.

Referenced by mergeruns().
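
For example, with BLCKSZ = 8192, 4 MB of tape-buffer memory, 6 input tapes holding 10 runs, and up to 6 output tapes: the pass produces ceil(10/6) = 2 output runs, so 2 output tapes are reserved and each input tape gets (4 MB − 2 × 8192) / 6 = 696320 bytes (680 KB). A standalone sketch of the arithmetic (inputs are illustrative):

#include <stdint.h>
#include <stdio.h>

#define TAPE_BUF_OVERHEAD 8192          /* one BLCKSZ-sized buffer per tape */

static int64_t
read_buffer_size(int64_t avail_mem, int nInputTapes, int nInputRuns,
                 int maxOutputTapes)
{
    int nOutputRuns = (nInputRuns + nInputTapes - 1) / nInputTapes; /* ceil */
    int nOutputTapes = nOutputRuns < maxOutputTapes ? nOutputRuns : maxOutputTapes;
    int64_t rest = avail_mem - (int64_t) TAPE_BUF_OVERHEAD * nOutputTapes;

    return rest > 0 ? rest / nInputTapes : 0;
}

int main(void)
{
    /* 4 MB for buffers, 6 input tapes with 10 runs, 6 output tapes max */
    printf("%lld\n", (long long) read_buffer_size(4 << 20, 6, 10, 6));
    return 0;                           /* prints 696320 */
}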

◆ mergeonerun()

static void mergeonerun (Tuplesortstate *state)

Definition at line 2186 of file tuplesort.c.

2187{
2188 int srcTapeIndex;
2189 LogicalTape *srcTape;
2190
2191 /*
2192 * Start the merge by loading one tuple from each active source tape into
2193 * the heap.
2194 */
2195 beginmerge(state);
2196
2197 Assert(state->slabAllocatorUsed);
2198
2199 /*
2200 * Execute merge by repeatedly extracting lowest tuple in heap, writing it
2201 * out, and replacing it with next tuple from same tape (if there is
2202 * another one).
2203 */
2204 while (state->memtupcount > 0)
2205 {
2206 SortTuple stup;
2207
2208 /* write the tuple to destTape */
2209 srcTapeIndex = state->memtuples[0].srctape;
2210 srcTape = state->inputTapes[srcTapeIndex];
2211 WRITETUP(state, state->destTape, &state->memtuples[0]);
2212
2213 /* recycle the slot of the tuple we just wrote out, for the next read */
2214 if (state->memtuples[0].tuple)
2215 RELEASE_SLAB_SLOT(state, state->memtuples[0].tuple);
2216
2217 /*
2218 * pull next tuple from the tape, and replace the written-out tuple in
2219 * the heap with it.
2220 */
2221 if (mergereadnext(state, srcTape, &stup))
2222 {
2223 stup.srctape = srcTapeIndex;
2224 tuplesort_heap_replace_top(state, &stup);
2225 }
2226 else
2227 {
2228 tuplesort_heap_delete_top(state);
2229 state->nInputRuns--;
2230 }
2231 }
2232
2233 /*
2234 * When the heap empties, we're done. Write an end-of-run marker on the
2235 * output tape.
2236 */
2237 markrunend(state->destTape);
2238}

References Assert(), beginmerge(), markrunend(), mergereadnext(), RELEASE_SLAB_SLOT, SortTuple::srctape, tuplesort_heap_delete_top(), tuplesort_heap_replace_top(), and WRITETUP.

Referenced by mergeruns().

◆ mergereadnext()

static bool mergereadnext (Tuplesortstate *state, LogicalTape *srcTape, SortTuple *stup)

Definition at line 2274 of file tuplesort.c.

2275{
2276 unsigned int tuplen;
2277
2278 /* read next tuple, if any */
2279 if ((tuplen = getlen(srcTape, true)) == 0)
2280 return false;
2281 READTUP(state, stup, srcTape, tuplen);
2282
2283 return true;
2284}

References getlen(), and READTUP.

Referenced by beginmerge(), mergeonerun(), and tuplesort_gettuple_common().
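
getlen(), READTUP, and markrunend() jointly define the on-tape run format this function consumes: each tuple is preceded by an unsigned int length word, and a zero length word ends the run. A standalone sketch of that framing, using a temporary file as a stand-in for a logical tape (the real per-tuple payload layout varies by tuplesort variant):

#include <stdio.h>
#include <string.h>

int main(void)
{
    FILE *tape = tmpfile();
    const char *tuples[] = {"apple", "fig"};
    unsigned int len, zero = 0;

    /* "WRITETUP": length word, then payload; "markrunend": zero word. */
    for (int i = 0; i < 2; i++)
    {
        len = (unsigned int) strlen(tuples[i]) + 1;
        fwrite(&len, sizeof(len), 1, tape);
        fwrite(tuples[i], len, 1, tape);
    }
    fwrite(&zero, sizeof(zero), 1, tape);

    /* "mergereadnext": pull tuples until the end-of-run marker. */
    rewind(tape);
    while (fread(&len, sizeof(len), 1, tape) == 1 && len != 0)
    {
        char buf[64];

        if (fread(buf, len, 1, tape) != 1)
            break;
        printf("read tuple: %s\n", buf);
    }
    fclose(tape);
    return 0;
}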

◆ mergeruns()

static void mergeruns (Tuplesortstate *state)

Definition at line 2003 of file tuplesort.c.

2004{
2005 int tapenum;
2006
2007 Assert(state->status == TSS_BUILDRUNS);
2008 Assert(state->memtupcount == 0);
2009
2010 if (state->base.sortKeys != NULL && state->base.sortKeys->abbrev_converter != NULL)
2011 {
2012 /*
2013 * If there are multiple runs to be merged, when we go to read back
2014 * tuples from disk, abbreviated keys will not have been stored, and
2015 * we don't care to regenerate them. Disable abbreviation from this
2016 * point on.
2017 */
2018 state->base.sortKeys->abbrev_converter = NULL;
2019 state->base.sortKeys->comparator = state->base.sortKeys->abbrev_full_comparator;
2020
2021 /* Not strictly necessary, but be tidy */
2022 state->base.sortKeys->abbrev_abort = NULL;
2023 state->base.sortKeys->abbrev_full_comparator = NULL;
2024 }
2025
2026 /*
2027 * Reset tuple memory. We've freed all the tuples that we previously
2028 * allocated. We will use the slab allocator from now on.
2029 */
2030 MemoryContextResetOnly(state->base.tuplecontext);
2031
2032 /*
2033 * We no longer need a large memtuples array. (We will allocate a smaller
2034 * one for the heap later.)
2035 */
2036 FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
2037 pfree(state->memtuples);
2038 state->memtuples = NULL;
2039
2040 /*
2041 * Initialize the slab allocator. We need one slab slot per input tape,
2042 * for the tuples in the heap, plus one to hold the tuple last returned
2043 * from tuplesort_gettuple. (If we're sorting pass-by-val Datums,
2044 * however, we don't need to do allocate anything.)
2045 *
2046 * In a multi-pass merge, we could shrink this allocation for the last
2047 * merge pass, if it has fewer tapes than previous passes, but we don't
2048 * bother.
2049 *
2050 * From this point on, we no longer use the USEMEM()/LACKMEM() mechanism
2051 * to track memory usage of individual tuples.
2052 */
2053 if (state->base.tuples)
2054 init_slab_allocator(state, state->nOutputTapes + 1);
2055 else
2056 init_slab_allocator(state, 0);
2057
2058 /*
2059 * Allocate a new 'memtuples' array, for the heap. It will hold one tuple
2060 * from each input tape.
2061 *
2062 * We could shrink this, too, between passes in a multi-pass merge, but we
2063 * don't bother. (The initial input tapes are still in outputTapes. The
2064 * number of input tapes will not increase between passes.)
2065 */
2066 state->memtupsize = state->nOutputTapes;
2067 state->memtuples = (SortTuple *) MemoryContextAlloc(state->base.maincontext,
2068 state->nOutputTapes * sizeof(SortTuple));
2069 USEMEM(state, GetMemoryChunkSpace(state->memtuples));
2070
2071 /*
2072 * Use all the remaining memory we have available for tape buffers among
2073 * all the input tapes. At the beginning of each merge pass, we will
2074 * divide this memory between the input and output tapes in the pass.
2075 */
2076 state->tape_buffer_mem = state->availMem;
2077 USEMEM(state, state->tape_buffer_mem);
2078 if (trace_sort)
2079 elog(LOG, "worker %d using %zu KB of memory for tape buffers",
2080 state->worker, state->tape_buffer_mem / 1024);
2081
2082 for (;;)
2083 {
2084 /*
2085 * On the first iteration, or if we have read all the runs from the
2086 * input tapes in a multi-pass merge, it's time to start a new pass.
2087 * Rewind all the output tapes, and make them inputs for the next
2088 * pass.
2089 */
2090 if (state->nInputRuns == 0)
2091 {
2092 int64 input_buffer_size;
2093
2094 /* Close the old, emptied, input tapes */
2095 if (state->nInputTapes > 0)
2096 {
2097 for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)
2098 LogicalTapeClose(state->inputTapes[tapenum]);
2099 pfree(state->inputTapes);
2100 }
2101
2102 /* Previous pass's outputs become next pass's inputs. */
2103 state->inputTapes = state->outputTapes;
2104 state->nInputTapes = state->nOutputTapes;
2105 state->nInputRuns = state->nOutputRuns;
2106
2107 /*
2108 * Reset output tape variables. The actual LogicalTapes will be
2109 * created as needed, here we only allocate the array to hold
2110 * them.
2111 */
2112 state->outputTapes = palloc0(state->nInputTapes * sizeof(LogicalTape *));
2113 state->nOutputTapes = 0;
2114 state->nOutputRuns = 0;
2115
2116 /*
2117 * Redistribute the memory allocated for tape buffers, among the
2118 * new input and output tapes.
2119 */
2120 input_buffer_size = merge_read_buffer_size(state->tape_buffer_mem,
2121 state->nInputTapes,
2122 state->nInputRuns,
2123 state->maxTapes);
2124
2125 if (trace_sort)
2126 elog(LOG, "starting merge pass of %d input runs on %d tapes, " INT64_FORMAT " KB of memory for each input tape: %s",
2127 state->nInputRuns, state->nInputTapes, input_buffer_size / 1024,
2128 pg_rusage_show(&state->ru_start));
2129
2130 /* Prepare the new input tapes for merge pass. */
2131 for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)
2132 LogicalTapeRewindForRead(state->inputTapes[tapenum], input_buffer_size);
2133
2134 /*
2135 * If there's just one run left on each input tape, then only one
2136 * merge pass remains. If we don't have to produce a materialized
2137 * sorted tape, we can stop at this point and do the final merge
2138 * on-the-fly.
2139 */
2140 if ((state->base.sortopt & TUPLESORT_RANDOMACCESS) == 0
2141 && state->nInputRuns <= state->nInputTapes
2142 && !WORKER(state))
2143 {
2144 /* Tell logtape.c we won't be writing anymore */
2145 LogicalTapeSetForgetFreeSpace(state->tapeset);
2146 /* Initialize for the final merge pass */
2147 beginmerge(state);
2148 state->status = TSS_FINALMERGE;
2149 return;
2150 }
2151 }
2152
2153 /* Select an output tape */
2154 selectnewtape(state);
2155
2156 /* Merge one run from each input tape. */
2157 mergeonerun(state);
2158
2159 /*
2160 * If the input tapes are empty, and we output only one output run,
2161 * we're done. The current output tape contains the final result.
2162 */
2163 if (state->nInputRuns == 0 && state->nOutputRuns <= 1)
2164 break;
2165 }
2166
2167 /*
2168 * Done. The result is on a single run on a single tape.
2169 */
2170 state->result_tape = state->outputTapes[0];
2171 if (!WORKER(state))
2172 LogicalTapeFreeze(state->result_tape, NULL);
2173 else
2174 worker_freeze_result_tape(state);
2175 state->status = TSS_SORTEDONTAPE;
2176
2177 /* Close all the now-empty input tapes, to release their read buffers. */
2178 for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)
2179 LogicalTapeClose(state->inputTapes[tapenum]);
2180}

References Assert(), beginmerge(), elog, FREEMEM, GetMemoryChunkSpace(), init_slab_allocator(), INT64_FORMAT, LOG, LogicalTapeClose(), LogicalTapeFreeze(), LogicalTapeRewindForRead(), LogicalTapeSetForgetFreeSpace(), MemoryContextAlloc(), MemoryContextResetOnly(), merge_read_buffer_size(), mergeonerun(), palloc0(), pfree(), pg_rusage_show(), selectnewtape(), trace_sort, TSS_BUILDRUNS, TSS_FINALMERGE, TSS_SORTEDONTAPE, TUPLESORT_RANDOMACCESS, USEMEM, WORKER, and worker_freeze_result_tape().

Referenced by inittapes(), and tuplesort_performsort().

◆ qsort_tuple_int32_compare()

static pg_attribute_always_inline int qsort_tuple_int32_compare (SortTuple *a, SortTuple *b, Tuplesortstate *state)

Definition at line 538 of file tuplesort.c.

539{
540 int compare;
541
542 compare = ApplyInt32SortComparator(a->datum1, a->isnull1,
543 b->datum1, b->isnull1,
544 &state->base.sortKeys[0]);
545
546 if (compare != 0)
547 return compare;
548
549 /*
550 * No need to waste effort calling the tiebreak function when there are no
551 * other keys to sort on.
552 */
553 if (state->base.onlyKey != NULL)
554 return 0;
555
556 return state->base.comparetup_tiebreak(a, b, state);
557}

References a, ApplyInt32SortComparator(), b, and compare().

◆ qsort_tuple_signed_compare()

static pg_attribute_always_inline int qsort_tuple_signed_compare (SortTuple *a, SortTuple *b, Tuplesortstate *state)

Definition at line 515 of file tuplesort.c.

516{
517 int compare;
518
519 compare = ApplySignedSortComparator(a->datum1, a->isnull1,
520 b->datum1, b->isnull1,
521 &state->base.sortKeys[0]);
522
523 if (compare != 0)
524 return compare;
525
526 /*
527 * No need to waste effort calling the tiebreak function when there are no
528 * other keys to sort on.
529 */
530 if (state->base.onlyKey != NULL)
531 return 0;
532
533 return state->base.comparetup_tiebreak(a, b, state);
534}

References a, ApplySignedSortComparator(), b, and compare().

◆ qsort_tuple_unsigned_compare()

static pg_attribute_always_inline int qsort_tuple_unsigned_compare (SortTuple *a, SortTuple *b, Tuplesortstate *state)

Definition at line 493 of file tuplesort.c.

494{
495 int compare;
496
497 compare = ApplyUnsignedSortComparator(a->datum1, a->isnull1,
498 b->datum1, b->isnull1,
499 &state->base.sortKeys[0]);
500 if (compare != 0)
501 return compare;
502
503 /*
504 * No need to waste effort calling the tiebreak function when there are no
505 * other keys to sort on.
506 */
507 if (state->base.onlyKey != NULL)
508 return 0;
509
510 return state->base.comparetup_tiebreak(a, b, state);
511}

References a, ApplyUnsignedSortComparator(), b, and compare().

◆ reversedirection()

static void reversedirection (Tuplesortstate *state)

Definition at line 2822 of file tuplesort.c.

2823{
2824 SortSupport sortKey = state->base.sortKeys;
2825 int nkey;
2826
2827 for (nkey = 0; nkey < state->base.nKeys; nkey++, sortKey++)
2828 {
2829 sortKey->ssup_reverse = !sortKey->ssup_reverse;
2830 sortKey->ssup_nulls_first = !sortKey->ssup_nulls_first;
2831 }
2832}

References SortSupportData::ssup_nulls_first, and SortSupportData::ssup_reverse.

Referenced by make_bounded_heap(), and sort_bounded_heap().

◆ selectnewtape()

static void selectnewtape (Tuplesortstate *state)

Definition at line 1934 of file tuplesort.c.

1935{
1936 /*
1937 * At the beginning of each merge pass, nOutputTapes and nOutputRuns are
1938 * both zero. On each call, we create a new output tape to hold the next
1939 * run, until maxTapes is reached. After that, we assign new runs to the
1940 * existing tapes in a round robin fashion.
1941 */
1942 if (state->nOutputTapes < state->maxTapes)
1943 {
1944 /* Create a new tape to hold the next run */
1945 Assert(state->outputTapes[state->nOutputRuns] == NULL);
1946 Assert(state->nOutputRuns == state->nOutputTapes);
1947 state->destTape = LogicalTapeCreate(state->tapeset);
1948 state->outputTapes[state->nOutputTapes] = state->destTape;
1949 state->nOutputTapes++;
1950 state->nOutputRuns++;
1951 }
1952 else
1953 {
1954 /*
1955 * We have reached the max number of tapes. Append to an existing
1956 * tape.
1957 */
1958 state->destTape = state->outputTapes[state->nOutputRuns % state->nOutputTapes];
1959 state->nOutputRuns++;
1960 }
1961}

References Assert(), and LogicalTapeCreate().

Referenced by dumptuples(), inittapes(), and mergeruns().
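
A standalone sketch of the policy: the first maxTapes runs each get a fresh tape, and later runs are appended round robin (tape count is illustrative):

#include <stdio.h>

int main(void)
{
    int maxTapes = 4, nOutputTapes = 0, nOutputRuns = 0;

    for (int run = 0; run < 10; run++)
    {
        int destTape;

        if (nOutputTapes < maxTapes)
            destTape = nOutputTapes++;              /* create a new tape */
        else
            destTape = nOutputRuns % nOutputTapes;  /* append round robin */
        nOutputRuns++;
        printf("run %d -> tape %d\n", run, destTape);
    }
    return 0;   /* runs 0-3 get tapes 0-3, then 4->0, 5->1, 6->2, ... */
}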

◆ sort_bounded_heap()

static void sort_bounded_heap (Tuplesortstate *state)

Definition at line 2622 of file tuplesort.c.

2623{
2624 int tupcount = state->memtupcount;
2625
2626 Assert(state->status == TSS_BOUNDED);
2627 Assert(state->bounded);
2628 Assert(tupcount == state->bound);
2629 Assert(SERIAL(state));
2630
2631 /*
2632 * We can unheapify in place because each delete-top call will remove the
2633 * largest entry, which we can promptly store in the newly freed slot at
2634 * the end. Once we're down to a single-entry heap, we're done.
2635 */
2636 while (state->memtupcount > 1)
2637 {
2638 SortTuple stup = state->memtuples[0];
2639
2640 /* this sifts-up the next-largest entry and decreases memtupcount */
2641 tuplesort_heap_delete_top(state);
2642 state->memtuples[state->memtupcount] = stup;
2643 }
2644 state->memtupcount = tupcount;
2645
2646 /*
2647 * Reverse sort direction back to the original state. This is not
2648 * actually necessary but seems like a good idea for tidiness.
2649 */
2650 reversedirection(state);
2651
2652 state->status = TSS_SORTEDINMEM;
2653 state->boundUsed = true;
2654}

References Assert(), reversedirection(), SERIAL, TSS_BOUNDED, TSS_SORTEDINMEM, and tuplesort_heap_delete_top().

Referenced by tuplesort_performsort().

◆ ssup_datum_int32_cmp()

int ssup_datum_int32_cmp (Datum x, Datum y, SortSupport ssup)

Definition at line 3148 of file tuplesort.c.

3149{
3150 int32 xx = DatumGetInt32(x);
3151 int32 yy = DatumGetInt32(y);
3152
3153 if (xx < yy)
3154 return -1;
3155 else if (xx > yy)
3156 return 1;
3157 else
3158 return 0;
3159}

References DatumGetInt32(), x, and y.

Referenced by btint4sortsupport(), date_sortsupport(), and tuplesort_sort_memtuples().
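
Opclass sortsupport functions install these fast comparators into SortSupportData so that tuplesort_sort_memtuples() can recognize them and pick the matching specialized qsort. For reference, btint4sortsupport() amounts to the following (sketch; PostgreSQL-internal types assumed):

Datum
btint4sortsupport(PG_FUNCTION_ARGS)
{
    SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);

    ssup->comparator = ssup_datum_int32_cmp;    /* raw Datum comparison */
    PG_RETURN_VOID();
}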

◆ ssup_datum_signed_cmp()

int ssup_datum_signed_cmp (Datum x, Datum y, SortSupport ssup)

Definition at line 3134 of file tuplesort.c.

3135{
3136 int64 xx = DatumGetInt64(x);
3137 int64 yy = DatumGetInt64(y);
3138
3139 if (xx < yy)
3140 return -1;
3141 else if (xx > yy)
3142 return 1;
3143 else
3144 return 0;
3145}

References DatumGetInt64(), x, and y.

Referenced by btint8sortsupport(), timestamp_sortsupport(), and tuplesort_sort_memtuples().

◆ ssup_datum_unsigned_cmp()

int ssup_datum_unsigned_cmp (Datum x, Datum y, SortSupport ssup)

Definition at line 3123 of file tuplesort.c.

3124{
3125 if (x < y)
3126 return -1;
3127 else if (x > y)
3128 return 1;
3129 else
3130 return 0;
3131}

References x, and y.

Referenced by gist_point_sortsupport(), macaddr_sortsupport(), network_sortsupport(), tuplesort_sort_memtuples(), uuid_sortsupport(), and varstr_sortsupport().

◆ tuplesort_attach_shared()

void tuplesort_attach_shared (Sharedsort *shared, dsm_segment *seg)

Definition at line 2945 of file tuplesort.c.

2946{
2947 /* Attach to SharedFileSet */
2948 SharedFileSetAttach(&shared->fileset, seg);
2949}

References Sharedsort::fileset, and SharedFileSetAttach().

Referenced by _brin_parallel_build_main(), _bt_parallel_build_main(), and _gin_parallel_build_main().

◆ tuplesort_begin_batch()

static void tuplesort_begin_batch (Tuplesortstate *state)

Definition at line 742 of file tuplesort.c.

743{
744 MemoryContext oldcontext;
745
746 oldcontext = MemoryContextSwitchTo(state->base.maincontext);
747
748 /*
749 * Caller tuple (e.g. IndexTuple) memory context.
750 *
751 * A dedicated child context used exclusively for caller passed tuples
752 * eases memory management. Resetting at key points reduces
753 * fragmentation. Note that the memtuples array of SortTuples is allocated
754 * in the parent context, not this context, because there is no need to
755 * free memtuples early. For bounded sorts, tuples may be pfreed in any
756 * order, so we use a regular aset.c context so that it can make use of
757 * free'd memory. When the sort is not bounded, we make use of a bump.c
758 * context as this keeps allocations more compact with less wastage.
759 * Allocations are also slightly more CPU efficient.
760 */
761 if (TupleSortUseBumpTupleCxt(state->base.sortopt))
762 state->base.tuplecontext = BumpContextCreate(state->base.sortcontext,
763 "Caller tuples",
765 else
766 state->base.tuplecontext = AllocSetContextCreate(state->base.sortcontext,
767 "Caller tuples",
769
770
771 state->status = TSS_INITIAL;
772 state->bounded = false;
773 state->boundUsed = false;
774
775 state->availMem = state->allowedMem;
776
777 state->tapeset = NULL;
778
779 state->memtupcount = 0;
780
781 state->growmemtuples = true;
782 state->slabAllocatorUsed = false;
783 if (state->memtuples != NULL && state->memtupsize != INITIAL_MEMTUPSIZE)
784 {
785 pfree(state->memtuples);
786 state->memtuples = NULL;
787 state->memtupsize = INITIAL_MEMTUPSIZE;
788 }
789 if (state->memtuples == NULL)
790 {
791 state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));
792 USEMEM(state, GetMemoryChunkSpace(state->memtuples));
793 }
794
795 /* workMem must be large enough for the minimal memtuples array */
796 if (LACKMEM(state))
797 elog(ERROR, "insufficient memory allowed for sort");
798
799 state->currentRun = 0;
800
801 /*
802 * Tape variables (inputTapes, outputTapes, etc.) will be initialized by
803 * inittapes(), if needed.
804 */
805
806 state->result_tape = NULL; /* flag that result tape has not been formed */
807
808 MemoryContextSwitchTo(oldcontext);
809}
MemoryContext BumpContextCreate(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: bump.c:133
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
#define INITIAL_MEMTUPSIZE
Definition: tuplesort.c:118
#define TupleSortUseBumpTupleCxt(opt)
Definition: tuplesort.h:109

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, BumpContextCreate(), elog, ERROR, GetMemoryChunkSpace(), INITIAL_MEMTUPSIZE, LACKMEM, MemoryContextSwitchTo(), palloc(), pfree(), TSS_INITIAL, TupleSortUseBumpTupleCxt, and USEMEM.

Referenced by tuplesort_begin_common(), and tuplesort_reset().

◆ tuplesort_begin_common()

Tuplesortstate * tuplesort_begin_common ( int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 636 of file tuplesort.c.

637{
638 Tuplesortstate *state;
639 MemoryContext maincontext;
640 MemoryContext sortcontext;
641 MemoryContext oldcontext;
642
643 /* See leader_takeover_tapes() remarks on random access support */
644 if (coordinate && (sortopt & TUPLESORT_RANDOMACCESS))
645 elog(ERROR, "random access disallowed under parallel sort");
646
647 /*
648 * Memory context surviving tuplesort_reset. This memory context holds
649 * data which is useful to keep while sorting multiple similar batches.
650 */
651 maincontext = AllocSetContextCreate(CurrentMemoryContext,
652 "TupleSort main",
653 ALLOCSET_DEFAULT_SIZES);
654
655 /*
656 * Create a working memory context for one sort operation. The content of
657 * this context is deleted by tuplesort_reset.
658 */
659 sortcontext = AllocSetContextCreate(maincontext,
660 "TupleSort sort",
661 ALLOCSET_DEFAULT_SIZES);
662
663 /*
664 * Additionally a working memory context for tuples is setup in
665 * tuplesort_begin_batch.
666 */
667
668 /*
669 * Make the Tuplesortstate within the per-sortstate context. This way, we
670 * don't need a separate pfree() operation for it at shutdown.
671 */
672 oldcontext = MemoryContextSwitchTo(maincontext);
673
674 state = palloc0_object(Tuplesortstate);
675
676 if (trace_sort)
677 pg_rusage_init(&state->ru_start);
678
679 state->base.sortopt = sortopt;
680 state->base.tuples = true;
681 state->abbrevNext = 10;
682
683 /*
684 * workMem is forced to be at least 64KB, the current minimum valid value
685 * for the work_mem GUC. This is a defense against parallel sort callers
686 * that divide out memory among many workers in a way that leaves each
687 * with very little memory.
688 */
689 state->allowedMem = Max(workMem, 64) * (int64) 1024;
690 state->base.sortcontext = sortcontext;
691 state->base.maincontext = maincontext;
692
693 state->memtupsize = INITIAL_MEMTUPSIZE;
694 state->memtuples = NULL;
695
696 /*
697 * After all of the other non-parallel-related state, we setup all of the
698 * state needed for each batch.
699 */
700 tuplesort_begin_batch(state);
701
702 /*
703 * Initialize parallel-related state based on coordination information
704 * from caller
705 */
706 if (!coordinate)
707 {
708 /* Serial sort */
709 state->shared = NULL;
710 state->worker = -1;
711 state->nParticipants = -1;
712 }
713 else if (coordinate->isWorker)
714 {
715 /* Parallel worker produces exactly one final run from all input */
716 state->shared = coordinate->sharedsort;
717 state->worker = worker_get_identifier(state);
718 state->nParticipants = -1;
719 }
720 else
721 {
722 /* Parallel leader state only used for final merge */
723 state->shared = coordinate->sharedsort;
724 state->worker = -1;
725 state->nParticipants = coordinate->nParticipants;
726 Assert(state->nParticipants >= 1);
727 }
728
729 MemoryContextSwitchTo(oldcontext);
730
731 return state;
732}
#define palloc0_object(type)
Definition: fe_memutils.h:75
MemoryContext CurrentMemoryContext
Definition: mcxt.c:160
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
Sharedsort * sharedsort
Definition: tuplesort.h:59
static int worker_get_identifier(Tuplesortstate *state)
Definition: tuplesort.c:2965
static void tuplesort_begin_batch(Tuplesortstate *state)
Definition: tuplesort.c:742

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert(), CurrentMemoryContext, elog, ERROR, INITIAL_MEMTUPSIZE, SortCoordinateData::isWorker, Max, MemoryContextSwitchTo(), SortCoordinateData::nParticipants, palloc0_object, pg_rusage_init(), SortCoordinateData::sharedsort, trace_sort, tuplesort_begin_batch(), TUPLESORT_RANDOMACCESS, and worker_get_identifier().

Referenced by tuplesort_begin_cluster(), tuplesort_begin_datum(), tuplesort_begin_heap(), tuplesort_begin_index_brin(), tuplesort_begin_index_btree(), tuplesort_begin_index_gin(), tuplesort_begin_index_gist(), and tuplesort_begin_index_hash().
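
tuplesort_begin_common is only reached through the type-specific begin routines. A minimal serial lifecycle, sketched here with tuplesort_begin_heap (tupdesc, slot, and the hypothetical get_next_input_slot loop are assumed to exist in the caller):

    AttrNumber  attNums[1] = {1};
    Oid         sortOps[1] = {Int4LessOperator};
    Oid         collations[1] = {InvalidOid};
    bool        nullsFirst[1] = {false};
    Tuplesortstate *ts;

    ts = tuplesort_begin_heap(tupdesc, 1, attNums, sortOps, collations,
                              nullsFirst, work_mem, NULL, TUPLESORT_NONE);

    while (get_next_input_slot(slot))           /* hypothetical input loop */
        tuplesort_puttupleslot(ts, slot);

    tuplesort_performsort(ts);

    while (tuplesort_gettupleslot(ts, true, false, slot, NULL))
        ;                                       /* consume sorted tuples */

    tuplesort_end(ts);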

◆ tuplesort_end()

void tuplesort_end ( Tuplesortstate *state )

Definition at line 937 of file tuplesort.c.

938{
939 tuplesort_free(state);
940
941 /*
942 * Free the main memory context, including the Tuplesortstate struct
943 * itself.
944 */
945 MemoryContextDelete(state->base.maincontext);
946}

References MemoryContextDelete(), and tuplesort_free().

◆ tuplesort_estimate_shared()

Size tuplesort_estimate_shared ( int  nWorkers)

Definition at line 2901 of file tuplesort.c.

2902{
2903 Size tapesSize;
2904
2905 Assert(nWorkers > 0);
2906
2907 /* Make sure that BufFile shared state is MAXALIGN'd */
2908 tapesSize = mul_size(sizeof(TapeShare), nWorkers);
2909 tapesSize = MAXALIGN(add_size(tapesSize, offsetof(Sharedsort, tapes)));
2910
2911 return tapesSize;
2912}
#define MAXALIGN(LEN)
Definition: c.h:824
Size add_size(Size s1, Size s2)
Definition: shmem.c:495
Size mul_size(Size s1, Size s2)
Definition: shmem.c:510

References add_size(), Assert(), MAXALIGN, and mul_size().

Referenced by _brin_begin_parallel(), _bt_begin_parallel(), and _gin_begin_parallel().
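
Sketch of how a parallel index-build leader sizes its DSM request with this function (pattern borrowed from _bt_begin_parallel; pcxt is an assumed ParallelContext):

    shm_toc_estimate_chunk(&pcxt->estimator,
                           tuplesort_estimate_shared(nworkers));
    shm_toc_estimate_keys(&pcxt->estimator, 1);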

◆ tuplesort_free()

static void tuplesort_free ( Tuplesortstate *state)
static

Definition at line 883 of file tuplesort.c.

884{
885 /* context swap probably not needed, but let's be safe */
886 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
887 int64 spaceUsed;
888
889 if (state->tapeset)
890 spaceUsed = LogicalTapeSetBlocks(state->tapeset);
891 else
892 spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
893
894 /*
895 * Delete temporary "tape" files, if any.
896 *
897 * We don't bother to destroy the individual tapes here. They will go away
898 * with the sortcontext. (In TSS_FINALMERGE state, we have closed
899 * finished tapes already.)
900 */
901 if (state->tapeset)
902 LogicalTapeSetClose(state->tapeset);
903
904 if (trace_sort)
905 {
906 if (state->tapeset)
907 elog(LOG, "%s of worker %d ended, %" PRId64 " disk blocks used: %s",
908 SERIAL(state) ? "external sort" : "parallel external sort",
909 state->worker, spaceUsed, pg_rusage_show(&state->ru_start));
910 else
911 elog(LOG, "%s of worker %d ended, %" PRId64 " KB used: %s",
912 SERIAL(state) ? "internal sort" : "unperformed parallel sort",
913 state->worker, spaceUsed, pg_rusage_show(&state->ru_start));
914 }
915
916 TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
917
918 FREESTATE(state);
919 MemoryContextSwitchTo(oldcontext);
920
921 /*
922 * Free the per-sort memory context, thereby releasing all working memory.
923 */
924 MemoryContextReset(state->base.sortcontext);
925}
int64 LogicalTapeSetBlocks(LogicalTapeSet *lts)
Definition: logtape.c:1181
void LogicalTapeSetClose(LogicalTapeSet *lts)
Definition: logtape.c:667
#define FREESTATE(state)
Definition: tuplesort.c:397

References elog, FREESTATE, LOG, LogicalTapeSetBlocks(), LogicalTapeSetClose(), MemoryContextReset(), MemoryContextSwitchTo(), pg_rusage_show(), SERIAL, and trace_sort.

Referenced by tuplesort_end(), and tuplesort_reset().

◆ tuplesort_get_stats()

void tuplesort_get_stats ( Tuplesortstate *state,
TuplesortInstrumentation *stats
)

Definition at line 2485 of file tuplesort.c.

2487{
2488 /*
2489 * Note: it might seem we should provide both memory and disk usage for a
2490 * disk-based sort. However, the current code doesn't track memory space
2491 * accurately once we have begun to return tuples to the caller (since we
2492 * don't account for pfree's the caller is expected to do), so we cannot
2493 * rely on availMem in a disk sort. This does not seem worth the overhead
2494 * to fix. Is it worth creating an API for the memory context code to
2495 * tell us how much is actually used in sortcontext?
2496 */
2497 tuplesort_updatemax(state);
2498
2499 if (state->isMaxSpaceDisk)
2500 stats->spaceType = SORT_SPACE_TYPE_DISK;
2501 else
2502 stats->spaceType = SORT_SPACE_TYPE_MEMORY;
2503 stats->spaceUsed = (state->maxSpace + 1023) / 1024;
2504
2505 switch (state->maxSpaceStatus)
2506 {
2507 case TSS_SORTEDINMEM:
2508 if (state->boundUsed)
2509 stats->sortMethod = SORT_TYPE_TOP_N_HEAPSORT;
2510 else
2511 stats->sortMethod = SORT_TYPE_QUICKSORT;
2512 break;
2513 case TSS_SORTEDONTAPE:
2514 stats->sortMethod = SORT_TYPE_EXTERNAL_SORT;
2515 break;
2516 case TSS_FINALMERGE:
2517 stats->sortMethod = SORT_TYPE_EXTERNAL_MERGE;
2518 break;
2519 default:
2520 stats->sortMethod = SORT_TYPE_STILL_IN_PROGRESS;
2521 break;
2522 }
2523}
TuplesortMethod sortMethod
Definition: tuplesort.h:113
TuplesortSpaceType spaceType
Definition: tuplesort.h:114
static void tuplesort_updatemax(Tuplesortstate *state)
Definition: tuplesort.c:954
@ SORT_SPACE_TYPE_DISK
Definition: tuplesort.h:89
@ SORT_SPACE_TYPE_MEMORY
Definition: tuplesort.h:90
@ SORT_TYPE_EXTERNAL_SORT
Definition: tuplesort.h:81
@ SORT_TYPE_TOP_N_HEAPSORT
Definition: tuplesort.h:79
@ SORT_TYPE_QUICKSORT
Definition: tuplesort.h:80
@ SORT_TYPE_STILL_IN_PROGRESS
Definition: tuplesort.h:78
@ SORT_TYPE_EXTERNAL_MERGE
Definition: tuplesort.h:82

References SORT_SPACE_TYPE_DISK, SORT_SPACE_TYPE_MEMORY, SORT_TYPE_EXTERNAL_MERGE, SORT_TYPE_EXTERNAL_SORT, SORT_TYPE_QUICKSORT, SORT_TYPE_STILL_IN_PROGRESS, SORT_TYPE_TOP_N_HEAPSORT, TuplesortInstrumentation::sortMethod, TuplesortInstrumentation::spaceType, TuplesortInstrumentation::spaceUsed, TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and tuplesort_updatemax().

Referenced by ExecSort(), instrumentSortedGroup(), and show_sort_info().
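
For illustration, a caller can report the collected instrumentation once the sort has been performed; a sketch along the lines of show_sort_info, assuming ts is a performed Tuplesortstate:

    TuplesortInstrumentation stats;

    tuplesort_get_stats(ts, &stats);
    elog(LOG, "Sort Method: %s  %s: %" PRId64 "kB",
         tuplesort_method_name(stats.sortMethod),
         tuplesort_space_type_name(stats.spaceType),
         stats.spaceUsed);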

◆ tuplesort_gettuple_common()

bool tuplesort_gettuple_common ( Tuplesortstate *state,
bool forward,
SortTuple *stup
)

Definition at line 1456 of file tuplesort.c.

1458{
1459 unsigned int tuplen;
1460 size_t nmoved;
1461
1462 Assert(!WORKER(state));
1463
1464 switch (state->status)
1465 {
1466 case TSS_SORTEDINMEM:
1467 Assert(forward || state->base.sortopt & TUPLESORT_RANDOMACCESS);
1468 Assert(!state->slabAllocatorUsed);
1469 if (forward)
1470 {
1471 if (state->current < state->memtupcount)
1472 {
1473 *stup = state->memtuples[state->current++];
1474 return true;
1475 }
1476 state->eof_reached = true;
1477
1478 /*
1479 * Complain if caller tries to retrieve more tuples than
1480 * originally asked for in a bounded sort. This is because
1481 * returning EOF here might be the wrong thing.
1482 */
1483 if (state->bounded && state->current >= state->bound)
1484 elog(ERROR, "retrieved too many tuples in a bounded sort");
1485
1486 return false;
1487 }
1488 else
1489 {
1490 if (state->current <= 0)
1491 return false;
1492
1493 /*
1494 * if all tuples are fetched already then we return last
1495 * tuple, else - tuple before last returned.
1496 */
1497 if (state->eof_reached)
1498 state->eof_reached = false;
1499 else
1500 {
1501 state->current--; /* last returned tuple */
1502 if (state->current <= 0)
1503 return false;
1504 }
1505 *stup = state->memtuples[state->current - 1];
1506 return true;
1507 }
1508 break;
1509
1510 case TSS_SORTEDONTAPE:
1511 Assert(forward || state->base.sortopt & TUPLESORT_RANDOMACCESS);
1512 Assert(state->slabAllocatorUsed);
1513
1514 /*
1515 * The slot that held the tuple that we returned in previous
1516 * gettuple call can now be reused.
1517 */
1518 if (state->lastReturnedTuple)
1519 {
1520 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
1521 state->lastReturnedTuple = NULL;
1522 }
1523
1524 if (forward)
1525 {
1526 if (state->eof_reached)
1527 return false;
1528
1529 if ((tuplen = getlen(state->result_tape, true)) != 0)
1530 {
1531 READTUP(state, stup, state->result_tape, tuplen);
1532
1533 /*
1534 * Remember the tuple we return, so that we can recycle
1535 * its memory on next call. (This can be NULL, in the
1536 * !state->tuples case).
1537 */
1538 state->lastReturnedTuple = stup->tuple;
1539
1540 return true;
1541 }
1542 else
1543 {
1544 state->eof_reached = true;
1545 return false;
1546 }
1547 }
1548
1549 /*
1550 * Backward.
1551 *
1552 * if all tuples are fetched already then we return last tuple,
1553 * else - tuple before last returned.
1554 */
1555 if (state->eof_reached)
1556 {
1557 /*
1558 * Seek position is pointing just past the zero tuplen at the
1559 * end of file; back up to fetch last tuple's ending length
1560 * word. If seek fails we must have a completely empty file.
1561 */
1562 nmoved = LogicalTapeBackspace(state->result_tape,
1563 2 * sizeof(unsigned int));
1564 if (nmoved == 0)
1565 return false;
1566 else if (nmoved != 2 * sizeof(unsigned int))
1567 elog(ERROR, "unexpected tape position");
1568 state->eof_reached = false;
1569 }
1570 else
1571 {
1572 /*
1573 * Back up and fetch previously-returned tuple's ending length
1574 * word. If seek fails, assume we are at start of file.
1575 */
1576 nmoved = LogicalTapeBackspace(state->result_tape,
1577 sizeof(unsigned int));
1578 if (nmoved == 0)
1579 return false;
1580 else if (nmoved != sizeof(unsigned int))
1581 elog(ERROR, "unexpected tape position");
1582 tuplen = getlen(state->result_tape, false);
1583
1584 /*
1585 * Back up to get ending length word of tuple before it.
1586 */
1587 nmoved = LogicalTapeBackspace(state->result_tape,
1588 tuplen + 2 * sizeof(unsigned int));
1589 if (nmoved == tuplen + sizeof(unsigned int))
1590 {
1591 /*
1592 * We backed up over the previous tuple, but there was no
1593 * ending length word before it. That means that the prev
1594 * tuple is the first tuple in the file. It is now the
1595 * next to read in forward direction (not obviously right,
1596 * but that is what in-memory case does).
1597 */
1598 return false;
1599 }
1600 else if (nmoved != tuplen + 2 * sizeof(unsigned int))
1601 elog(ERROR, "bogus tuple length in backward scan");
1602 }
1603
1604 tuplen = getlen(state->result_tape, false);
1605
1606 /*
1607 * Now we have the length of the prior tuple, back up and read it.
1608 * Note: READTUP expects we are positioned after the initial
1609 * length word of the tuple, so back up to that point.
1610 */
1611 nmoved = LogicalTapeBackspace(state->result_tape,
1612 tuplen);
1613 if (nmoved != tuplen)
1614 elog(ERROR, "bogus tuple length in backward scan");
1615 READTUP(state, stup, state->result_tape, tuplen);
1616
1617 /*
1618 * Remember the tuple we return, so that we can recycle its memory
1619 * on next call. (This can be NULL, in the Datum case).
1620 */
1621 state->lastReturnedTuple = stup->tuple;
1622
1623 return true;
1624
1625 case TSS_FINALMERGE:
1626 Assert(forward);
1627 /* We are managing memory ourselves, with the slab allocator. */
1628 Assert(state->slabAllocatorUsed);
1629
1630 /*
1631 * The slab slot holding the tuple that we returned in previous
1632 * gettuple call can now be reused.
1633 */
1634 if (state->lastReturnedTuple)
1635 {
1636 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
1637 state->lastReturnedTuple = NULL;
1638 }
1639
1640 /*
1641 * This code should match the inner loop of mergeonerun().
1642 */
1643 if (state->memtupcount > 0)
1644 {
1645 int srcTapeIndex = state->memtuples[0].srctape;
1646 LogicalTape *srcTape = state->inputTapes[srcTapeIndex];
1647 SortTuple newtup;
1648
1649 *stup = state->memtuples[0];
1650
1651 /*
1652 * Remember the tuple we return, so that we can recycle its
1653 * memory on next call. (This can be NULL, in the Datum case).
1654 */
1655 state->lastReturnedTuple = stup->tuple;
1656
1657 /*
1658 * Pull next tuple from tape, and replace the returned tuple
1659 * at top of the heap with it.
1660 */
1661 if (!mergereadnext(state, srcTape, &newtup))
1662 {
1663 /*
1664 * If no more data, we've reached end of run on this tape.
1665 * Remove the top node from the heap.
1666 */
1667 tuplesort_heap_delete_top(state);
1668 state->nInputRuns--;
1669
1670 /*
1671 * Close the tape. It'd go away at the end of the sort
1672 * anyway, but better to release the memory early.
1673 */
1674 LogicalTapeClose(srcTape);
1675 return true;
1676 }
1677 newtup.srctape = srcTapeIndex;
1678 tuplesort_heap_replace_top(state, &newtup);
1679 return true;
1680 }
1681 return false;
1682
1683 default:
1684 elog(ERROR, "invalid tuplesort state");
1685 return false; /* keep compiler quiet */
1686 }
1687}
size_t LogicalTapeBackspace(LogicalTape *lt, size_t size)
Definition: logtape.c:1062

References Assert(), elog, ERROR, getlen(), LogicalTapeBackspace(), LogicalTapeClose(), mergereadnext(), READTUP, RELEASE_SLAB_SLOT, SortTuple::srctape, TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, SortTuple::tuple, tuplesort_heap_delete_top(), tuplesort_heap_replace_top(), TUPLESORT_RANDOMACCESS, and WORKER.

Referenced by tuplesort_getbrintuple(), tuplesort_getdatum(), tuplesort_getgintuple(), tuplesort_getheaptuple(), tuplesort_getindextuple(), tuplesort_gettupleslot(), and tuplesort_skiptuples().

◆ tuplesort_heap_delete_top()

static void tuplesort_heap_delete_top ( Tuplesortstate *state)
static

Definition at line 2758 of file tuplesort.c.

2759{
2760 SortTuple *memtuples = state->memtuples;
2761 SortTuple *tuple;
2762
2763 if (--state->memtupcount <= 0)
2764 return;
2765
2766 /*
2767 * Remove the last tuple in the heap, and re-insert it, by replacing the
2768 * current top node with it.
2769 */
2770 tuple = &memtuples[state->memtupcount];
2771 tuplesort_heap_replace_top(state, tuple);
2772}

References tuplesort_heap_replace_top().

Referenced by mergeonerun(), sort_bounded_heap(), and tuplesort_gettuple_common().

◆ tuplesort_heap_insert()

static void tuplesort_heap_insert ( Tuplesortstate *state,
SortTuple *tuple
)
static

Definition at line 2723 of file tuplesort.c.

2724{
2725 SortTuple *memtuples;
2726 int j;
2727
2728 memtuples = state->memtuples;
2729 Assert(state->memtupcount < state->memtupsize);
2730
2731 CHECK_FOR_INTERRUPTS();
2732
2733 /*
2734 * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
2735 * using 1-based array indexes, not 0-based.
2736 */
2737 j = state->memtupcount++;
2738 while (j > 0)
2739 {
2740 int i = (j - 1) >> 1;
2741
2742 if (COMPARETUP(state, tuple, &memtuples[i]) >= 0)
2743 break;
2744 memtuples[j] = memtuples[i];
2745 j = i;
2746 }
2747 memtuples[j] = *tuple;
2748}

References Assert(), CHECK_FOR_INTERRUPTS, COMPARETUP, i, and j.

Referenced by beginmerge(), and make_bounded_heap().

◆ tuplesort_heap_replace_top()

static void tuplesort_heap_replace_top ( Tuplesortstate *state,
SortTuple *tuple
)
static

Definition at line 2782 of file tuplesort.c.

2783{
2784 SortTuple *memtuples = state->memtuples;
2785 unsigned int i,
2786 n;
2787
2788 Assert(state->memtupcount >= 1);
2789
2790 CHECK_FOR_INTERRUPTS();
2791
2792 /*
2793 * state->memtupcount is "int", but we use "unsigned int" for i, j, n.
2794 * This prevents overflow in the "2 * i + 1" calculation, since at the top
2795 * of the loop we must have i < n <= INT_MAX <= UINT_MAX/2.
2796 */
2797 n = state->memtupcount;
2798 i = 0; /* i is where the "hole" is */
2799 for (;;)
2800 {
2801 unsigned int j = 2 * i + 1;
2802
2803 if (j >= n)
2804 break;
2805 if (j + 1 < n &&
2806 COMPARETUP(state, &memtuples[j], &memtuples[j + 1]) > 0)
2807 j++;
2808 if (COMPARETUP(state, tuple, &memtuples[j]) <= 0)
2809 break;
2810 memtuples[i] = memtuples[j];
2811 i = j;
2812 }
2813 memtuples[i] = *tuple;
2814}

References Assert(), CHECK_FOR_INTERRUPTS, COMPARETUP, i, and j.

Referenced by make_bounded_heap(), mergeonerun(), tuplesort_gettuple_common(), tuplesort_heap_delete_top(), and tuplesort_puttuple_common().

◆ tuplesort_initialize_shared()

void tuplesort_initialize_shared ( Sharedsort *shared,
int nWorkers,
dsm_segment *seg
)

Definition at line 2922 of file tuplesort.c.

2923{
2924 int i;
2925
2926 Assert(nWorkers > 0);
2927
2928 SpinLockInit(&shared->mutex);
2929 shared->currentWorker = 0;
2930 shared->workersFinished = 0;
2931 SharedFileSetInit(&shared->fileset, seg);
2932 shared->nTapes = nWorkers;
2933 for (i = 0; i < nWorkers; i++)
2934 {
2935 shared->tapes[i].firstblocknumber = 0L;
2936 }
2937}
void SharedFileSetInit(SharedFileSet *fileset, dsm_segment *seg)
Definition: sharedfileset.c:38
#define SpinLockInit(lock)
Definition: spin.h:57
int nTapes
Definition: tuplesort.c:361
int currentWorker
Definition: tuplesort.c:354
int64 firstblocknumber
Definition: logtape.h:54

References Assert(), Sharedsort::currentWorker, Sharedsort::fileset, TapeShare::firstblocknumber, i, Sharedsort::mutex, Sharedsort::nTapes, SharedFileSetInit(), SpinLockInit, Sharedsort::tapes, and Sharedsort::workersFinished.

Referenced by _brin_begin_parallel(), _bt_begin_parallel(), and _gin_begin_parallel().
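
Leader-side setup sketch (pattern from _bt_begin_parallel; the TOC key is a placeholder):

    Sharedsort *sharedsort;

    sharedsort = (Sharedsort *) shm_toc_allocate(pcxt->toc,
                                                 tuplesort_estimate_shared(nworkers));
    tuplesort_initialize_shared(sharedsort, nworkers, pcxt->seg);
    shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLESORT, sharedsort);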

◆ tuplesort_markpos()

void tuplesort_markpos ( Tuplesortstate *state)

Definition at line 2421 of file tuplesort.c.

2422{
2423 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2424
2425 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2426
2427 switch (state->status)
2428 {
2429 case TSS_SORTEDINMEM:
2430 state->markpos_offset = state->current;
2431 state->markpos_eof = state->eof_reached;
2432 break;
2433 case TSS_SORTEDONTAPE:
2434 LogicalTapeTell(state->result_tape,
2435 &state->markpos_block,
2436 &state->markpos_offset);
2437 state->markpos_eof = state->eof_reached;
2438 break;
2439 default:
2440 elog(ERROR, "invalid tuplesort state");
2441 break;
2442 }
2443
2444 MemoryContextSwitchTo(oldcontext);
2445}
void LogicalTapeTell(LogicalTape *lt, int64 *blocknum, int *offset)
Definition: logtape.c:1162

References Assert(), elog, ERROR, LogicalTapeTell(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecSortMarkPos().
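
Mark/restore is only legal for sorts created with TUPLESORT_RANDOMACCESS. A small sketch of pairing this with tuplesort_restorepos (ts and slot assumed to exist in the caller):

    tuplesort_markpos(ts);              /* remember the current read position */

    while (tuplesort_gettupleslot(ts, true, false, slot, NULL))
    {
        /* scan ahead over the sorted output ... */
    }

    tuplesort_restorepos(ts);           /* rewind to the mark */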

◆ tuplesort_merge_order()

int tuplesort_merge_order ( int64  allowedMem)

Definition at line 1764 of file tuplesort.c.

1765{
1766 int mOrder;
1767
1768 /*----------
1769 * In the merge phase, we need buffer space for each input and output tape.
1770 * Each pass in the balanced merge algorithm reads from M input tapes, and
1771 * writes to N output tapes. Each tape consumes TAPE_BUFFER_OVERHEAD bytes
1772 * of memory. In addition to that, we want MERGE_BUFFER_SIZE workspace per
1773 * input tape.
1774 *
1775 * totalMem = M * (TAPE_BUFFER_OVERHEAD + MERGE_BUFFER_SIZE) +
1776 * N * TAPE_BUFFER_OVERHEAD
1777 *
1778 * Except for the last and next-to-last merge passes, where there can be
1779 * fewer tapes left to process, M = N. We choose M so that we have the
1780 * desired amount of memory available for the input buffers
1781 * (TAPE_BUFFER_OVERHEAD + MERGE_BUFFER_SIZE), given the total memory
1782 * available for the tape buffers (allowedMem).
1783 *
1784 * Note: you might be thinking we need to account for the memtuples[]
1785 * array in this calculation, but we effectively treat that as part of the
1786 * MERGE_BUFFER_SIZE workspace.
1787 *----------
1788 */
1789 mOrder = allowedMem /
1790 (MERGE_BUFFER_SIZE + 2 * TAPE_BUFFER_OVERHEAD);
1791
1792 /*
1793 * Even in minimum memory, use at least a MINORDER merge. On the other
1794 * hand, even when we have lots of memory, do not use more than a MAXORDER
1795 * merge. Tapes are pretty cheap, but they're not entirely free. Each
1796 * additional tape reduces the amount of memory available to build runs,
1797 * which in turn can cause the same sort to need more runs, which makes
1798 * merging slower even if it can still be done in a single pass. Also,
1799 * high order merges are quite slow due to CPU cache effects; it can be
1800 * faster to pay the I/O cost of a multi-pass merge than to perform a
1801 * single merge pass across many hundreds of tapes.
1802 */
1803 mOrder = Max(mOrder, MINORDER);
1804 mOrder = Min(mOrder, MAXORDER);
1805
1806 return mOrder;
1807}
#define MAXORDER
Definition: tuplesort.c:175
#define MERGE_BUFFER_SIZE
Definition: tuplesort.c:177

References Max, MAXORDER, MERGE_BUFFER_SIZE, Min, MINORDER, and TAPE_BUFFER_OVERHEAD.

Referenced by cost_tuplesort(), and inittapes().
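
As a worked example under the default build settings (BLCKSZ = 8192, hence TAPE_BUFFER_OVERHEAD = 8192 and MERGE_BUFFER_SIZE = 262144): an allowedMem of 64MB gives mOrder = 67108864 / (262144 + 2 * 8192) = 240, which already lies inside the [MINORDER, MAXORDER] = [6, 500] clamp, while an allowedMem of 1MB gives 1048576 / 278528 = 3 and is raised to MINORDER = 6.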

◆ tuplesort_method_name()

const char * tuplesort_method_name ( TuplesortMethod  m)

Definition at line 2529 of file tuplesort.c.

2530{
2531 switch (m)
2532 {
2533 case SORT_TYPE_STILL_IN_PROGRESS:
2534 return "still in progress";
2535 case SORT_TYPE_TOP_N_HEAPSORT:
2536 return "top-N heapsort";
2537 case SORT_TYPE_QUICKSORT:
2538 return "quicksort";
2539 case SORT_TYPE_EXTERNAL_SORT:
2540 return "external sort";
2541 case SORT_TYPE_EXTERNAL_MERGE:
2542 return "external merge";
2543 }
2544
2545 return "unknown";
2546}

References SORT_TYPE_EXTERNAL_MERGE, SORT_TYPE_EXTERNAL_SORT, SORT_TYPE_QUICKSORT, SORT_TYPE_STILL_IN_PROGRESS, and SORT_TYPE_TOP_N_HEAPSORT.

Referenced by show_incremental_sort_group_info(), and show_sort_info().

◆ tuplesort_performsort()

void tuplesort_performsort ( Tuplesortstate *state)

Definition at line 1349 of file tuplesort.c.

1350{
1351 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1352
1353 if (trace_sort)
1354 elog(LOG, "performsort of worker %d starting: %s",
1355 state->worker, pg_rusage_show(&state->ru_start));
1356
1357 switch (state->status)
1358 {
1359 case TSS_INITIAL:
1360
1361 /*
1362 * We were able to accumulate all the tuples within the allowed
1363 * amount of memory, or leader to take over worker tapes
1364 */
1365 if (SERIAL(state))
1366 {
1367 /* Just qsort 'em and we're done */
1368 tuplesort_sort_memtuples(state);
1369 state->status = TSS_SORTEDINMEM;
1370 }
1371 else if (WORKER(state))
1372 {
1373 /*
1374 * Parallel workers must still dump out tuples to tape. No
1375 * merge is required to produce single output run, though.
1376 */
1377 inittapes(state, false);
1378 dumptuples(state, true);
1379 worker_nomergeruns(state);
1380 state->status = TSS_SORTEDONTAPE;
1381 }
1382 else
1383 {
1384 /*
1385 * Leader will take over worker tapes and merge worker runs.
1386 * Note that mergeruns sets the correct state->status.
1387 */
1388 leader_takeover_tapes(state);
1389 mergeruns(state);
1390 }
1391 state->current = 0;
1392 state->eof_reached = false;
1393 state->markpos_block = 0L;
1394 state->markpos_offset = 0;
1395 state->markpos_eof = false;
1396 break;
1397
1398 case TSS_BOUNDED:
1399
1400 /*
1401 * We were able to accumulate all the tuples required for output
1402 * in memory, using a heap to eliminate excess tuples. Now we
1403 * have to transform the heap to a properly-sorted array. Note
1404 * that sort_bounded_heap sets the correct state->status.
1405 */
1406 sort_bounded_heap(state);
1407 state->current = 0;
1408 state->eof_reached = false;
1409 state->markpos_offset = 0;
1410 state->markpos_eof = false;
1411 break;
1412
1413 case TSS_BUILDRUNS:
1414
1415 /*
1416 * Finish tape-based sort. First, flush all tuples remaining in
1417 * memory out to tape; then merge until we have a single remaining
1418 * run (or, if !randomAccess and !WORKER(), one run per tape).
1419 * Note that mergeruns sets the correct state->status.
1420 */
1421 dumptuples(state, true);
1422 mergeruns(state);
1423 state->eof_reached = false;
1424 state->markpos_block = 0L;
1425 state->markpos_offset = 0;
1426 state->markpos_eof = false;
1427 break;
1428
1429 default:
1430 elog(ERROR, "invalid tuplesort state");
1431 break;
1432 }
1433
1434 if (trace_sort)
1435 {
1436 if (state->status == TSS_FINALMERGE)
1437 elog(LOG, "performsort of worker %d done (except %d-way final merge): %s",
1438 state->worker, state->nInputTapes,
1439 pg_rusage_show(&state->ru_start));
1440 else
1441 elog(LOG, "performsort of worker %d done: %s",
1442 state->worker, pg_rusage_show(&state->ru_start));
1443 }
1444
1445 MemoryContextSwitchTo(oldcontext);
1446}
static void sort_bounded_heap(Tuplesortstate *state)
Definition: tuplesort.c:2622
static void leader_takeover_tapes(Tuplesortstate *state)
Definition: tuplesort.c:3053
static void inittapes(Tuplesortstate *state, bool mergeruns)
Definition: tuplesort.c:1851
static void worker_nomergeruns(Tuplesortstate *state)
Definition: tuplesort.c:3031
static void dumptuples(Tuplesortstate *state, bool alltuples)
Definition: tuplesort.c:2293

References dumptuples(), elog, ERROR, inittapes(), leader_takeover_tapes(), LOG, MemoryContextSwitchTo(), mergeruns(), pg_rusage_show(), SERIAL, sort_bounded_heap(), trace_sort, TSS_BOUNDED, TSS_BUILDRUNS, TSS_FINALMERGE, TSS_INITIAL, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, tuplesort_sort_memtuples(), WORKER, and worker_nomergeruns().

Referenced by _brin_parallel_merge(), _brin_parallel_scan_and_build(), _bt_leafbuild(), _bt_parallel_scan_and_sort(), _gin_parallel_merge(), _gin_parallel_scan_and_build(), _gin_process_worker_data(), _h_indexbuild(), array_sort_internal(), ExecIncrementalSort(), ExecSort(), gistbuild(), heapam_relation_copy_for_cluster(), hypothetical_dense_rank_final(), hypothetical_rank_common(), initialize_phase(), mode_final(), percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), percentile_disc_multi_final(), process_ordered_aggregate_multi(), process_ordered_aggregate_single(), switchToPresortedPrefixMode(), and validate_index().

◆ tuplesort_puttuple_common()

void tuplesort_puttuple_common ( Tuplesortstate *state,
SortTuple *tuple,
bool  useAbbrev,
Size  tuplen 
)

Definition at line 1155 of file tuplesort.c.

1157{
1158 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1159
1160 Assert(!LEADER(state));
1161
1162 /* account for the memory used for this tuple */
1163 USEMEM(state, tuplen);
1164 state->tupleMem += tuplen;
1165
1166 if (!useAbbrev)
1167 {
1168 /*
1169 * Leave ordinary Datum representation, or NULL value. If there is a
1170 * converter it won't expect NULL values, and cost model is not
1171 * required to account for NULL, so in that case we avoid calling
1172 * converter and just set datum1 to zeroed representation (to be
1173 * consistent, and to support cheap inequality tests for NULL
1174 * abbreviated keys).
1175 */
1176 }
1177 else if (!consider_abort_common(state))
1178 {
1179 /* Store abbreviated key representation */
1180 tuple->datum1 = state->base.sortKeys->abbrev_converter(tuple->datum1,
1181 state->base.sortKeys);
1182 }
1183 else
1184 {
1185 /*
1186 * Set state to be consistent with never trying abbreviation.
1187 *
1188 * Alter datum1 representation in already-copied tuples, so as to
1189 * ensure a consistent representation (current tuple was just
1190 * handled). It does not matter if some dumped tuples are already
1191 * sorted on tape, since serialized tuples lack abbreviated keys
1192 * (TSS_BUILDRUNS state prevents control reaching here in any case).
1193 */
1194 REMOVEABBREV(state, state->memtuples, state->memtupcount);
1195 }
1196
1197 switch (state->status)
1198 {
1199 case TSS_INITIAL:
1200
1201 /*
1202 * Save the tuple into the unsorted array. First, grow the array
1203 * as needed. Note that we try to grow the array when there is
1204 * still one free slot remaining --- if we fail, there'll still be
1205 * room to store the incoming tuple, and then we'll switch to
1206 * tape-based operation.
1207 */
1208 if (state->memtupcount >= state->memtupsize - 1)
1209 {
1210 (void) grow_memtuples(state);
1211 Assert(state->memtupcount < state->memtupsize);
1212 }
1213 state->memtuples[state->memtupcount++] = *tuple;
1214
1215 /*
1216 * Check if it's time to switch over to a bounded heapsort. We do
1217 * so if the input tuple count exceeds twice the desired tuple
1218 * count (this is a heuristic for where heapsort becomes cheaper
1219 * than a quicksort), or if we've just filled workMem and have
1220 * enough tuples to meet the bound.
1221 *
1222 * Note that once we enter TSS_BOUNDED state we will always try to
1223 * complete the sort that way. In the worst case, if later input
1224 * tuples are larger than earlier ones, this might cause us to
1225 * exceed workMem significantly.
1226 */
1227 if (state->bounded &&
1228 (state->memtupcount > state->bound * 2 ||
1229 (state->memtupcount > state->bound && LACKMEM(state))))
1230 {
1231 if (trace_sort)
1232 elog(LOG, "switching to bounded heapsort at %d tuples: %s",
1233 state->memtupcount,
1234 pg_rusage_show(&state->ru_start));
1235 make_bounded_heap(state);
1236 MemoryContextSwitchTo(oldcontext);
1237 return;
1238 }
1239
1240 /*
1241 * Done if we still fit in available memory and have array slots.
1242 */
1243 if (state->memtupcount < state->memtupsize && !LACKMEM(state))
1244 {
1245 MemoryContextSwitchTo(oldcontext);
1246 return;
1247 }
1248
1249 /*
1250 * Nope; time to switch to tape-based operation.
1251 */
1252 inittapes(state, true);
1253
1254 /*
1255 * Dump all tuples.
1256 */
1257 dumptuples(state, false);
1258 break;
1259
1260 case TSS_BOUNDED:
1261
1262 /*
1263 * We don't want to grow the array here, so check whether the new
1264 * tuple can be discarded before putting it in. This should be a
1265 * good speed optimization, too, since when there are many more
1266 * input tuples than the bound, most input tuples can be discarded
1267 * with just this one comparison. Note that because we currently
1268 * have the sort direction reversed, we must check for <= not >=.
1269 */
1270 if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
1271 {
1272 /* new tuple <= top of the heap, so we can discard it */
1273 free_sort_tuple(state, tuple);
1274 CHECK_FOR_INTERRUPTS();
1275 }
1276 else
1277 {
1278 /* discard top of heap, replacing it with the new tuple */
1279 free_sort_tuple(state, &state->memtuples[0]);
1280 tuplesort_heap_replace_top(state, tuple);
1281 }
1282 break;
1283
1284 case TSS_BUILDRUNS:
1285
1286 /*
1287 * Save the tuple into the unsorted array (there must be space)
1288 */
1289 state->memtuples[state->memtupcount++] = *tuple;
1290
1291 /*
1292 * If we are over the memory limit, dump all tuples.
1293 */
1294 dumptuples(state, false);
1295 break;
1296
1297 default:
1298 elog(ERROR, "invalid tuplesort state");
1299 break;
1300 }
1301 MemoryContextSwitchTo(oldcontext);
1302}
Datum datum1
Definition: tuplesort.h:151
#define REMOVEABBREV(state, stup, count)
Definition: tuplesort.c:393
static bool grow_memtuples(Tuplesortstate *state)
Definition: tuplesort.c:1038
static void make_bounded_heap(Tuplesortstate *state)
Definition: tuplesort.c:2573
static bool consider_abort_common(Tuplesortstate *state)
Definition: tuplesort.c:1305

References Assert(), CHECK_FOR_INTERRUPTS, COMPARETUP, consider_abort_common(), SortTuple::datum1, dumptuples(), elog, ERROR, free_sort_tuple(), grow_memtuples(), inittapes(), LACKMEM, LEADER, LOG, make_bounded_heap(), MemoryContextSwitchTo(), pg_rusage_show(), REMOVEABBREV, trace_sort, TSS_BOUNDED, TSS_BUILDRUNS, TSS_INITIAL, tuplesort_heap_replace_top(), and USEMEM.

Referenced by tuplesort_putbrintuple(), tuplesort_putdatum(), tuplesort_putgintuple(), tuplesort_putheaptuple(), tuplesort_putindextuplevalues(), and tuplesort_puttupleslot().

◆ tuplesort_readtup_alloc()

void * tuplesort_readtup_alloc ( Tuplesortstate *state,
Size  tuplen 
)

Definition at line 2867 of file tuplesort.c.

2868{
2869 SlabSlot *buf;
2870
2871 /*
2872 * We pre-allocate enough slots in the slab arena that we should never run
2873 * out.
2874 */
2875 Assert(state->slabFreeHead);
2876
2877 if (tuplen > SLAB_SLOT_SIZE || !state->slabFreeHead)
2878 return MemoryContextAlloc(state->base.sortcontext, tuplen);
2879 else
2880 {
2881 buf = state->slabFreeHead;
2882 /* Reuse this slot */
2883 state->slabFreeHead = buf->nextfree;
2884
2885 return buf;
2886 }
2887}

References Assert(), buf, MemoryContextAlloc(), and SLAB_SLOT_SIZE.

Referenced by readtup_cluster(), readtup_datum(), readtup_heap(), readtup_index(), readtup_index_brin(), and readtup_index_gin().

◆ tuplesort_rescan()

void tuplesort_rescan ( Tuplesortstate *state)

Definition at line 2388 of file tuplesort.c.

2389{
2390 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2391
2392 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2393
2394 switch (state->status)
2395 {
2396 case TSS_SORTEDINMEM:
2397 state->current = 0;
2398 state->eof_reached = false;
2399 state->markpos_offset = 0;
2400 state->markpos_eof = false;
2401 break;
2402 case TSS_SORTEDONTAPE:
2403 LogicalTapeRewindForRead(state->result_tape, 0);
2404 state->eof_reached = false;
2405 state->markpos_block = 0L;
2406 state->markpos_offset = 0;
2407 state->markpos_eof = false;
2408 break;
2409 default:
2410 elog(ERROR, "invalid tuplesort state");
2411 break;
2412 }
2413
2414 MemoryContextSwitchTo(oldcontext);
2415}

References Assert(), elog, ERROR, LogicalTapeRewindForRead(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecReScanSort(), mode_final(), percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), and percentile_disc_multi_final().

◆ tuplesort_reset()

void tuplesort_reset ( Tuplesortstate *state)

Definition at line 1005 of file tuplesort.c.

1006{
1007 tuplesort_updatemax(state);
1008 tuplesort_free(state);
1009
1010 /*
1011 * After we've freed up per-batch memory, re-setup all of the state common
1012 * to both the first batch and any subsequent batch.
1013 */
1014 tuplesort_begin_batch(state);
1015
1016 state->lastReturnedTuple = NULL;
1017 state->slabMemoryBegin = NULL;
1018 state->slabMemoryEnd = NULL;
1019 state->slabFreeHead = NULL;
1020}

References tuplesort_begin_batch(), tuplesort_free(), and tuplesort_updatemax().

Referenced by ExecIncrementalSort(), ExecReScanIncrementalSort(), and switchToPresortedPrefixMode().
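
A sketch of the batch-reuse pattern this enables (the way ExecIncrementalSort drives it; the load/drain loops and the stopping condition are assumed):

    for (;;)
    {
        /* load one batch of tuples ... */
        tuplesort_performsort(ts);
        /* drain the sorted batch ... */

        if (no_more_batches)            /* hypothetical condition */
            break;
        tuplesort_reset(ts);            /* keep the state, drop per-batch memory */
    }
    tuplesort_end(ts);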

◆ tuplesort_restorepos()

void tuplesort_restorepos ( Tuplesortstate *state)

Definition at line 2452 of file tuplesort.c.

2453{
2454 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2455
2456 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2457
2458 switch (state->status)
2459 {
2460 case TSS_SORTEDINMEM:
2461 state->current = state->markpos_offset;
2462 state->eof_reached = state->markpos_eof;
2463 break;
2464 case TSS_SORTEDONTAPE:
2465 LogicalTapeSeek(state->result_tape,
2466 state->markpos_block,
2467 state->markpos_offset);
2468 state->eof_reached = state->markpos_eof;
2469 break;
2470 default:
2471 elog(ERROR, "invalid tuplesort state");
2472 break;
2473 }
2474
2475 MemoryContextSwitchTo(oldcontext);
2476}
void LogicalTapeSeek(LogicalTape *lt, int64 blocknum, int offset)
Definition: logtape.c:1133

References Assert(), elog, ERROR, LogicalTapeSeek(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecSortRestrPos().

◆ tuplesort_set_bound()

void tuplesort_set_bound ( Tuplesortstate *state,
int64  bound 
)

Definition at line 824 of file tuplesort.c.

825{
826 /* Assert we're called before loading any tuples */
827 Assert(state->status == TSS_INITIAL && state->memtupcount == 0);
828 /* Assert we allow bounded sorts */
829 Assert(state->base.sortopt & TUPLESORT_ALLOWBOUNDED);
830 /* Can't set the bound twice, either */
831 Assert(!state->bounded);
832 /* Also, this shouldn't be called in a parallel worker */
833 Assert(!WORKER(state));
834
835 /* Parallel leader allows but ignores hint */
836 if (LEADER(state))
837 return;
838
839#ifdef DEBUG_BOUNDED_SORT
840 /* Honor GUC setting that disables the feature (for easy testing) */
841 if (!optimize_bounded_sort)
842 return;
843#endif
844
845 /* We want to be able to compute bound * 2, so limit the setting */
846 if (bound > (int64) (INT_MAX / 2))
847 return;
848
849 state->bounded = true;
850 state->bound = (int) bound;
851
852 /*
853 * Bounded sorts are not an effective target for abbreviated key
854 * optimization. Disable by setting state to be consistent with no
855 * abbreviation support.
856 */
857 state->base.sortKeys->abbrev_converter = NULL;
858 if (state->base.sortKeys->abbrev_full_comparator)
859 state->base.sortKeys->comparator = state->base.sortKeys->abbrev_full_comparator;
860
861 /* Not strictly necessary, but be tidy */
862 state->base.sortKeys->abbrev_abort = NULL;
863 state->base.sortKeys->abbrev_full_comparator = NULL;
864}
#define TUPLESORT_ALLOWBOUNDED
Definition: tuplesort.h:100

References Assert(), LEADER, TSS_INITIAL, TUPLESORT_ALLOWBOUNDED, and WORKER.

Referenced by ExecIncrementalSort(), ExecSort(), and switchToPresortedPrefixMode().
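
Sketch of a Top-N sort, the way ExecSort applies a LIMIT hint; ts must have been created with TUPLESORT_ALLOWBOUNDED:

    tuplesort_set_bound(ts, 100);       /* only the first 100 output tuples matter */

    /* load all input, then sort; at most 100 tuples come back */
    tuplesort_performsort(ts);

    if (tuplesort_used_bound(ts))
        elog(DEBUG1, "bounded heapsort was used");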

◆ tuplesort_skiptuples()

bool tuplesort_skiptuples ( Tuplesortstate *state,
int64  ntuples,
bool  forward 
)

Definition at line 1696 of file tuplesort.c.

1697{
1698 MemoryContext oldcontext;
1699
1700 /*
1701 * We don't actually support backwards skip yet, because no callers need
1702 * it. The API is designed to allow for that later, though.
1703 */
1704 Assert(forward);
1705 Assert(ntuples >= 0);
1706 Assert(!WORKER(state));
1707
1708 switch (state->status)
1709 {
1710 case TSS_SORTEDINMEM:
1711 if (state->memtupcount - state->current >= ntuples)
1712 {
1713 state->current += ntuples;
1714 return true;
1715 }
1716 state->current = state->memtupcount;
1717 state->eof_reached = true;
1718
1719 /*
1720 * Complain if caller tries to retrieve more tuples than
1721 * originally asked for in a bounded sort. This is because
1722 * returning EOF here might be the wrong thing.
1723 */
1724 if (state->bounded && state->current >= state->bound)
1725 elog(ERROR, "retrieved too many tuples in a bounded sort");
1726
1727 return false;
1728
1729 case TSS_SORTEDONTAPE:
1730 case TSS_FINALMERGE:
1731
1732 /*
1733 * We could probably optimize these cases better, but for now it's
1734 * not worth the trouble.
1735 */
1736 oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1737 while (ntuples-- > 0)
1738 {
1739 SortTuple stup;
1740
1741 if (!tuplesort_gettuple_common(state, forward, &stup))
1742 {
1743 MemoryContextSwitchTo(oldcontext);
1744 return false;
1745 }
1746 CHECK_FOR_INTERRUPTS();
1747 }
1748 MemoryContextSwitchTo(oldcontext);
1749 return true;
1750
1751 default:
1752 elog(ERROR, "invalid tuplesort state");
1753 return false; /* keep compiler quiet */
1754 }
1755}
bool tuplesort_gettuple_common(Tuplesortstate *state, bool forward, SortTuple *stup)
Definition: tuplesort.c:1456

References Assert(), CHECK_FOR_INTERRUPTS, elog, ERROR, MemoryContextSwitchTo(), TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, tuplesort_gettuple_common(), and WORKER.

Referenced by percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), and percentile_disc_multi_final().
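
Sketch of the consumer pattern (modeled on percentile_disc_final): skip straight to the desired row of the sorted output, then fetch it; rownum, val, and isnull are assumed caller variables:

    if (rownum > 1 && !tuplesort_skiptuples(ts, rownum - 1, true))
        elog(ERROR, "missing row in percentile_disc");

    if (!tuplesort_getdatum(ts, true, true, &val, &isnull, NULL))
        elog(ERROR, "missing row in percentile_disc");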

◆ tuplesort_sort_memtuples()

static void tuplesort_sort_memtuples ( Tuplesortstate *state)
static

Definition at line 2662 of file tuplesort.c.

2663{
2664 Assert(!LEADER(state));
2665
2666 if (state->memtupcount > 1)
2667 {
2668 /*
2669 * Do we have the leading column's value or abbreviation in datum1,
2670 * and is there a specialization for its comparator?
2671 */
2672 if (state->base.haveDatum1 && state->base.sortKeys)
2673 {
2674 if (state->base.sortKeys[0].comparator == ssup_datum_unsigned_cmp)
2675 {
2676 qsort_tuple_unsigned(state->memtuples,
2677 state->memtupcount,
2678 state);
2679 return;
2680 }
2681 else if (state->base.sortKeys[0].comparator == ssup_datum_signed_cmp)
2682 {
2683 qsort_tuple_signed(state->memtuples,
2684 state->memtupcount,
2685 state);
2686 return;
2687 }
2688 else if (state->base.sortKeys[0].comparator == ssup_datum_int32_cmp)
2689 {
2690 qsort_tuple_int32(state->memtuples,
2691 state->memtupcount,
2692 state);
2693 return;
2694 }
2695 }
2696
2697 /* Can we use the single-key sort function? */
2698 if (state->base.onlyKey != NULL)
2699 {
2700 qsort_ssup(state->memtuples, state->memtupcount,
2701 state->base.onlyKey);
2702 }
2703 else
2704 {
2705 qsort_tuple(state->memtuples,
2706 state->memtupcount,
2707 state->base.comparetup,
2708 state);
2709 }
2710 }
2711}
int ssup_datum_signed_cmp(Datum x, Datum y, SortSupport ssup)
Definition: tuplesort.c:3134
int ssup_datum_unsigned_cmp(Datum x, Datum y, SortSupport ssup)
Definition: tuplesort.c:3123
int ssup_datum_int32_cmp(Datum x, Datum y, SortSupport ssup)
Definition: tuplesort.c:3148

References Assert(), LEADER, ssup_datum_int32_cmp(), ssup_datum_signed_cmp(), and ssup_datum_unsigned_cmp().

Referenced by dumptuples(), and tuplesort_performsort().

◆ tuplesort_space_type_name()

const char * tuplesort_space_type_name ( TuplesortSpaceType  t)

Definition at line 2552 of file tuplesort.c.

2553{
2554 Assert(t == SORT_SPACE_TYPE_DISK || t == SORT_SPACE_TYPE_MEMORY);
2555 return t == SORT_SPACE_TYPE_DISK ? "Disk" : "Memory";
2556}

References Assert(), SORT_SPACE_TYPE_DISK, and SORT_SPACE_TYPE_MEMORY.

Referenced by show_incremental_sort_group_info(), and show_sort_info().

◆ tuplesort_updatemax()

static void tuplesort_updatemax ( Tuplesortstate *state)
static

Definition at line 954 of file tuplesort.c.

955{
956 int64 spaceUsed;
957 bool isSpaceDisk;
958
959 /*
960 * Note: it might seem we should provide both memory and disk usage for a
961 * disk-based sort. However, the current code doesn't track memory space
962 * accurately once we have begun to return tuples to the caller (since we
963 * don't account for pfree's the caller is expected to do), so we cannot
964 * rely on availMem in a disk sort. This does not seem worth the overhead
965 * to fix. Is it worth creating an API for the memory context code to
966 * tell us how much is actually used in sortcontext?
967 */
968 if (state->tapeset)
969 {
970 isSpaceDisk = true;
971 spaceUsed = LogicalTapeSetBlocks(state->tapeset) * BLCKSZ;
972 }
973 else
974 {
975 isSpaceDisk = false;
976 spaceUsed = state->allowedMem - state->availMem;
977 }
978
979 /*
980 * Sort evicts data to the disk when it wasn't able to fit that data into
981 * main memory. This is why we assume space used on the disk to be more
982 * important for tracking resource usage than space used in memory. Note
983 * that the amount of space occupied by some tupleset on the disk might be
984 * less than amount of space occupied by the same tupleset in memory due
985 * to more compact representation.
986 */
987 if ((isSpaceDisk && !state->isMaxSpaceDisk) ||
988 (isSpaceDisk == state->isMaxSpaceDisk && spaceUsed > state->maxSpace))
989 {
990 state->maxSpace = spaceUsed;
991 state->isMaxSpaceDisk = isSpaceDisk;
992 state->maxSpaceStatus = state->status;
993 }
994}

References LogicalTapeSetBlocks().

Referenced by tuplesort_get_stats(), and tuplesort_reset().

◆ tuplesort_used_bound()

bool tuplesort_used_bound ( Tuplesortstate *state)

Definition at line 872 of file tuplesort.c.

873{
874 return state->boundUsed;
875}

Referenced by ExecIncrementalSort().

◆ worker_freeze_result_tape()

static void worker_freeze_result_tape ( Tuplesortstate *state)
static

Definition at line 2993 of file tuplesort.c.

2994{
2995 Sharedsort *shared = state->shared;
2996 TapeShare output;
2997
2998 Assert(WORKER(state));
2999 Assert(state->result_tape != NULL);
3000 Assert(state->memtupcount == 0);
3001
3002 /*
3003 * Free most remaining memory, in case caller is sensitive to our holding
3004 * on to it. memtuples may not be a tiny merge heap at this point.
3005 */
3006 pfree(state->memtuples);
3007 /* Be tidy */
3008 state->memtuples = NULL;
3009 state->memtupsize = 0;
3010
3011 /*
3012 * Parallel worker requires result tape metadata, which is to be stored in
3013 * shared memory for leader
3014 */
3015 LogicalTapeFreeze(state->result_tape, &output);
3016
3017 /* Store properties of output tape, and update finished worker count */
3018 SpinLockAcquire(&shared->mutex);
3019 shared->tapes[state->worker] = output;
3020 shared->workersFinished++;
3021 SpinLockRelease(&shared->mutex);
3022}
FILE * output

References Assert(), LogicalTapeFreeze(), Sharedsort::mutex, output, pfree(), SpinLockAcquire, SpinLockRelease, Sharedsort::tapes, WORKER, and Sharedsort::workersFinished.

Referenced by mergeruns(), and worker_nomergeruns().

◆ worker_get_identifier()

static int worker_get_identifier ( Tuplesortstate *state)
static

Definition at line 2965 of file tuplesort.c.

2966{
2967 Sharedsort *shared = state->shared;
2968 int worker;
2969
2970 Assert(WORKER(state));
2971
2972 SpinLockAcquire(&shared->mutex);
2973 worker = shared->currentWorker++;
2974 SpinLockRelease(&shared->mutex);
2975
2976 return worker;
2977}

References Assert(), Sharedsort::currentWorker, Sharedsort::mutex, SpinLockAcquire, SpinLockRelease, and WORKER.

Referenced by tuplesort_begin_common().

◆ worker_nomergeruns()

static void worker_nomergeruns ( Tuplesortstate *state)
static

Definition at line 3031 of file tuplesort.c.

3032{
3033 Assert(WORKER(state));
3034 Assert(state->result_tape == NULL);
3035 Assert(state->nOutputRuns == 1);
3036
3037 state->result_tape = state->destTape;
3038 worker_freeze_result_tape(state);
3039}

References Assert(), WORKER, and worker_freeze_result_tape().

Referenced by tuplesort_performsort().

Variable Documentation

◆ trace_sort