tuplesort.c File Reference
#include "postgres.h"
#include <limits.h>
#include "commands/tablespace.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "storage/shmem.h"
#include "utils/guc.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/tuplesort.h"
#include "lib/sort_template.h"

Data Structures

union  SlabSlot
 
struct  Tuplesortstate
 
struct  Sharedsort
 

Macros

#define INITIAL_MEMTUPSIZE
 
#define SLAB_SLOT_SIZE   1024
 
#define MINORDER   6 /* minimum merge order */
 
#define MAXORDER   500 /* maximum merge order */
 
#define TAPE_BUFFER_OVERHEAD   BLCKSZ
 
#define MERGE_BUFFER_SIZE   (BLCKSZ * 32)
 
#define IS_SLAB_SLOT(state, tuple)
 
#define RELEASE_SLAB_SLOT(state, tuple)
 
#define REMOVEABBREV(state, stup, count)   ((*(state)->base.removeabbrev) (state, stup, count))
 
#define COMPARETUP(state, a, b)   ((*(state)->base.comparetup) (a, b, state))
 
#define WRITETUP(state, tape, stup)   ((*(state)->base.writetup) (state, tape, stup))
 
#define READTUP(state, stup, tape, len)   ((*(state)->base.readtup) (state, stup, tape, len))
 
#define FREESTATE(state)   ((state)->base.freestate ? (*(state)->base.freestate) (state) : (void) 0)
 
#define LACKMEM(state)   ((state)->availMem < 0 && !(state)->slabAllocatorUsed)
 
#define USEMEM(state, amt)   ((state)->availMem -= (amt))
 
#define FREEMEM(state, amt)   ((state)->availMem += (amt))
 
#define SERIAL(state)   ((state)->shared == NULL)
 
#define WORKER(state)   ((state)->shared && (state)->worker != -1)
 
#define LEADER(state)   ((state)->shared && (state)->worker == -1)
 
#define ST_SORT   qsort_tuple_unsigned
 
#define ST_ELEMENT_TYPE   SortTuple
 
#define ST_COMPARE(a, b, state)   qsort_tuple_unsigned_compare(a, b, state)
 
#define ST_COMPARE_ARG_TYPE   Tuplesortstate
 
#define ST_CHECK_FOR_INTERRUPTS
 
#define ST_SCOPE   static
 
#define ST_DEFINE
 
#define ST_SORT   qsort_tuple_signed
 
#define ST_ELEMENT_TYPE   SortTuple
 
#define ST_COMPARE(a, b, state)   qsort_tuple_signed_compare(a, b, state)
 
#define ST_COMPARE_ARG_TYPE   Tuplesortstate
 
#define ST_CHECK_FOR_INTERRUPTS
 
#define ST_SCOPE   static
 
#define ST_DEFINE
 
#define ST_SORT   qsort_tuple_int32
 
#define ST_ELEMENT_TYPE   SortTuple
 
#define ST_COMPARE(a, b, state)   qsort_tuple_int32_compare(a, b, state)
 
#define ST_COMPARE_ARG_TYPE   Tuplesortstate
 
#define ST_CHECK_FOR_INTERRUPTS
 
#define ST_SCOPE   static
 
#define ST_DEFINE
 
#define ST_SORT   qsort_tuple
 
#define ST_ELEMENT_TYPE   SortTuple
 
#define ST_COMPARE_RUNTIME_POINTER
 
#define ST_COMPARE_ARG_TYPE   Tuplesortstate
 
#define ST_CHECK_FOR_INTERRUPTS
 
#define ST_SCOPE   static
 
#define ST_DECLARE
 
#define ST_DEFINE
 
#define ST_SORT   qsort_ssup
 
#define ST_ELEMENT_TYPE   SortTuple
 
#define ST_COMPARE(a, b, ssup)
 
#define ST_COMPARE_ARG_TYPE   SortSupportData
 
#define ST_CHECK_FOR_INTERRUPTS
 
#define ST_SCOPE   static
 
#define ST_DEFINE
 

Typedefs

typedef union SlabSlot SlabSlot
 

Enumerations

enum  TupSortStatus {
  TSS_INITIAL , TSS_BOUNDED , TSS_BUILDRUNS , TSS_SORTEDINMEM ,
  TSS_SORTEDONTAPE , TSS_FINALMERGE
}
 

Functions

static void tuplesort_begin_batch (Tuplesortstate *state)
 
static bool consider_abort_common (Tuplesortstate *state)
 
static void inittapes (Tuplesortstate *state, bool mergeruns)
 
static void inittapestate (Tuplesortstate *state, int maxTapes)
 
static void selectnewtape (Tuplesortstate *state)
 
static void init_slab_allocator (Tuplesortstate *state, int numSlots)
 
static void mergeruns (Tuplesortstate *state)
 
static void mergeonerun (Tuplesortstate *state)
 
static void beginmerge (Tuplesortstate *state)
 
static bool mergereadnext (Tuplesortstate *state, LogicalTape *srcTape, SortTuple *stup)
 
static void dumptuples (Tuplesortstate *state, bool alltuples)
 
static void make_bounded_heap (Tuplesortstate *state)
 
static void sort_bounded_heap (Tuplesortstate *state)
 
static void tuplesort_sort_memtuples (Tuplesortstate *state)
 
static void tuplesort_heap_insert (Tuplesortstate *state, SortTuple *tuple)
 
static void tuplesort_heap_replace_top (Tuplesortstate *state, SortTuple *tuple)
 
static void tuplesort_heap_delete_top (Tuplesortstate *state)
 
static void reversedirection (Tuplesortstate *state)
 
static unsigned int getlen (LogicalTape *tape, bool eofOK)
 
static void markrunend (LogicalTape *tape)
 
static int worker_get_identifier (Tuplesortstate *state)
 
static void worker_freeze_result_tape (Tuplesortstate *state)
 
static void worker_nomergeruns (Tuplesortstate *state)
 
static void leader_takeover_tapes (Tuplesortstate *state)
 
static void free_sort_tuple (Tuplesortstate *state, SortTuple *stup)
 
static void tuplesort_free (Tuplesortstate *state)
 
static void tuplesort_updatemax (Tuplesortstate *state)
 
static pg_attribute_always_inline int qsort_tuple_unsigned_compare (SortTuple *a, SortTuple *b, Tuplesortstate *state)
 
static pg_attribute_always_inline int qsort_tuple_signed_compare (SortTuple *a, SortTuple *b, Tuplesortstate *state)
 
static pg_attribute_always_inline int qsort_tuple_int32_compare (SortTuple *a, SortTuple *b, Tuplesortstate *state)
 
Tuplesortstate * tuplesort_begin_common (int workMem, SortCoordinate coordinate, int sortopt)
 
void tuplesort_set_bound (Tuplesortstate *state, int64 bound)
 
bool tuplesort_used_bound (Tuplesortstate *state)
 
void tuplesort_end (Tuplesortstate *state)
 
void tuplesort_reset (Tuplesortstate *state)
 
static bool grow_memtuples (Tuplesortstate *state)
 
void tuplesort_puttuple_common (Tuplesortstate *state, SortTuple *tuple, bool useAbbrev, Size tuplen)
 
void tuplesort_performsort (Tuplesortstate *state)
 
bool tuplesort_gettuple_common (Tuplesortstate *state, bool forward, SortTuple *stup)
 
bool tuplesort_skiptuples (Tuplesortstate *state, int64 ntuples, bool forward)
 
int tuplesort_merge_order (int64 allowedMem)
 
static int64 merge_read_buffer_size (int64 avail_mem, int nInputTapes, int nInputRuns, int maxOutputTapes)
 
void tuplesort_rescan (Tuplesortstate *state)
 
void tuplesort_markpos (Tuplesortstate *state)
 
void tuplesort_restorepos (Tuplesortstate *state)
 
void tuplesort_get_stats (Tuplesortstate *state, TuplesortInstrumentation *stats)
 
const char * tuplesort_method_name (TuplesortMethod m)
 
const char * tuplesort_space_type_name (TuplesortSpaceType t)
 
void * tuplesort_readtup_alloc (Tuplesortstate *state, Size tuplen)
 
Size tuplesort_estimate_shared (int nWorkers)
 
void tuplesort_initialize_shared (Sharedsort *shared, int nWorkers, dsm_segment *seg)
 
void tuplesort_attach_shared (Sharedsort *shared, dsm_segment *seg)
 
int ssup_datum_unsigned_cmp (Datum x, Datum y, SortSupport ssup)
 
int ssup_datum_signed_cmp (Datum x, Datum y, SortSupport ssup)
 
int ssup_datum_int32_cmp (Datum x, Datum y, SortSupport ssup)
 

Variables

bool trace_sort = false
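
The entry points callers actually use (tuplesort_begin_heap, tuplesort_puttupleslot, and friends) live in tuplesortvariants.c; this file provides the common machinery behind them. As a minimal sketch of the caller protocol, assuming the Datum-sorting wrappers declared in tuplesort.h (the inputs[] array, ninputs, and consume() are hypothetical):

    Tuplesortstate *sortstate;
    Datum       val;
    bool        isnull;

    /* begin: sort int4 Datums ascending, serial sort, no random access */
    sortstate = tuplesort_begin_datum(INT4OID, Int4LessOperator, InvalidOid,
                                      false, work_mem, NULL, TUPLESORT_NONE);

    /* feed: one call per input value (hypothetical inputs[] array) */
    for (int i = 0; i < ninputs; i++)
        tuplesort_putdatum(sortstate, Int32GetDatum(inputs[i]), false);

    /* sort: drives the TupSortStatus state machine out of TSS_INITIAL */
    tuplesort_performsort(sortstate);

    /* drain: returns false at end of data */
    while (tuplesort_getdatum(sortstate, true, false, &val, &isnull, NULL))
        consume(DatumGetInt32(val));    /* hypothetical consumer */

    tuplesort_end(sortstate);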
 

Macro Definition Documentation

◆ COMPARETUP

#define COMPARETUP(state, a, b)   ((*(state)->base.comparetup) (a, b, state))

Definition at line 396 of file tuplesort.c.

◆ FREEMEM

#define FREEMEM(state, amt)   ((state)->availMem += (amt))

Definition at line 402 of file tuplesort.c.

◆ FREESTATE

#define FREESTATE (   state)    ((state)->base.freestate ? (*(state)->base.freestate) (state) : (void) 0)

Definition at line 399 of file tuplesort.c.

◆ INITIAL_MEMTUPSIZE

#define INITIAL_MEMTUPSIZE
Value:
Max(1024, \
ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1)

Definition at line 120 of file tuplesort.c.

◆ IS_SLAB_SLOT

#define IS_SLAB_SLOT(state, tuple)
Value:
((char *) (tuple) >= (state)->slabMemoryBegin && \
(char *) (tuple) < (state)->slabMemoryEnd)

Definition at line 375 of file tuplesort.c.

◆ LACKMEM

#define LACKMEM (   state)    ((state)->availMem < 0 && !(state)->slabAllocatorUsed)

Definition at line 400 of file tuplesort.c.

◆ LEADER

#define LEADER (   state)    ((state)->shared && (state)->worker == -1)

Definition at line 405 of file tuplesort.c.

◆ MAXORDER

#define MAXORDER   500 /* maximum merge order */

Definition at line 177 of file tuplesort.c.

◆ MERGE_BUFFER_SIZE

#define MERGE_BUFFER_SIZE   (BLCKSZ * 32)

Definition at line 179 of file tuplesort.c.

◆ MINORDER

#define MINORDER   6 /* minimum merge order */

Definition at line 176 of file tuplesort.c.

◆ READTUP

#define READTUP(state, stup, tape, len)   ((*(state)->base.readtup) (state, stup, tape, len))

Definition at line 398 of file tuplesort.c.

◆ RELEASE_SLAB_SLOT

#define RELEASE_SLAB_SLOT(state, tuple)
Value:
do { \
SlabSlot *buf = (SlabSlot *) tuple; \
\
if (IS_SLAB_SLOT((state), buf)) \
{ \
buf->nextfree = (state)->slabFreeHead; \
(state)->slabFreeHead = buf; \
} \
else \
pfree(buf); \
} while(0)

Definition at line 383 of file tuplesort.c.

◆ REMOVEABBREV

#define REMOVEABBREV(state, stup, count)   ((*(state)->base.removeabbrev) (state, stup, count))

Definition at line 395 of file tuplesort.c.

◆ SERIAL

#define SERIAL (   state)    ((state)->shared == NULL)

Definition at line 403 of file tuplesort.c.

◆ SLAB_SLOT_SIZE

#define SLAB_SLOT_SIZE   1024

Definition at line 142 of file tuplesort.c.

◆ ST_CHECK_FOR_INTERRUPTS [1/5]

#define ST_CHECK_FOR_INTERRUPTS

Definition at line 613 of file tuplesort.c.

◆ ST_CHECK_FOR_INTERRUPTS [2/5]

#define ST_CHECK_FOR_INTERRUPTS

Definition at line 613 of file tuplesort.c.

◆ ST_CHECK_FOR_INTERRUPTS [3/5]

#define ST_CHECK_FOR_INTERRUPTS

Definition at line 613 of file tuplesort.c.

◆ ST_CHECK_FOR_INTERRUPTS [4/5]

#define ST_CHECK_FOR_INTERRUPTS

Definition at line 613 of file tuplesort.c.

◆ ST_CHECK_FOR_INTERRUPTS [5/5]

#define ST_CHECK_FOR_INTERRUPTS

Definition at line 613 of file tuplesort.c.

◆ ST_COMPARE [1/4]

#define ST_COMPARE(a, b, ssup)
Value:
ApplySortComparator((a)->datum1, (a)->isnull1, \
(b)->datum1, (b)->isnull1, (ssup))

Definition at line 609 of file tuplesort.c.

◆ ST_COMPARE [2/4]

#define ST_COMPARE(a, b, state)   qsort_tuple_unsigned_compare(a, b, state)

Definition at line 609 of file tuplesort.c.

◆ ST_COMPARE [3/4]

#define ST_COMPARE(a, b, state)   qsort_tuple_signed_compare(a, b, state)

Definition at line 609 of file tuplesort.c.

◆ ST_COMPARE [4/4]

#define ST_COMPARE(a, b, state)   qsort_tuple_int32_compare(a, b, state)

Definition at line 609 of file tuplesort.c.

◆ ST_COMPARE_ARG_TYPE [1/5]

#define ST_COMPARE_ARG_TYPE   Tuplesortstate

Definition at line 612 of file tuplesort.c.

◆ ST_COMPARE_ARG_TYPE [2/5]

#define ST_COMPARE_ARG_TYPE   Tuplesortstate

Definition at line 612 of file tuplesort.c.

◆ ST_COMPARE_ARG_TYPE [3/5]

#define ST_COMPARE_ARG_TYPE   Tuplesortstate

Definition at line 612 of file tuplesort.c.

◆ ST_COMPARE_ARG_TYPE [4/5]

#define ST_COMPARE_ARG_TYPE   Tuplesortstate

Definition at line 612 of file tuplesort.c.

◆ ST_COMPARE_ARG_TYPE [5/5]

#define ST_COMPARE_ARG_TYPE   SortSupportData

Definition at line 612 of file tuplesort.c.

◆ ST_COMPARE_RUNTIME_POINTER

#define ST_COMPARE_RUNTIME_POINTER

Definition at line 599 of file tuplesort.c.

◆ ST_DECLARE

#define ST_DECLARE

Definition at line 603 of file tuplesort.c.

◆ ST_DEFINE [1/5]

#define ST_DEFINE

Definition at line 615 of file tuplesort.c.

◆ ST_DEFINE [2/5]

#define ST_DEFINE

Definition at line 615 of file tuplesort.c.

◆ ST_DEFINE [3/5]

#define ST_DEFINE

Definition at line 615 of file tuplesort.c.

◆ ST_DEFINE [4/5]

#define ST_DEFINE

Definition at line 615 of file tuplesort.c.

◆ ST_DEFINE [5/5]

#define ST_DEFINE

Definition at line 615 of file tuplesort.c.

◆ ST_ELEMENT_TYPE [1/5]

#define ST_ELEMENT_TYPE   SortTuple

Definition at line 608 of file tuplesort.c.

◆ ST_ELEMENT_TYPE [2/5]

#define ST_ELEMENT_TYPE   SortTuple

Definition at line 608 of file tuplesort.c.

◆ ST_ELEMENT_TYPE [3/5]

#define ST_ELEMENT_TYPE   SortTuple

Definition at line 608 of file tuplesort.c.

◆ ST_ELEMENT_TYPE [4/5]

#define ST_ELEMENT_TYPE   SortTuple

Definition at line 608 of file tuplesort.c.

◆ ST_ELEMENT_TYPE [5/5]

#define ST_ELEMENT_TYPE   SortTuple

Definition at line 608 of file tuplesort.c.

◆ ST_SCOPE [1/5]

#define ST_SCOPE   static

Definition at line 614 of file tuplesort.c.

◆ ST_SCOPE [2/5]

#define ST_SCOPE   static

Definition at line 614 of file tuplesort.c.

◆ ST_SCOPE [3/5]

#define ST_SCOPE   static

Definition at line 614 of file tuplesort.c.

◆ ST_SCOPE [4/5]

#define ST_SCOPE   static

Definition at line 614 of file tuplesort.c.

◆ ST_SCOPE [5/5]

#define ST_SCOPE   static

Definition at line 614 of file tuplesort.c.

◆ ST_SORT [1/5]

#define ST_SORT   qsort_tuple_unsigned

Definition at line 607 of file tuplesort.c.

◆ ST_SORT [2/5]

#define ST_SORT   qsort_tuple_signed

Definition at line 607 of file tuplesort.c.

◆ ST_SORT [3/5]

#define ST_SORT   qsort_tuple_int32

Definition at line 607 of file tuplesort.c.

◆ ST_SORT [4/5]

#define ST_SORT   qsort_tuple

Definition at line 607 of file tuplesort.c.

◆ ST_SORT [5/5]

#define ST_SORT   qsort_ssup

Definition at line 607 of file tuplesort.c.

◆ TAPE_BUFFER_OVERHEAD

#define TAPE_BUFFER_OVERHEAD   BLCKSZ

Definition at line 178 of file tuplesort.c.

◆ USEMEM

#define USEMEM(state, amt)   ((state)->availMem -= (amt))

Definition at line 401 of file tuplesort.c.

◆ WORKER

#define WORKER (   state)    ((state)->shared && (state)->worker != -1)

Definition at line 404 of file tuplesort.c.

◆ WRITETUP

#define WRITETUP(state, tape, stup)   ((*(state)->base.writetup) (state, tape, stup))

Definition at line 397 of file tuplesort.c.

Typedef Documentation

◆ SlabSlot

typedef union SlabSlot SlabSlot

Enumeration Type Documentation

◆ TupSortStatus

Enumerator
TSS_INITIAL 
TSS_BOUNDED 
TSS_BUILDRUNS 
TSS_SORTEDINMEM 
TSS_SORTEDONTAPE 
TSS_FINALMERGE 

Definition at line 154 of file tuplesort.c.

155{
156 TSS_INITIAL, /* Loading tuples; still within memory limit */
157 TSS_BOUNDED, /* Loading tuples into bounded-size heap */
158 TSS_BUILDRUNS, /* Loading tuples; writing to tape */
159 TSS_SORTEDINMEM, /* Sort completed entirely in memory */
160 TSS_SORTEDONTAPE, /* Sort completed, final run is on tape */
161 TSS_FINALMERGE, /* Performing final merge on-the-fly */
162} TupSortStatus;
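
These states form a small machine. The transitions below are a sketch assembled from tuplesort_puttuple_common() and tuplesort_performsort() in this file, shown for orientation:

    TSS_INITIAL ---------------------------> TSS_SORTEDINMEM  (everything fit in memory)
        |  \
        |   `-> TSS_BOUNDED ----------------> TSS_SORTEDINMEM  (bounded heap sort)
        |
        `-> TSS_BUILDRUNS --> mergeruns() --> TSS_SORTEDONTAPE (materialized result tape)
                                          `-> TSS_FINALMERGE   (merge runs on-the-fly)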

Function Documentation

◆ beginmerge()

static void beginmerge (Tuplesortstate *state)

Definition at line 2256 of file tuplesort.c.

2257{
2258 int activeTapes;
2259 int srcTapeIndex;
2260
2261 /* Heap should be empty here */
2262 Assert(state->memtupcount == 0);
2263
2264 activeTapes = Min(state->nInputTapes, state->nInputRuns);
2265
2266 for (srcTapeIndex = 0; srcTapeIndex < activeTapes; srcTapeIndex++)
2267 {
2268 SortTuple tup;
2269
2270 if (mergereadnext(state, state->inputTapes[srcTapeIndex], &tup))
2271 {
2272 tup.srctape = srcTapeIndex;
2273 tuplesort_heap_insert(state, &tup);
2274 }
2275 }
2276}

References Assert(), mergereadnext(), Min, SortTuple::srctape, and tuplesort_heap_insert().

Referenced by mergeonerun(), and mergeruns().

◆ consider_abort_common()

static bool consider_abort_common (Tuplesortstate *state)

Definition at line 1315 of file tuplesort.c.

1316{
1317 Assert(state->base.sortKeys[0].abbrev_converter != NULL);
1318 Assert(state->base.sortKeys[0].abbrev_abort != NULL);
1319 Assert(state->base.sortKeys[0].abbrev_full_comparator != NULL);
1320
1321 /*
1322 * Check effectiveness of abbreviation optimization. Consider aborting
1323 * when still within memory limit.
1324 */
1325 if (state->status == TSS_INITIAL &&
1326 state->memtupcount >= state->abbrevNext)
1327 {
1328 state->abbrevNext *= 2;
1329
1330 /*
1331 * Check opclass-supplied abbreviation abort routine. It may indicate
1332 * that abbreviation should not proceed.
1333 */
1334 if (!state->base.sortKeys->abbrev_abort(state->memtupcount,
1335 state->base.sortKeys))
1336 return false;
1337
1338 /*
1339 * Finally, restore authoritative comparator, and indicate that
1340 * abbreviation is not in play by setting abbrev_converter to NULL
1341 */
1342 state->base.sortKeys[0].comparator = state->base.sortKeys[0].abbrev_full_comparator;
1343 state->base.sortKeys[0].abbrev_converter = NULL;
1344 /* Not strictly necessary, but be tidy */
1345 state->base.sortKeys[0].abbrev_abort = NULL;
1346 state->base.sortKeys[0].abbrev_full_comparator = NULL;
1347
1348 /* Give up - expect original pass-by-value representation */
1349 return true;
1350 }
1351
1352 return false;
1353}

References Assert(), and TSS_INITIAL.

Referenced by tuplesort_puttuple_common().
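
Because abbrevNext doubles each time this check fires, the abort logic runs at geometrically spaced tuple counts (with the usual initial value of 10, set at sort startup and not shown on this page, that is 10, 20, 40, 80, ...), so re-evaluating abbreviation effectiveness stays cheap relative to the input size.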

◆ dumptuples()

static void dumptuples (Tuplesortstate *state, bool alltuples)

Definition at line 2303 of file tuplesort.c.

2304{
2305 int memtupwrite;
2306 int i;
2307
2308 /*
2309 * Nothing to do if we still fit in available memory and have array slots,
2310 * unless this is the final call during initial run generation.
2311 */
2312 if (state->memtupcount < state->memtupsize && !LACKMEM(state) &&
2313 !alltuples)
2314 return;
2315
2316 /*
2317 * Final call might require no sorting, in rare cases where we just so
2318 * happen to have previously LACKMEM()'d at the point where exactly all
2319 * remaining tuples are loaded into memory, just before input was
2320 * exhausted. In general, short final runs are quite possible, but avoid
2321 * creating a completely empty run. In a worker, though, we must produce
2322 * at least one tape, even if it's empty.
2323 */
2324 if (state->memtupcount == 0 && state->currentRun > 0)
2325 return;
2326
2327 Assert(state->status == TSS_BUILDRUNS);
2328
2329 /*
2330 * It seems unlikely that this limit will ever be exceeded, but take no
2331 * chances
2332 */
2333 if (state->currentRun == INT_MAX)
2334 ereport(ERROR,
2335 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
2336 errmsg("cannot have more than %d runs for an external sort",
2337 INT_MAX)));
2338
2339 if (state->currentRun > 0)
2340 selectnewtape(state);
2341
2342 state->currentRun++;
2343
2344 if (trace_sort)
2345 elog(LOG, "worker %d starting quicksort of run %d: %s",
2346 state->worker, state->currentRun,
2347 pg_rusage_show(&state->ru_start));
2348
2349 /*
2350 * Sort all tuples accumulated within the allowed amount of memory for
2351 * this run using quicksort
2352 */
2353 tuplesort_sort_memtuples(state);
2354
2355 if (trace_sort)
2356 elog(LOG, "worker %d finished quicksort of run %d: %s",
2357 state->worker, state->currentRun,
2358 pg_rusage_show(&state->ru_start));
2359
2360 memtupwrite = state->memtupcount;
2361 for (i = 0; i < memtupwrite; i++)
2362 {
2363 SortTuple *stup = &state->memtuples[i];
2364
2365 WRITETUP(state, state->destTape, stup);
2366 }
2367
2368 state->memtupcount = 0;
2369
2370 /*
2371 * Reset tuple memory. We've freed all of the tuples that we previously
2372 * allocated. It's important to avoid fragmentation when there is a stark
2373 * change in the sizes of incoming tuples. In bounded sorts,
2374 * fragmentation due to AllocSetFree's bucketing by size class might be
2375 * particularly bad if this step wasn't taken.
2376 */
2377 MemoryContextReset(state->base.tuplecontext);
2378
2379 /*
2380 * Now update the memory accounting to subtract the memory used by the
2381 * tuple.
2382 */
2383 FREEMEM(state, state->tupleMem);
2384 state->tupleMem = 0;
2385
2386 markrunend(state->destTape);
2387
2388 if (trace_sort)
2389 elog(LOG, "worker %d finished writing run %d to tape %d: %s",
2390 state->worker, state->currentRun, (state->currentRun - 1) % state->nOutputTapes + 1,
2391 pg_rusage_show(&state->ru_start));
2392}

References Assert(), elog, ereport, errcode(), errmsg(), ERROR, FREEMEM, i, LACKMEM, LOG, markrunend(), MemoryContextReset(), pg_rusage_show(), selectnewtape(), trace_sort, TSS_BUILDRUNS, tuplesort_sort_memtuples(), and WRITETUP.

Referenced by tuplesort_performsort(), and tuplesort_puttuple_common().

◆ free_sort_tuple()

static void free_sort_tuple (Tuplesortstate *state, SortTuple *stup)

Definition at line 3122 of file tuplesort.c.

3123{
3124 if (stup->tuple)
3125 {
3126 FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
3127 pfree(stup->tuple);
3128 stup->tuple = NULL;
3129 }
3130}

References FREEMEM, GetMemoryChunkSpace(), pfree(), and SortTuple::tuple.

Referenced by make_bounded_heap(), and tuplesort_puttuple_common().

◆ getlen()

static unsigned int getlen (LogicalTape *tape, bool eofOK)

Definition at line 2850 of file tuplesort.c.

2851{
2852 unsigned int len;
2853
2854 if (LogicalTapeRead(tape,
2855 &len, sizeof(len)) != sizeof(len))
2856 elog(ERROR, "unexpected end of tape");
2857 if (len == 0 && !eofOK)
2858 elog(ERROR, "unexpected end of data");
2859 return len;
2860}

References elog, ERROR, len, and LogicalTapeRead().

Referenced by mergereadnext(), and tuplesort_gettuple_common().

◆ grow_memtuples()

static bool grow_memtuples (Tuplesortstate *state)

Definition at line 1048 of file tuplesort.c.

1049{
1050 int newmemtupsize;
1051 int memtupsize = state->memtupsize;
1052 int64 memNowUsed = state->allowedMem - state->availMem;
1053
1054 /* Forget it if we've already maxed out memtuples, per comment above */
1055 if (!state->growmemtuples)
1056 return false;
1057
1058 /* Select new value of memtupsize */
1059 if (memNowUsed <= state->availMem)
1060 {
1061 /*
1062 * We've used no more than half of allowedMem; double our usage,
1063 * clamping at INT_MAX tuples.
1064 */
1065 if (memtupsize < INT_MAX / 2)
1066 newmemtupsize = memtupsize * 2;
1067 else
1068 {
1069 newmemtupsize = INT_MAX;
1070 state->growmemtuples = false;
1071 }
1072 }
1073 else
1074 {
1075 /*
1076 * This will be the last increment of memtupsize. Abandon doubling
1077 * strategy and instead increase as much as we safely can.
1078 *
1079 * To stay within allowedMem, we can't increase memtupsize by more
1080 * than availMem / sizeof(SortTuple) elements. In practice, we want
1081 * to increase it by considerably less, because we need to leave some
1082 * space for the tuples to which the new array slots will refer. We
1083 * assume the new tuples will be about the same size as the tuples
1084 * we've already seen, and thus we can extrapolate from the space
1085 * consumption so far to estimate an appropriate new size for the
1086 * memtuples array. The optimal value might be higher or lower than
1087 * this estimate, but it's hard to know that in advance. We again
1088 * clamp at INT_MAX tuples.
1089 *
1090 * This calculation is safe against enlarging the array so much that
1091 * LACKMEM becomes true, because the memory currently used includes
1092 * the present array; thus, there would be enough allowedMem for the
1093 * new array elements even if no other memory were currently used.
1094 *
1095 * We do the arithmetic in float8, because otherwise the product of
1096 * memtupsize and allowedMem could overflow. Any inaccuracy in the
1097 * result should be insignificant; but even if we computed a
1098 * completely insane result, the checks below will prevent anything
1099 * really bad from happening.
1100 */
1101 double grow_ratio;
1102
1103 grow_ratio = (double) state->allowedMem / (double) memNowUsed;
1104 if (memtupsize * grow_ratio < INT_MAX)
1105 newmemtupsize = (int) (memtupsize * grow_ratio);
1106 else
1107 newmemtupsize = INT_MAX;
1108
1109 /* We won't make any further enlargement attempts */
1110 state->growmemtuples = false;
1111 }
1112
1113 /* Must enlarge array by at least one element, else report failure */
1114 if (newmemtupsize <= memtupsize)
1115 goto noalloc;
1116
1117 /*
1118 * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize. Clamp
1119 * to ensure our request won't be rejected. Note that we can easily
1120 * exhaust address space before facing this outcome. (This is presently
1121 * impossible due to guc.c's MAX_KILOBYTES limitation on work_mem, but
1122 * don't rely on that at this distance.)
1123 */
1124 if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
1125 {
1126 newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
1127 state->growmemtuples = false; /* can't grow any more */
1128 }
1129
1130 /*
1131 * We need to be sure that we do not cause LACKMEM to become true, else
1132 * the space management algorithm will go nuts. The code above should
1133 * never generate a dangerous request, but to be safe, check explicitly
1134 * that the array growth fits within availMem. (We could still cause
1135 * LACKMEM if the memory chunk overhead associated with the memtuples
1136 * array were to increase. That shouldn't happen because we chose the
1137 * initial array size large enough to ensure that palloc will be treating
1138 * both old and new arrays as separate chunks. But we'll check LACKMEM
1139 * explicitly below just in case.)
1140 */
1141 if (state->availMem < (int64) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
1142 goto noalloc;
1143
1144 /* OK, do it */
1145 FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
1146 state->memtupsize = newmemtupsize;
1147 state->memtuples = (SortTuple *)
1148 repalloc_huge(state->memtuples,
1149 state->memtupsize * sizeof(SortTuple));
1150 USEMEM(state, GetMemoryChunkSpace(state->memtuples));
1151 if (LACKMEM(state))
1152 elog(ERROR, "unexpected out-of-memory situation in tuplesort");
1153 return true;
1154
1155noalloc:
1156 /* If for any reason we didn't realloc, shut off future attempts */
1157 state->growmemtuples = false;
1158 return false;
1159}

References elog, ERROR, FREEMEM, GetMemoryChunkSpace(), LACKMEM, MaxAllocHugeSize, repalloc_huge(), and USEMEM.

Referenced by tuplesort_puttuple_common().
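
A worked example of the second branch (numbers invented for illustration): with memtupsize = 4096 and allowedMem = 64 MB, suppose memNowUsed = 48 MB, so availMem = 16 MB and the doubling branch does not apply. Then grow_ratio = 64/48 ≈ 1.33, the array is enlarged one final time to about 5461 slots, and growmemtuples is permanently cleared.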

◆ init_slab_allocator()

static void init_slab_allocator (Tuplesortstate *state, int numSlots)

Definition at line 1977 of file tuplesort.c.

1978{
1979 if (numSlots > 0)
1980 {
1981 char *p;
1982 int i;
1983
1984 state->slabMemoryBegin = palloc(numSlots * SLAB_SLOT_SIZE);
1985 state->slabMemoryEnd = state->slabMemoryBegin +
1986 numSlots * SLAB_SLOT_SIZE;
1987 state->slabFreeHead = (SlabSlot *) state->slabMemoryBegin;
1988 USEMEM(state, numSlots * SLAB_SLOT_SIZE);
1989
1990 p = state->slabMemoryBegin;
1991 for (i = 0; i < numSlots - 1; i++)
1992 {
1993 ((SlabSlot *) p)->nextfree = (SlabSlot *) (p + SLAB_SLOT_SIZE);
1994 p += SLAB_SLOT_SIZE;
1995 }
1996 ((SlabSlot *) p)->nextfree = NULL;
1997 }
1998 else
1999 {
2000 state->slabMemoryBegin = state->slabMemoryEnd = NULL;
2001 state->slabFreeHead = NULL;
2002 }
2003 state->slabAllocatorUsed = true;
2004}

References i, palloc(), SLAB_SLOT_SIZE, and USEMEM.

Referenced by mergeruns().
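
The free list built here is popped and pushed elsewhere in this file; a condensed sketch of the protocol (pop as in tuplesort_readtup_alloc(), push as in the RELEASE_SLAB_SLOT() macro):

    /* pop: detach the head slot to hold a tuple read back from tape */
    SlabSlot   *buf = state->slabFreeHead;

    Assert(buf != NULL);
    state->slabFreeHead = buf->nextfree;

    /* ... the slot carries the tuple while it sits in the merge heap ... */

    /* push: return the slot to the head of the free list for reuse */
    buf->nextfree = state->slabFreeHead;
    state->slabFreeHead = buf;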

◆ inittapes()

static void inittapes (Tuplesortstate *state, bool mergeruns)

Definition at line 1861 of file tuplesort.c.

1862{
1863 Assert(!LEADER(state));
1864
1865 if (mergeruns)
1866 {
1867 /* Compute number of input tapes to use when merging */
1868 state->maxTapes = tuplesort_merge_order(state->allowedMem);
1869 }
1870 else
1871 {
1872 /* Workers can sometimes produce single run, output without merge */
1873 Assert(WORKER(state));
1874 state->maxTapes = MINORDER;
1875 }
1876
1877 if (trace_sort)
1878 elog(LOG, "worker %d switching to external sort with %d tapes: %s",
1879 state->worker, state->maxTapes, pg_rusage_show(&state->ru_start));
1880
1881 /* Create the tape set */
1882 inittapestate(state, state->maxTapes);
1883 state->tapeset =
1884 LogicalTapeSetCreate(false,
1885 state->shared ? &state->shared->fileset : NULL,
1886 state->worker);
1887
1888 state->currentRun = 0;
1889
1890 /*
1891 * Initialize logical tape arrays.
1892 */
1893 state->inputTapes = NULL;
1894 state->nInputTapes = 0;
1895 state->nInputRuns = 0;
1896
1897 state->outputTapes = palloc0(state->maxTapes * sizeof(LogicalTape *));
1898 state->nOutputTapes = 0;
1899 state->nOutputRuns = 0;
1900
1901 state->status = TSS_BUILDRUNS;
1902
1903 selectnewtape(state);
1904}

References Assert(), elog, inittapestate(), LEADER, LOG, LogicalTapeSetCreate(), mergeruns(), MINORDER, palloc0(), pg_rusage_show(), selectnewtape(), trace_sort, TSS_BUILDRUNS, tuplesort_merge_order(), and WORKER.

Referenced by tuplesort_performsort(), and tuplesort_puttuple_common().

◆ inittapestate()

static void inittapestate (Tuplesortstate *state, int maxTapes)

Definition at line 1910 of file tuplesort.c.

1911{
1912 int64 tapeSpace;
1913
1914 /*
1915 * Decrease availMem to reflect the space needed for tape buffers; but
1916 * don't decrease it to the point that we have no room for tuples. (That
1917 * case is only likely to occur if sorting pass-by-value Datums; in all
1918 * other scenarios the memtuples[] array is unlikely to occupy more than
1919 * half of allowedMem. In the pass-by-value case it's not important to
1920 * account for tuple space, so we don't care if LACKMEM becomes
1921 * inaccurate.)
1922 */
1923 tapeSpace = (int64) maxTapes * TAPE_BUFFER_OVERHEAD;
1924
1925 if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
1926 USEMEM(state, tapeSpace);
1927
1928 /*
1929 * Make sure that the temp file(s) underlying the tape set are created in
1930 * suitable temp tablespaces. For parallel sorts, this should have been
1931 * called already, but it doesn't matter if it is called a second time.
1932 */
1933 PrepareTempTablespaces();
1934}

References GetMemoryChunkSpace(), PrepareTempTablespaces(), TAPE_BUFFER_OVERHEAD, and USEMEM.

Referenced by inittapes(), and leader_takeover_tapes().

◆ leader_takeover_tapes()

static void leader_takeover_tapes (Tuplesortstate *state)

Definition at line 3063 of file tuplesort.c.

3064{
3065 Sharedsort *shared = state->shared;
3066 int nParticipants = state->nParticipants;
3067 int workersFinished;
3068 int j;
3069
3070 Assert(LEADER(state));
3071 Assert(nParticipants >= 1);
3072
3073 SpinLockAcquire(&shared->mutex);
3074 workersFinished = shared->workersFinished;
3075 SpinLockRelease(&shared->mutex);
3076
3077 if (nParticipants != workersFinished)
3078 elog(ERROR, "cannot take over tapes before all workers finish");
3079
3080 /*
3081 * Create the tapeset from worker tapes, including a leader-owned tape at
3082 * the end. Parallel workers are far more expensive than logical tapes,
3083 * so the number of tapes allocated here should never be excessive.
3084 */
3085 inittapestate(state, nParticipants);
3086 state->tapeset = LogicalTapeSetCreate(false, &shared->fileset, -1);
3087
3088 /*
3089 * Set currentRun to reflect the number of runs we will merge (it's not
3090 * used for anything, this is just pro forma)
3091 */
3092 state->currentRun = nParticipants;
3093
3094 /*
3095 * Initialize the state to look the same as after building the initial
3096 * runs.
3097 *
3098 * There will always be exactly 1 run per worker, and exactly one input
3099 * tape per run, because workers always output exactly 1 run, even when
3100 * there were no input tuples for workers to sort.
3101 */
3102 state->inputTapes = NULL;
3103 state->nInputTapes = 0;
3104 state->nInputRuns = 0;
3105
3106 state->outputTapes = palloc0(nParticipants * sizeof(LogicalTape *));
3107 state->nOutputTapes = nParticipants;
3108 state->nOutputRuns = nParticipants;
3109
3110 for (j = 0; j < nParticipants; j++)
3111 {
3112 state->outputTapes[j] = LogicalTapeImport(state->tapeset, j, &shared->tapes[j]);
3113 }
3114
3115 state->status = TSS_BUILDRUNS;
3116}

References Assert(), elog, ERROR, Sharedsort::fileset, inittapestate(), j, LEADER, LogicalTapeImport(), LogicalTapeSetCreate(), Sharedsort::mutex, palloc0(), SpinLockAcquire, SpinLockRelease, Sharedsort::tapes, TSS_BUILDRUNS, and Sharedsort::workersFinished.

Referenced by tuplesort_performsort().

◆ make_bounded_heap()

static void make_bounded_heap (Tuplesortstate *state)

Definition at line 2583 of file tuplesort.c.

2584{
2585 int tupcount = state->memtupcount;
2586 int i;
2587
2588 Assert(state->status == TSS_INITIAL);
2589 Assert(state->bounded);
2590 Assert(tupcount >= state->bound);
2591 Assert(SERIAL(state));
2592
2593 /* Reverse sort direction so largest entry will be at root */
2594 reversedirection(state);
2595
2596 state->memtupcount = 0; /* make the heap empty */
2597 for (i = 0; i < tupcount; i++)
2598 {
2599 if (state->memtupcount < state->bound)
2600 {
2601 /* Insert next tuple into heap */
2602 /* Must copy source tuple to avoid possible overwrite */
2603 SortTuple stup = state->memtuples[i];
2604
2605 tuplesort_heap_insert(state, &stup);
2606 }
2607 else
2608 {
2609 /*
2610 * The heap is full. Replace the largest entry with the new
2611 * tuple, or just discard it, if it's larger than anything already
2612 * in the heap.
2613 */
2614 if (COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
2615 {
2616 free_sort_tuple(state, &state->memtuples[i]);
2617 CHECK_FOR_INTERRUPTS();
2618 }
2619 else
2620 tuplesort_heap_replace_top(state, &state->memtuples[i]);
2621 }
2622 }
2623
2624 Assert(state->memtupcount == state->bound);
2625 state->status = TSS_BOUNDED;
2626}

References Assert(), CHECK_FOR_INTERRUPTS, COMPARETUP, free_sort_tuple(), i, reversedirection(), SERIAL, TSS_BOUNDED, TSS_INITIAL, tuplesort_heap_insert(), and tuplesort_heap_replace_top().

Referenced by tuplesort_puttuple_common().
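
For example, with bound = 3 on an ascending sort: after reversedirection(), memtuples[0] holds the largest of the three smallest values seen so far, so each later tuple costs a single COMPARETUP() against the root to decide between discarding it (it cannot belong to the top 3) and replacing the root.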

◆ markrunend()

static void markrunend (LogicalTape *tape)

Definition at line 2863 of file tuplesort.c.

2864{
2865 unsigned int len = 0;
2866
2867 LogicalTapeWrite(tape, &len, sizeof(len));
2868}

References len, and LogicalTapeWrite().

Referenced by dumptuples(), and mergeonerun().

◆ merge_read_buffer_size()

static int64 merge_read_buffer_size (int64 avail_mem, int nInputTapes, int nInputRuns, int maxOutputTapes)

Definition at line 1829 of file tuplesort.c.

1831{
1832 int nOutputRuns;
1833 int nOutputTapes;
1834
1835 /*
1836 * How many output tapes will we produce in this pass?
1837 *
1838 * This is nInputRuns / nInputTapes, rounded up.
1839 */
1840 nOutputRuns = (nInputRuns + nInputTapes - 1) / nInputTapes;
1841
1842 nOutputTapes = Min(nOutputRuns, maxOutputTapes);
1843
1844 /*
1845 * Each output tape consumes TAPE_BUFFER_OVERHEAD bytes of memory. All
1846 * remaining memory is divided evenly between the input tapes.
1847 *
1848 * This also follows from the formula in tuplesort_merge_order, but here
1849 * we derive the input buffer size from the amount of memory available,
1850 * and M and N.
1851 */
1852 return Max((avail_mem - TAPE_BUFFER_OVERHEAD * nOutputTapes) / nInputTapes, 0);
1853}

References Max, Min, and TAPE_BUFFER_OVERHEAD.

Referenced by mergeruns().
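
A worked example (assuming the default 8 kB BLCKSZ, so TAPE_BUFFER_OVERHEAD = 8192): with avail_mem = 1 MB, nInputTapes = 6, nInputRuns = 15 and maxOutputTapes = 6, nOutputRuns = ceil(15/6) = 3 and nOutputTapes = 3, so each input tape gets (1048576 - 3 * 8192) / 6 = 170666 bytes, roughly 166 kB, of read buffer.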

◆ mergeonerun()

static void mergeonerun (Tuplesortstate *state)

Definition at line 2196 of file tuplesort.c.

2197{
2198 int srcTapeIndex;
2199 LogicalTape *srcTape;
2200
2201 /*
2202 * Start the merge by loading one tuple from each active source tape into
2203 * the heap.
2204 */
2205 beginmerge(state);
2206
2207 Assert(state->slabAllocatorUsed);
2208
2209 /*
2210 * Execute merge by repeatedly extracting lowest tuple in heap, writing it
2211 * out, and replacing it with next tuple from same tape (if there is
2212 * another one).
2213 */
2214 while (state->memtupcount > 0)
2215 {
2216 SortTuple stup;
2217
2218 /* write the tuple to destTape */
2219 srcTapeIndex = state->memtuples[0].srctape;
2220 srcTape = state->inputTapes[srcTapeIndex];
2221 WRITETUP(state, state->destTape, &state->memtuples[0]);
2222
2223 /* recycle the slot of the tuple we just wrote out, for the next read */
2224 if (state->memtuples[0].tuple)
2225 RELEASE_SLAB_SLOT(state, state->memtuples[0].tuple);
2226
2227 /*
2228 * pull next tuple from the tape, and replace the written-out tuple in
2229 * the heap with it.
2230 */
2231 if (mergereadnext(state, srcTape, &stup))
2232 {
2233 stup.srctape = srcTapeIndex;
2234 tuplesort_heap_replace_top(state, &stup);
2235 }
2236 else
2237 {
2238 tuplesort_heap_delete_top(state);
2239 state->nInputRuns--;
2240 }
2241 }
2242
2243 /*
2244 * When the heap empties, we're done. Write an end-of-run marker on the
2245 * output tape.
2246 */
2247 markrunend(state->destTape);
2248}

References Assert(), beginmerge(), markrunend(), mergereadnext(), RELEASE_SLAB_SLOT, SortTuple::srctape, tuplesort_heap_delete_top(), tuplesort_heap_replace_top(), and WRITETUP.

Referenced by mergeruns().

◆ mergereadnext()

static bool mergereadnext (Tuplesortstate *state, LogicalTape *srcTape, SortTuple *stup)

Definition at line 2284 of file tuplesort.c.

2285{
2286 unsigned int tuplen;
2287
2288 /* read next tuple, if any */
2289 if ((tuplen = getlen(srcTape, true)) == 0)
2290 return false;
2291 READTUP(state, stup, srcTape, tuplen);
2292
2293 return true;
2294}

References getlen(), and READTUP.

Referenced by beginmerge(), mergeonerun(), and tuplesort_gettuple_common().

◆ mergeruns()

static void mergeruns (Tuplesortstate *state)

Definition at line 2013 of file tuplesort.c.

2014{
2015 int tapenum;
2016
2017 Assert(state->status == TSS_BUILDRUNS);
2018 Assert(state->memtupcount == 0);
2019
2020 if (state->base.sortKeys != NULL && state->base.sortKeys->abbrev_converter != NULL)
2021 {
2022 /*
2023 * If there are multiple runs to be merged, when we go to read back
2024 * tuples from disk, abbreviated keys will not have been stored, and
2025 * we don't care to regenerate them. Disable abbreviation from this
2026 * point on.
2027 */
2028 state->base.sortKeys->abbrev_converter = NULL;
2029 state->base.sortKeys->comparator = state->base.sortKeys->abbrev_full_comparator;
2030
2031 /* Not strictly necessary, but be tidy */
2032 state->base.sortKeys->abbrev_abort = NULL;
2033 state->base.sortKeys->abbrev_full_comparator = NULL;
2034 }
2035
2036 /*
2037 * Reset tuple memory. We've freed all the tuples that we previously
2038 * allocated. We will use the slab allocator from now on.
2039 */
2040 MemoryContextResetOnly(state->base.tuplecontext);
2041
2042 /*
2043 * We no longer need a large memtuples array. (We will allocate a smaller
2044 * one for the heap later.)
2045 */
2046 FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
2047 pfree(state->memtuples);
2048 state->memtuples = NULL;
2049
2050 /*
2051 * Initialize the slab allocator. We need one slab slot per input tape,
2052 * for the tuples in the heap, plus one to hold the tuple last returned
2053 * from tuplesort_gettuple. (If we're sorting pass-by-val Datums,
2054 * however, we don't need to allocate anything.)
2055 *
2056 * In a multi-pass merge, we could shrink this allocation for the last
2057 * merge pass, if it has fewer tapes than previous passes, but we don't
2058 * bother.
2059 *
2060 * From this point on, we no longer use the USEMEM()/LACKMEM() mechanism
2061 * to track memory usage of individual tuples.
2062 */
2063 if (state->base.tuples)
2064 init_slab_allocator(state, state->nOutputTapes + 1);
2065 else
2066 init_slab_allocator(state, 0);
2067
2068 /*
2069 * Allocate a new 'memtuples' array, for the heap. It will hold one tuple
2070 * from each input tape.
2071 *
2072 * We could shrink this, too, between passes in a multi-pass merge, but we
2073 * don't bother. (The initial input tapes are still in outputTapes. The
2074 * number of input tapes will not increase between passes.)
2075 */
2076 state->memtupsize = state->nOutputTapes;
2077 state->memtuples = (SortTuple *) MemoryContextAlloc(state->base.maincontext,
2078 state->nOutputTapes * sizeof(SortTuple));
2079 USEMEM(state, GetMemoryChunkSpace(state->memtuples));
2080
2081 /*
2082 * Use all the remaining memory we have available for tape buffers among
2083 * all the input tapes. At the beginning of each merge pass, we will
2084 * divide this memory between the input and output tapes in the pass.
2085 */
2086 state->tape_buffer_mem = state->availMem;
2087 USEMEM(state, state->tape_buffer_mem);
2088 if (trace_sort)
2089 elog(LOG, "worker %d using %zu KB of memory for tape buffers",
2090 state->worker, state->tape_buffer_mem / 1024);
2091
2092 for (;;)
2093 {
2094 /*
2095 * On the first iteration, or if we have read all the runs from the
2096 * input tapes in a multi-pass merge, it's time to start a new pass.
2097 * Rewind all the output tapes, and make them inputs for the next
2098 * pass.
2099 */
2100 if (state->nInputRuns == 0)
2101 {
2102 int64 input_buffer_size;
2103
2104 /* Close the old, emptied, input tapes */
2105 if (state->nInputTapes > 0)
2106 {
2107 for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)
2108 LogicalTapeClose(state->inputTapes[tapenum]);
2109 pfree(state->inputTapes);
2110 }
2111
2112 /* Previous pass's outputs become next pass's inputs. */
2113 state->inputTapes = state->outputTapes;
2114 state->nInputTapes = state->nOutputTapes;
2115 state->nInputRuns = state->nOutputRuns;
2116
2117 /*
2118 * Reset output tape variables. The actual LogicalTapes will be
2119 * created as needed, here we only allocate the array to hold
2120 * them.
2121 */
2122 state->outputTapes = palloc0(state->nInputTapes * sizeof(LogicalTape *));
2123 state->nOutputTapes = 0;
2124 state->nOutputRuns = 0;
2125
2126 /*
2127 * Redistribute the memory allocated for tape buffers, among the
2128 * new input and output tapes.
2129 */
2130 input_buffer_size = merge_read_buffer_size(state->tape_buffer_mem,
2131 state->nInputTapes,
2132 state->nInputRuns,
2133 state->maxTapes);
2134
2135 if (trace_sort)
2136 elog(LOG, "starting merge pass of %d input runs on %d tapes, " INT64_FORMAT " KB of memory for each input tape: %s",
2137 state->nInputRuns, state->nInputTapes, input_buffer_size / 1024,
2138 pg_rusage_show(&state->ru_start));
2139
2140 /* Prepare the new input tapes for merge pass. */
2141 for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)
2142 LogicalTapeRewindForRead(state->inputTapes[tapenum], input_buffer_size);
2143
2144 /*
2145 * If there's just one run left on each input tape, then only one
2146 * merge pass remains. If we don't have to produce a materialized
2147 * sorted tape, we can stop at this point and do the final merge
2148 * on-the-fly.
2149 */
2150 if ((state->base.sortopt & TUPLESORT_RANDOMACCESS) == 0
2151 && state->nInputRuns <= state->nInputTapes
2152 && !WORKER(state))
2153 {
2154 /* Tell logtape.c we won't be writing anymore */
2155 LogicalTapeSetForgetFreeSpace(state->tapeset);
2156 /* Initialize for the final merge pass */
2157 beginmerge(state);
2158 state->status = TSS_FINALMERGE;
2159 return;
2160 }
2161 }
2162
2163 /* Select an output tape */
2164 selectnewtape(state);
2165
2166 /* Merge one run from each input tape. */
2167 mergeonerun(state);
2168
2169 /*
2170 * If the input tapes are empty, and we output only one output run,
2171 * we're done. The current output tape contains the final result.
2172 */
2173 if (state->nInputRuns == 0 && state->nOutputRuns <= 1)
2174 break;
2175 }
2176
2177 /*
2178 * Done. The result is on a single run on a single tape.
2179 */
2180 state->result_tape = state->outputTapes[0];
2181 if (!WORKER(state))
2182 LogicalTapeFreeze(state->result_tape, NULL);
2183 else
2184 worker_freeze_result_tape(state);
2185 state->status = TSS_SORTEDONTAPE;
2186
2187 /* Close all the now-empty input tapes, to release their read buffers. */
2188 for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)
2189 LogicalTapeClose(state->inputTapes[tapenum]);
2190}

References Assert(), beginmerge(), elog, FREEMEM, GetMemoryChunkSpace(), init_slab_allocator(), INT64_FORMAT, LOG, LogicalTapeClose(), LogicalTapeFreeze(), LogicalTapeRewindForRead(), LogicalTapeSetForgetFreeSpace(), MemoryContextAlloc(), MemoryContextResetOnly(), merge_read_buffer_size(), mergeonerun(), palloc0(), pfree(), pg_rusage_show(), selectnewtape(), trace_sort, TSS_BUILDRUNS, TSS_FINALMERGE, TSS_SORTEDONTAPE, TUPLESORT_RANDOMACCESS, USEMEM, WORKER, and worker_freeze_result_tape().

Referenced by inittapes(), and tuplesort_performsort().
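
To illustrate the pass structure (numbers invented): starting from 30 initial runs on 6 tapes, the first pass merges six runs at a time into 5 output runs on 5 tapes. Those become the next pass's inputs, and since 5 runs now fit in a single merge, a sort opened without TUPLESORT_RANDOMACCESS (and not running in a worker) stops there in TSS_FINALMERGE, delivering tuples on the fly rather than writing a final tape.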

◆ qsort_tuple_int32_compare()

static pg_attribute_always_inline int qsort_tuple_int32_compare (SortTuple *a, SortTuple *b, Tuplesortstate *state)

Definition at line 540 of file tuplesort.c.

541{
542 int compare;
543
544 compare = ApplyInt32SortComparator(a->datum1, a->isnull1,
545 b->datum1, b->isnull1,
546 &state->base.sortKeys[0]);
547
548 if (compare != 0)
549 return compare;
550
551 /*
552 * No need to waste effort calling the tiebreak function when there are no
553 * other keys to sort on.
554 */
555 if (state->base.onlyKey != NULL)
556 return 0;
557
558 return state->base.comparetup_tiebreak(a, b, state);
559}

References a, ApplyInt32SortComparator(), b, and compare().

◆ qsort_tuple_signed_compare()

static pg_attribute_always_inline int qsort_tuple_signed_compare (SortTuple *a, SortTuple *b, Tuplesortstate *state)

Definition at line 517 of file tuplesort.c.

518{
519 int compare;
520
521 compare = ApplySignedSortComparator(a->datum1, a->isnull1,
522 b->datum1, b->isnull1,
523 &state->base.sortKeys[0]);
524
525 if (compare != 0)
526 return compare;
527
528 /*
529 * No need to waste effort calling the tiebreak function when there are no
530 * other keys to sort on.
531 */
532 if (state->base.onlyKey != NULL)
533 return 0;
534
535 return state->base.comparetup_tiebreak(a, b, state);
536}

References a, ApplySignedSortComparator(), b, and compare().

◆ qsort_tuple_unsigned_compare()

static pg_attribute_always_inline int qsort_tuple_unsigned_compare (SortTuple *a, SortTuple *b, Tuplesortstate *state)
static

Definition at line 495 of file tuplesort.c.

496{
497 int compare;
498
499 compare = ApplyUnsignedSortComparator(a->datum1, a->isnull1,
500 b->datum1, b->isnull1,
501 &state->base.sortKeys[0]);
502 if (compare != 0)
503 return compare;
504
505 /*
506 * No need to waste effort calling the tiebreak function when there are no
507 * other keys to sort on.
508 */
509 if (state->base.onlyKey != NULL)
510 return 0;
511
512 return state->base.comparetup_tiebreak(a, b, state);
513}

References a, ApplyUnsignedSortComparator(), b, and compare().

◆ reversedirection()

static void reversedirection (Tuplesortstate *state)

Definition at line 2832 of file tuplesort.c.

2833{
2834 SortSupport sortKey = state->base.sortKeys;
2835 int nkey;
2836
2837 for (nkey = 0; nkey < state->base.nKeys; nkey++, sortKey++)
2838 {
2839 sortKey->ssup_reverse = !sortKey->ssup_reverse;
2840 sortKey->ssup_nulls_first = !sortKey->ssup_nulls_first;
2841 }
2842}

References SortSupportData::ssup_nulls_first, and SortSupportData::ssup_reverse.

Referenced by make_bounded_heap(), and sort_bounded_heap().

◆ selectnewtape()

static void selectnewtape (Tuplesortstate *state)

Definition at line 1944 of file tuplesort.c.

1945{
1946 /*
1947 * At the beginning of each merge pass, nOutputTapes and nOutputRuns are
1948 * both zero. On each call, we create a new output tape to hold the next
1949 * run, until maxTapes is reached. After that, we assign new runs to the
1950 * existing tapes in a round robin fashion.
1951 */
1952 if (state->nOutputTapes < state->maxTapes)
1953 {
1954 /* Create a new tape to hold the next run */
1955 Assert(state->outputTapes[state->nOutputRuns] == NULL);
1956 Assert(state->nOutputRuns == state->nOutputTapes);
1957 state->destTape = LogicalTapeCreate(state->tapeset);
1958 state->outputTapes[state->nOutputTapes] = state->destTape;
1959 state->nOutputTapes++;
1960 state->nOutputRuns++;
1961 }
1962 else
1963 {
1964 /*
1965 * We have reached the max number of tapes. Append to an existing
1966 * tape.
1967 */
1968 state->destTape = state->outputTapes[state->nOutputRuns % state->nOutputTapes];
1969 state->nOutputRuns++;
1970 }
1971}

References Assert(), and LogicalTapeCreate().

Referenced by dumptuples(), inittapes(), and mergeruns().
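
For example, with maxTapes = 6, runs 1 through 6 each get a freshly created tape; from run 7 onward the else branch takes over, appending run 7 to outputTapes[6 % 6] = outputTapes[0], run 8 to outputTapes[1], and so on around the ring.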

◆ sort_bounded_heap()

static void sort_bounded_heap (Tuplesortstate *state)

Definition at line 2632 of file tuplesort.c.

2633{
2634 int tupcount = state->memtupcount;
2635
2636 Assert(state->status == TSS_BOUNDED);
2637 Assert(state->bounded);
2638 Assert(tupcount == state->bound);
2639 Assert(SERIAL(state));
2640
2641 /*
2642 * We can unheapify in place because each delete-top call will remove the
2643 * largest entry, which we can promptly store in the newly freed slot at
2644 * the end. Once we're down to a single-entry heap, we're done.
2645 */
2646 while (state->memtupcount > 1)
2647 {
2648 SortTuple stup = state->memtuples[0];
2649
2650 /* this sifts-up the next-largest entry and decreases memtupcount */
2651 tuplesort_heap_delete_top(state);
2652 state->memtuples[state->memtupcount] = stup;
2653 }
2654 state->memtupcount = tupcount;
2655
2656 /*
2657 * Reverse sort direction back to the original state. This is not
2658 * actually necessary but seems like a good idea for tidiness.
2659 */
2660 reversedirection(state);
2661
2662 state->status = TSS_SORTEDINMEM;
2663 state->boundUsed = true;
2664}

References Assert(), reversedirection(), SERIAL, TSS_BOUNDED, TSS_SORTEDINMEM, and tuplesort_heap_delete_top().

Referenced by tuplesort_performsort().

◆ ssup_datum_int32_cmp()

int ssup_datum_int32_cmp (Datum x, Datum y, SortSupport ssup)

Definition at line 3158 of file tuplesort.c.

3159{
3160 int32 xx = DatumGetInt32(x);
3161 int32 yy = DatumGetInt32(y);
3162
3163 if (xx < yy)
3164 return -1;
3165 else if (xx > yy)
3166 return 1;
3167 else
3168 return 0;
3169}

References DatumGetInt32(), x, and y.

Referenced by btint4sortsupport(), date_sortsupport(), and tuplesort_sort_memtuples().

◆ ssup_datum_signed_cmp()

int ssup_datum_signed_cmp (Datum x, Datum y, SortSupport ssup)

Definition at line 3144 of file tuplesort.c.

3145{
3146 int64 xx = DatumGetInt64(x);
3147 int64 yy = DatumGetInt64(y);
3148
3149 if (xx < yy)
3150 return -1;
3151 else if (xx > yy)
3152 return 1;
3153 else
3154 return 0;
3155}

References DatumGetInt64(), x, and y.

Referenced by btint8sortsupport(), timestamp_sortsupport(), and tuplesort_sort_memtuples().

◆ ssup_datum_unsigned_cmp()

int ssup_datum_unsigned_cmp (Datum x, Datum y, SortSupport ssup)

Definition at line 3133 of file tuplesort.c.

3134{
3135 if (x < y)
3136 return -1;
3137 else if (x > y)
3138 return 1;
3139 else
3140 return 0;
3141}

References x, and y.

Referenced by gist_point_sortsupport(), macaddr_sortsupport(), network_sortsupport(), tuplesort_sort_memtuples(), uuid_sortsupport(), and varstr_sortsupport().

◆ tuplesort_attach_shared()

void tuplesort_attach_shared (Sharedsort *shared, dsm_segment *seg)

Definition at line 2955 of file tuplesort.c.

2956{
2957 /* Attach to SharedFileSet */
2958 SharedFileSetAttach(&shared->fileset, seg);
2959}

References Sharedsort::fileset, and SharedFileSetAttach().

Referenced by _brin_parallel_build_main(), _bt_parallel_build_main(), and _gin_parallel_build_main().

◆ tuplesort_begin_batch()

static void tuplesort_begin_batch (Tuplesortstate *state)

Definition at line 748 of file tuplesort.c.

749{
750 MemoryContext oldcontext;
751
752 oldcontext = MemoryContextSwitchTo(state->base.maincontext);
753
754 /*
755 * Caller tuple (e.g. IndexTuple) memory context.
756 *
757 * A dedicated child context used exclusively for caller passed tuples
758 * eases memory management. Resetting at key points reduces
759 * fragmentation. Note that the memtuples array of SortTuples is allocated
760 * in the parent context, not this context, because there is no need to
761 * free memtuples early. For bounded sorts, tuples may be pfreed in any
762 * order, so we use a regular aset.c context so that it can make use of
763 * free'd memory. When the sort is not bounded, we make use of a bump.c
764 * context as this keeps allocations more compact with less wastage.
765 * Allocations are also slightly more CPU efficient.
766 */
767 if (TupleSortUseBumpTupleCxt(state->base.sortopt))
768 state->base.tuplecontext = BumpContextCreate(state->base.sortcontext,
769 "Caller tuples",
770 ALLOCSET_DEFAULT_SIZES);
771 else
772 state->base.tuplecontext = AllocSetContextCreate(state->base.sortcontext,
773 "Caller tuples",
774 ALLOCSET_DEFAULT_SIZES);
775
776
777 state->status = TSS_INITIAL;
778 state->bounded = false;
779 state->boundUsed = false;
780
781 state->availMem = state->allowedMem;
782
783 state->tapeset = NULL;
784
785 state->memtupcount = 0;
786
787 /*
788 * Initial size of array must be more than ALLOCSET_SEPARATE_THRESHOLD;
789 * see comments in grow_memtuples().
790 */
791 state->growmemtuples = true;
792 state->slabAllocatorUsed = false;
793 if (state->memtuples != NULL && state->memtupsize != INITIAL_MEMTUPSIZE)
794 {
795 pfree(state->memtuples);
796 state->memtuples = NULL;
797 state->memtupsize = INITIAL_MEMTUPSIZE;
798 }
799 if (state->memtuples == NULL)
800 {
801 state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));
802 USEMEM(state, GetMemoryChunkSpace(state->memtuples));
803 }
804
805 /* workMem must be large enough for the minimal memtuples array */
806 if (LACKMEM(state))
807 elog(ERROR, "insufficient memory allowed for sort");
808
809 state->currentRun = 0;
810
811 /*
812 * Tape variables (inputTapes, outputTapes, etc.) will be initialized by
813 * inittapes(), if needed.
814 */
815
816 state->result_tape = NULL; /* flag that result tape has not been formed */
817
818 MemoryContextSwitchTo(oldcontext);
819}
MemoryContext BumpContextCreate(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: bump.c:133
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
#define INITIAL_MEMTUPSIZE
Definition: tuplesort.c:120
#define TupleSortUseBumpTupleCxt(opt)
Definition: tuplesort.h:109

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, BumpContextCreate(), elog, ERROR, GetMemoryChunkSpace(), INITIAL_MEMTUPSIZE, LACKMEM, MemoryContextSwitchTo(), palloc(), pfree(), TSS_INITIAL, TupleSortUseBumpTupleCxt, and USEMEM.

Referenced by tuplesort_begin_common(), and tuplesort_reset().

◆ tuplesort_begin_common()

Tuplesortstate * tuplesort_begin_common ( int  workMem,
SortCoordinate  coordinate,
int  sortopt 
)

Definition at line 638 of file tuplesort.c.

639{
640 Tuplesortstate *state;
641 MemoryContext maincontext;
642 MemoryContext sortcontext;
643 MemoryContext oldcontext;
644
645 /* See leader_takeover_tapes() remarks on random access support */
646 if (coordinate && (sortopt & TUPLESORT_RANDOMACCESS))
647 elog(ERROR, "random access disallowed under parallel sort");
648
649 /*
650 * Memory context surviving tuplesort_reset. This memory context holds
651 * data which is useful to keep while sorting multiple similar batches.
652 */
654 "TupleSort main",
656
657 /*
658 * Create a working memory context for one sort operation. The content of
659 * this context is deleted by tuplesort_reset.
660 */
661 sortcontext = AllocSetContextCreate(maincontext,
662 "TupleSort sort",
663 ALLOCSET_DEFAULT_SIZES);
664
665 /*
666 * Additionally a working memory context for tuples is setup in
667 * tuplesort_begin_batch.
668 */
669
670 /*
671 * Make the Tuplesortstate within the per-sortstate context. This way, we
672 * don't need a separate pfree() operation for it at shutdown.
673 */
674 oldcontext = MemoryContextSwitchTo(maincontext);
675
676 state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));
677
678 if (trace_sort)
679 pg_rusage_init(&state->ru_start);
680
681 state->base.sortopt = sortopt;
682 state->base.tuples = true;
683 state->abbrevNext = 10;
684
685 /*
686 * workMem is forced to be at least 64KB, the current minimum valid value
687 * for the work_mem GUC. This is a defense against parallel sort callers
688 * that divide out memory among many workers in a way that leaves each
689 * with very little memory.
690 */
691 state->allowedMem = Max(workMem, 64) * (int64) 1024;
692 state->base.sortcontext = sortcontext;
693 state->base.maincontext = maincontext;
694
695 /*
696 * Initial size of array must be more than ALLOCSET_SEPARATE_THRESHOLD;
697 * see comments in grow_memtuples().
698 */
699 state->memtupsize = INITIAL_MEMTUPSIZE;
700 state->memtuples = NULL;
701
702 /*
703 * After all of the other non-parallel-related state, we setup all of the
704 * state needed for each batch.
705 */
706 tuplesort_begin_batch(state);
707
708 /*
709 * Initialize parallel-related state based on coordination information
710 * from caller
711 */
712 if (!coordinate)
713 {
714 /* Serial sort */
715 state->shared = NULL;
716 state->worker = -1;
717 state->nParticipants = -1;
718 }
719 else if (coordinate->isWorker)
720 {
721 /* Parallel worker produces exactly one final run from all input */
722 state->shared = coordinate->sharedsort;
723 state->worker = worker_get_identifier(state);
724 state->nParticipants = -1;
725 }
726 else
727 {
728 /* Parallel leader state only used for final merge */
729 state->shared = coordinate->sharedsort;
730 state->worker = -1;
731 state->nParticipants = coordinate->nParticipants;
732 Assert(state->nParticipants >= 1);
733 }
734
735 MemoryContextSwitchTo(oldcontext);
736
737 return state;
738}
MemoryContext CurrentMemoryContext
Definition: mcxt.c:160
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
Sharedsort * sharedsort
Definition: tuplesort.h:59
static int worker_get_identifier(Tuplesortstate *state)
Definition: tuplesort.c:2975
static void tuplesort_begin_batch(Tuplesortstate *state)
Definition: tuplesort.c:748

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert(), CurrentMemoryContext, elog, ERROR, INITIAL_MEMTUPSIZE, SortCoordinateData::isWorker, Max, MemoryContextSwitchTo(), SortCoordinateData::nParticipants, palloc0(), pg_rusage_init(), SortCoordinateData::sharedsort, trace_sort, tuplesort_begin_batch(), TUPLESORT_RANDOMACCESS, and worker_get_identifier().

Referenced by tuplesort_begin_cluster(), tuplesort_begin_datum(), tuplesort_begin_heap(), tuplesort_begin_index_brin(), tuplesort_begin_index_btree(), tuplesort_begin_index_gin(), tuplesort_begin_index_gist(), and tuplesort_begin_index_hash().
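
As a usage illustration, a minimal serial lifecycle built on one of the wrappers above. This is a sketch assuming the datum-sort API declared in tuplesort.h on git master (in particular the "copy" argument of tuplesort_getdatum()); work_mem is in kilobytes:

Tuplesortstate *ts;
Datum   val;
bool    isnull;

ts = tuplesort_begin_datum(INT4OID, Int4LessOperator, InvalidOid,
                           false, work_mem, NULL, TUPLESORT_NONE);
tuplesort_putdatum(ts, Int32GetDatum(42), false);
tuplesort_putdatum(ts, Int32GetDatum(7), false);
tuplesort_performsort(ts);
while (tuplesort_getdatum(ts, true, false, &val, &isnull, NULL))
    elog(INFO, "next: %d", DatumGetInt32(val));
tuplesort_end(ts);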

◆ tuplesort_end()

◆ tuplesort_estimate_shared()

Size tuplesort_estimate_shared ( int  nWorkers)

Definition at line 2911 of file tuplesort.c.

2912{
2913 Size tapesSize;
2914
2915 Assert(nWorkers > 0);
2916
2917 /* Make sure that BufFile shared state is MAXALIGN'd */
2918 tapesSize = mul_size(sizeof(TapeShare), nWorkers);
2919 tapesSize = MAXALIGN(add_size(tapesSize, offsetof(Sharedsort, tapes)));
2920
2921 return tapesSize;
2922}
#define MAXALIGN(LEN)
Definition: c.h:810
Size add_size(Size s1, Size s2)
Definition: shmem.c:493
Size mul_size(Size s1, Size s2)
Definition: shmem.c:510

References add_size(), Assert(), MAXALIGN, and mul_size().

Referenced by _brin_begin_parallel(), _bt_begin_parallel(), and _gin_begin_parallel().
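
A hedged sketch of how a leader can use this estimate when sizing the parallel DSM segment (pcxt, nworkers, and the omitted shm_toc key bookkeeping are assumptions; compare the _*_begin_parallel() callers above):

Size        estsort = tuplesort_estimate_shared(nworkers);
Sharedsort *sharedsort;

shm_toc_estimate_chunk(&pcxt->estimator, estsort);
shm_toc_estimate_keys(&pcxt->estimator, 1);

/* ... after InitializeParallelDSM(pcxt) ... */
sharedsort = (Sharedsort *) shm_toc_allocate(pcxt->toc, estsort);
tuplesort_initialize_shared(sharedsort, nworkers, pcxt->seg);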

◆ tuplesort_free()

static void tuplesort_free ( Tuplesortstate state)
static

Definition at line 893 of file tuplesort.c.

894{
895 /* context swap probably not needed, but let's be safe */
896 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
897 int64 spaceUsed;
898
899 if (state->tapeset)
900 spaceUsed = LogicalTapeSetBlocks(state->tapeset);
901 else
902 spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
903
904 /*
905 * Delete temporary "tape" files, if any.
906 *
907 * We don't bother to destroy the individual tapes here. They will go away
908 * with the sortcontext. (In TSS_FINALMERGE state, we have closed
909 * finished tapes already.)
910 */
911 if (state->tapeset)
912 LogicalTapeSetClose(state->tapeset);
913
914 if (trace_sort)
915 {
916 if (state->tapeset)
917 elog(LOG, "%s of worker %d ended, %" PRId64 " disk blocks used: %s",
918 SERIAL(state) ? "external sort" : "parallel external sort",
919 state->worker, spaceUsed, pg_rusage_show(&state->ru_start));
920 else
921 elog(LOG, "%s of worker %d ended, %" PRId64 " KB used: %s",
922 SERIAL(state) ? "internal sort" : "unperformed parallel sort",
923 state->worker, spaceUsed, pg_rusage_show(&state->ru_start));
924 }
925
926 TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
927
928 FREESTATE(state);
929 MemoryContextSwitchTo(oldcontext);
930
931 /*
932 * Free the per-sort memory context, thereby releasing all working memory.
933 */
934 MemoryContextReset(state->base.sortcontext);
935}
int64 LogicalTapeSetBlocks(LogicalTapeSet *lts)
Definition: logtape.c:1181
void LogicalTapeSetClose(LogicalTapeSet *lts)
Definition: logtape.c:667
#define FREESTATE(state)
Definition: tuplesort.c:399

References elog, FREESTATE, LOG, LogicalTapeSetBlocks(), LogicalTapeSetClose(), MemoryContextReset(), MemoryContextSwitchTo(), pg_rusage_show(), SERIAL, and trace_sort.

Referenced by tuplesort_end(), and tuplesort_reset().

◆ tuplesort_get_stats()

void tuplesort_get_stats ( Tuplesortstate state,
TuplesortInstrumentation stats 
)

Definition at line 2495 of file tuplesort.c.

2497{
2498 /*
2499 * Note: it might seem we should provide both memory and disk usage for a
2500 * disk-based sort. However, the current code doesn't track memory space
2501 * accurately once we have begun to return tuples to the caller (since we
2502 * don't account for pfree's the caller is expected to do), so we cannot
2503 * rely on availMem in a disk sort. This does not seem worth the overhead
2504 * to fix. Is it worth creating an API for the memory context code to
2505 * tell us how much is actually used in sortcontext?
2506 */
2507 tuplesort_updatemax(state);
2508
2509 if (state->isMaxSpaceDisk)
2510 stats->spaceType = SORT_SPACE_TYPE_DISK;
2511 else
2512 stats->spaceType = SORT_SPACE_TYPE_MEMORY;
2513 stats->spaceUsed = (state->maxSpace + 1023) / 1024;
2514
2515 switch (state->maxSpaceStatus)
2516 {
2517 case TSS_SORTEDINMEM:
2518 if (state->boundUsed)
2519 stats->sortMethod = SORT_TYPE_TOP_N_HEAPSORT;
2520 else
2521 stats->sortMethod = SORT_TYPE_QUICKSORT;
2522 break;
2523 case TSS_SORTEDONTAPE:
2524 stats->sortMethod = SORT_TYPE_EXTERNAL_SORT;
2525 break;
2526 case TSS_FINALMERGE:
2527 stats->sortMethod = SORT_TYPE_EXTERNAL_MERGE;
2528 break;
2529 default:
2530 stats->sortMethod = SORT_TYPE_STILL_IN_PROGRESS;
2531 break;
2532 }
2533}
TuplesortMethod sortMethod
Definition: tuplesort.h:113
TuplesortSpaceType spaceType
Definition: tuplesort.h:114
static void tuplesort_updatemax(Tuplesortstate *state)
Definition: tuplesort.c:964
@ SORT_SPACE_TYPE_DISK
Definition: tuplesort.h:89
@ SORT_SPACE_TYPE_MEMORY
Definition: tuplesort.h:90
@ SORT_TYPE_EXTERNAL_SORT
Definition: tuplesort.h:81
@ SORT_TYPE_TOP_N_HEAPSORT
Definition: tuplesort.h:79
@ SORT_TYPE_QUICKSORT
Definition: tuplesort.h:80
@ SORT_TYPE_STILL_IN_PROGRESS
Definition: tuplesort.h:78
@ SORT_TYPE_EXTERNAL_MERGE
Definition: tuplesort.h:82

References SORT_SPACE_TYPE_DISK, SORT_SPACE_TYPE_MEMORY, SORT_TYPE_EXTERNAL_MERGE, SORT_TYPE_EXTERNAL_SORT, SORT_TYPE_QUICKSORT, SORT_TYPE_STILL_IN_PROGRESS, SORT_TYPE_TOP_N_HEAPSORT, TuplesortInstrumentation::sortMethod, TuplesortInstrumentation::spaceType, TuplesortInstrumentation::spaceUsed, TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and tuplesort_updatemax().

Referenced by ExecSort(), instrumentSortedGroup(), and show_sort_info().
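
For example, a minimal sketch of reading the instrumentation back (the EXPLAIN callers above do the equivalent with richer output):

TuplesortInstrumentation stats;

tuplesort_get_stats(state, &stats);
elog(LOG, "sort method: %s, space used: " INT64_FORMAT " kB (%s)",
     tuplesort_method_name(stats.sortMethod),
     stats.spaceUsed,
     tuplesort_space_type_name(stats.spaceType));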

◆ tuplesort_gettuple_common()

bool tuplesort_gettuple_common ( Tuplesortstate state,
bool  forward,
SortTuple stup 
)

Definition at line 1466 of file tuplesort.c.

1468{
1469 unsigned int tuplen;
1470 size_t nmoved;
1471
1472 Assert(!WORKER(state));
1473
1474 switch (state->status)
1475 {
1476 case TSS_SORTEDINMEM:
1477 Assert(forward || state->base.sortopt & TUPLESORT_RANDOMACCESS);
1478 Assert(!state->slabAllocatorUsed);
1479 if (forward)
1480 {
1481 if (state->current < state->memtupcount)
1482 {
1483 *stup = state->memtuples[state->current++];
1484 return true;
1485 }
1486 state->eof_reached = true;
1487
1488 /*
1489 * Complain if caller tries to retrieve more tuples than
1490 * originally asked for in a bounded sort. This is because
1491 * returning EOF here might be the wrong thing.
1492 */
1493 if (state->bounded && state->current >= state->bound)
1494 elog(ERROR, "retrieved too many tuples in a bounded sort");
1495
1496 return false;
1497 }
1498 else
1499 {
1500 if (state->current <= 0)
1501 return false;
1502
1503 /*
1504 * if all tuples are fetched already then we return last
1505 * tuple, else - tuple before last returned.
1506 */
1507 if (state->eof_reached)
1508 state->eof_reached = false;
1509 else
1510 {
1511 state->current--; /* last returned tuple */
1512 if (state->current <= 0)
1513 return false;
1514 }
1515 *stup = state->memtuples[state->current - 1];
1516 return true;
1517 }
1518 break;
1519
1520 case TSS_SORTEDONTAPE:
1521 Assert(forward || state->base.sortopt & TUPLESORT_RANDOMACCESS);
1522 Assert(state->slabAllocatorUsed);
1523
1524 /*
1525 * The slot that held the tuple that we returned in previous
1526 * gettuple call can now be reused.
1527 */
1528 if (state->lastReturnedTuple)
1529 {
1530 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
1531 state->lastReturnedTuple = NULL;
1532 }
1533
1534 if (forward)
1535 {
1536 if (state->eof_reached)
1537 return false;
1538
1539 if ((tuplen = getlen(state->result_tape, true)) != 0)
1540 {
1541 READTUP(state, stup, state->result_tape, tuplen);
1542
1543 /*
1544 * Remember the tuple we return, so that we can recycle
1545 * its memory on next call. (This can be NULL, in the
1546 * !state->tuples case).
1547 */
1548 state->lastReturnedTuple = stup->tuple;
1549
1550 return true;
1551 }
1552 else
1553 {
1554 state->eof_reached = true;
1555 return false;
1556 }
1557 }
1558
1559 /*
1560 * Backward.
1561 *
1562 * if all tuples are fetched already then we return last tuple,
1563 * else - tuple before last returned.
1564 */
1565 if (state->eof_reached)
1566 {
1567 /*
1568 * Seek position is pointing just past the zero tuplen at the
1569 * end of file; back up to fetch last tuple's ending length
1570 * word. If seek fails we must have a completely empty file.
1571 */
1572 nmoved = LogicalTapeBackspace(state->result_tape,
1573 2 * sizeof(unsigned int));
1574 if (nmoved == 0)
1575 return false;
1576 else if (nmoved != 2 * sizeof(unsigned int))
1577 elog(ERROR, "unexpected tape position");
1578 state->eof_reached = false;
1579 }
1580 else
1581 {
1582 /*
1583 * Back up and fetch previously-returned tuple's ending length
1584 * word. If seek fails, assume we are at start of file.
1585 */
1586 nmoved = LogicalTapeBackspace(state->result_tape,
1587 sizeof(unsigned int));
1588 if (nmoved == 0)
1589 return false;
1590 else if (nmoved != sizeof(unsigned int))
1591 elog(ERROR, "unexpected tape position");
1592 tuplen = getlen(state->result_tape, false);
1593
1594 /*
1595 * Back up to get ending length word of tuple before it.
1596 */
1597 nmoved = LogicalTapeBackspace(state->result_tape,
1598 tuplen + 2 * sizeof(unsigned int));
1599 if (nmoved == tuplen + sizeof(unsigned int))
1600 {
1601 /*
1602 * We backed up over the previous tuple, but there was no
1603 * ending length word before it. That means that the prev
1604 * tuple is the first tuple in the file. It is now the
1605 * next to read in forward direction (not obviously right,
1606 * but that is what in-memory case does).
1607 */
1608 return false;
1609 }
1610 else if (nmoved != tuplen + 2 * sizeof(unsigned int))
1611 elog(ERROR, "bogus tuple length in backward scan");
1612 }
1613
1614 tuplen = getlen(state->result_tape, false);
1615
1616 /*
1617 * Now we have the length of the prior tuple, back up and read it.
1618 * Note: READTUP expects we are positioned after the initial
1619 * length word of the tuple, so back up to that point.
1620 */
1621 nmoved = LogicalTapeBackspace(state->result_tape,
1622 tuplen);
1623 if (nmoved != tuplen)
1624 elog(ERROR, "bogus tuple length in backward scan");
1625 READTUP(state, stup, state->result_tape, tuplen);
1626
1627 /*
1628 * Remember the tuple we return, so that we can recycle its memory
1629 * on next call. (This can be NULL, in the Datum case).
1630 */
1631 state->lastReturnedTuple = stup->tuple;
1632
1633 return true;
1634
1635 case TSS_FINALMERGE:
1636 Assert(forward);
1637 /* We are managing memory ourselves, with the slab allocator. */
1638 Assert(state->slabAllocatorUsed);
1639
1640 /*
1641 * The slab slot holding the tuple that we returned in previous
1642 * gettuple call can now be reused.
1643 */
1644 if (state->lastReturnedTuple)
1645 {
1646 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
1647 state->lastReturnedTuple = NULL;
1648 }
1649
1650 /*
1651 * This code should match the inner loop of mergeonerun().
1652 */
1653 if (state->memtupcount > 0)
1654 {
1655 int srcTapeIndex = state->memtuples[0].srctape;
1656 LogicalTape *srcTape = state->inputTapes[srcTapeIndex];
1657 SortTuple newtup;
1658
1659 *stup = state->memtuples[0];
1660
1661 /*
1662 * Remember the tuple we return, so that we can recycle its
1663 * memory on next call. (This can be NULL, in the Datum case).
1664 */
1665 state->lastReturnedTuple = stup->tuple;
1666
1667 /*
1668 * Pull next tuple from tape, and replace the returned tuple
1669 * at top of the heap with it.
1670 */
1671 if (!mergereadnext(state, srcTape, &newtup))
1672 {
1673 /*
1674 * If no more data, we've reached end of run on this tape.
1675 * Remove the top node from the heap.
1676 */
1677 tuplesort_heap_delete_top(state);
1678 state->nInputRuns--;
1679
1680 /*
1681 * Close the tape. It'd go away at the end of the sort
1682 * anyway, but better to release the memory early.
1683 */
1684 LogicalTapeClose(srcTape);
1685 return true;
1686 }
1687 newtup.srctape = srcTapeIndex;
1688 tuplesort_heap_replace_top(state, &newtup);
1689 return true;
1690 }
1691 return false;
1692
1693 default:
1694 elog(ERROR, "invalid tuplesort state");
1695 return false; /* keep compiler quiet */
1696 }
1697}
size_t LogicalTapeBackspace(LogicalTape *lt, size_t size)
Definition: logtape.c:1062

References Assert(), elog, ERROR, getlen(), LogicalTapeBackspace(), LogicalTapeClose(), mergereadnext(), READTUP, RELEASE_SLAB_SLOT, SortTuple::srctape, TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, SortTuple::tuple, tuplesort_heap_delete_top(), tuplesort_heap_replace_top(), TUPLESORT_RANDOMACCESS, and WORKER.

Referenced by tuplesort_getbrintuple(), tuplesort_getdatum(), tuplesort_getgintuple(), tuplesort_getheaptuple(), tuplesort_getindextuple(), tuplesort_gettupleslot(), and tuplesort_skiptuples().

◆ tuplesort_heap_delete_top()

static void tuplesort_heap_delete_top ( Tuplesortstate state)
static

Definition at line 2768 of file tuplesort.c.

2769{
2770 SortTuple *memtuples = state->memtuples;
2771 SortTuple *tuple;
2772
2773 if (--state->memtupcount <= 0)
2774 return;
2775
2776 /*
2777 * Remove the last tuple in the heap, and re-insert it, by replacing the
2778 * current top node with it.
2779 */
2780 tuple = &memtuples[state->memtupcount];
2781 tuplesort_heap_replace_top(state, tuple);
2782}

References tuplesort_heap_replace_top().

Referenced by mergeonerun(), sort_bounded_heap(), and tuplesort_gettuple_common().

◆ tuplesort_heap_insert()

static void tuplesort_heap_insert ( Tuplesortstate state,
SortTuple tuple 
)
static

Definition at line 2733 of file tuplesort.c.

2734{
2735 SortTuple *memtuples;
2736 int j;
2737
2738 memtuples = state->memtuples;
2739 Assert(state->memtupcount < state->memtupsize);
2740
2741 CHECK_FOR_INTERRUPTS();
2742
2743 /*
2744 * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
2745 * using 1-based array indexes, not 0-based.
2746 */
2747 j = state->memtupcount++;
2748 while (j > 0)
2749 {
2750 int i = (j - 1) >> 1;
2751
2752 if (COMPARETUP(state, tuple, &memtuples[i]) >= 0)
2753 break;
2754 memtuples[j] = memtuples[i];
2755 j = i;
2756 }
2757 memtuples[j] = *tuple;
2758}

References Assert(), CHECK_FOR_INTERRUPTS, COMPARETUP, i, and j.

Referenced by beginmerge(), and make_bounded_heap().

◆ tuplesort_heap_replace_top()

static void tuplesort_heap_replace_top ( Tuplesortstate state,
SortTuple tuple 
)
static

Definition at line 2792 of file tuplesort.c.

2793{
2794 SortTuple *memtuples = state->memtuples;
2795 unsigned int i,
2796 n;
2797
2798 Assert(state->memtupcount >= 1);
2799
2800 CHECK_FOR_INTERRUPTS();
2801
2802 /*
2803 * state->memtupcount is "int", but we use "unsigned int" for i, j, n.
2804 * This prevents overflow in the "2 * i + 1" calculation, since at the top
2805 * of the loop we must have i < n <= INT_MAX <= UINT_MAX/2.
2806 */
2807 n = state->memtupcount;
2808 i = 0; /* i is where the "hole" is */
2809 for (;;)
2810 {
2811 unsigned int j = 2 * i + 1;
2812
2813 if (j >= n)
2814 break;
2815 if (j + 1 < n &&
2816 COMPARETUP(state, &memtuples[j], &memtuples[j + 1]) > 0)
2817 j++;
2818 if (COMPARETUP(state, tuple, &memtuples[j]) <= 0)
2819 break;
2820 memtuples[i] = memtuples[j];
2821 i = j;
2822 }
2823 memtuples[i] = *tuple;
2824}

References Assert(), CHECK_FOR_INTERRUPTS, COMPARETUP, i, and j.

Referenced by make_bounded_heap(), mergeonerun(), tuplesort_gettuple_common(), tuplesort_heap_delete_top(), and tuplesort_puttuple_common().
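
To make the sift-down concrete, here is a standalone sketch of the same algorithm on a plain int array in min-heap order. It is an illustration only: replace_top and the int keys are assumptions, standing in for SortTuple and COMPARETUP:

static void
replace_top(int *heap, unsigned int n, int newval)
{
    unsigned int i = 0;             /* i is where the "hole" is */

    for (;;)
    {
        unsigned int j = 2 * i + 1;

        if (j >= n)
            break;
        if (j + 1 < n && heap[j + 1] < heap[j])
            j++;                    /* descend toward the smaller child */
        if (newval <= heap[j])
            break;                  /* new value belongs in the hole */
        heap[i] = heap[j];          /* pull the child up into the hole */
        i = j;
    }
    heap[i] = newval;
}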

◆ tuplesort_initialize_shared()

void tuplesort_initialize_shared ( Sharedsort shared,
int  nWorkers,
dsm_segment seg 
)

Definition at line 2932 of file tuplesort.c.

2933{
2934 int i;
2935
2936 Assert(nWorkers > 0);
2937
2938 SpinLockInit(&shared->mutex);
2939 shared->currentWorker = 0;
2940 shared->workersFinished = 0;
2941 SharedFileSetInit(&shared->fileset, seg);
2942 shared->nTapes = nWorkers;
2943 for (i = 0; i < nWorkers; i++)
2944 {
2945 shared->tapes[i].firstblocknumber = 0L;
2946 }
2947}
void SharedFileSetInit(SharedFileSet *fileset, dsm_segment *seg)
Definition: sharedfileset.c:38
#define SpinLockInit(lock)
Definition: spin.h:57
int nTapes
Definition: tuplesort.c:363
int currentWorker
Definition: tuplesort.c:356
int64 firstblocknumber
Definition: logtape.h:54

References Assert(), Sharedsort::currentWorker, Sharedsort::fileset, TapeShare::firstblocknumber, i, Sharedsort::mutex, Sharedsort::nTapes, SharedFileSetInit(), SpinLockInit, Sharedsort::tapes, and Sharedsort::workersFinished.

Referenced by _brin_begin_parallel(), _bt_begin_parallel(), and _gin_begin_parallel().

◆ tuplesort_markpos()

void tuplesort_markpos ( Tuplesortstate state)

Definition at line 2431 of file tuplesort.c.

2432{
2433 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2434
2435 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2436
2437 switch (state->status)
2438 {
2439 case TSS_SORTEDINMEM:
2440 state->markpos_offset = state->current;
2441 state->markpos_eof = state->eof_reached;
2442 break;
2443 case TSS_SORTEDONTAPE:
2444 LogicalTapeTell(state->result_tape,
2445 &state->markpos_block,
2446 &state->markpos_offset);
2447 state->markpos_eof = state->eof_reached;
2448 break;
2449 default:
2450 elog(ERROR, "invalid tuplesort state");
2451 break;
2452 }
2453
2454 MemoryContextSwitchTo(oldcontext);
2455}
void LogicalTapeTell(LogicalTape *lt, int64 *blocknum, int *offset)
Definition: logtape.c:1162

References Assert(), elog, ERROR, LogicalTapeTell(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecSortMarkPos().

◆ tuplesort_merge_order()

int tuplesort_merge_order ( int64  allowedMem)

Definition at line 1774 of file tuplesort.c.

1775{
1776 int mOrder;
1777
1778 /*----------
1779 * In the merge phase, we need buffer space for each input and output tape.
1780 * Each pass in the balanced merge algorithm reads from M input tapes, and
1781 * writes to N output tapes. Each tape consumes TAPE_BUFFER_OVERHEAD bytes
1782 * of memory. In addition to that, we want MERGE_BUFFER_SIZE workspace per
1783 * input tape.
1784 *
1785 * totalMem = M * (TAPE_BUFFER_OVERHEAD + MERGE_BUFFER_SIZE) +
1786 * N * TAPE_BUFFER_OVERHEAD
1787 *
1788 * Except for the last and next-to-last merge passes, where there can be
1789 * fewer tapes left to process, M = N. We choose M so that we have the
1790 * desired amount of memory available for the input buffers
1791 * (TAPE_BUFFER_OVERHEAD + MERGE_BUFFER_SIZE), given the total memory
1792 * available for the tape buffers (allowedMem).
1793 *
1794 * Note: you might be thinking we need to account for the memtuples[]
1795 * array in this calculation, but we effectively treat that as part of the
1796 * MERGE_BUFFER_SIZE workspace.
1797 *----------
1798 */
1799 mOrder = allowedMem /
1800 (2 * TAPE_BUFFER_OVERHEAD + MERGE_BUFFER_SIZE);
1801
1802 /*
1803 * Even in minimum memory, use at least a MINORDER merge. On the other
1804 * hand, even when we have lots of memory, do not use more than a MAXORDER
1805 * merge. Tapes are pretty cheap, but they're not entirely free. Each
1806 * additional tape reduces the amount of memory available to build runs,
1807 * which in turn can cause the same sort to need more runs, which makes
1808 * merging slower even if it can still be done in a single pass. Also,
1809 * high order merges are quite slow due to CPU cache effects; it can be
1810 * faster to pay the I/O cost of a multi-pass merge than to perform a
1811 * single merge pass across many hundreds of tapes.
1812 */
1813 mOrder = Max(mOrder, MINORDER);
1814 mOrder = Min(mOrder, MAXORDER);
1815
1816 return mOrder;
1817}
#define MAXORDER
Definition: tuplesort.c:177
#define MERGE_BUFFER_SIZE
Definition: tuplesort.c:179

References Max, MAXORDER, MERGE_BUFFER_SIZE, Min, MINORDER, and TAPE_BUFFER_OVERHEAD.

Referenced by cost_tuplesort(), and inittapes().
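
As a worked example (assuming the default 8 kB BLCKSZ, so TAPE_BUFFER_OVERHEAD = 8192 and MERGE_BUFFER_SIZE = 262144): with allowedMem = 4 MB, mOrder = 4194304 / (2 * 8192 + 262144) = 15, which already lies inside the [MINORDER, MAXORDER] = [6, 500] clamp. With the minimum 64 kB of allowedMem the division yields 0, and the result is raised to MINORDER = 6.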

◆ tuplesort_method_name()

const char * tuplesort_method_name ( TuplesortMethod  m)

Definition at line 2539 of file tuplesort.c.

2540{
2541 switch (m)
2542 {
2544 return "still in progress";
2546 return "top-N heapsort";
2548 return "quicksort";
2550 return "external sort";
2552 return "external merge";
2553 }
2554
2555 return "unknown";
2556}

References SORT_TYPE_EXTERNAL_MERGE, SORT_TYPE_EXTERNAL_SORT, SORT_TYPE_QUICKSORT, SORT_TYPE_STILL_IN_PROGRESS, and SORT_TYPE_TOP_N_HEAPSORT.

Referenced by show_incremental_sort_group_info(), and show_sort_info().

◆ tuplesort_performsort()

void tuplesort_performsort ( Tuplesortstate state)

Definition at line 1359 of file tuplesort.c.

1360{
1361 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1362
1363 if (trace_sort)
1364 elog(LOG, "performsort of worker %d starting: %s",
1365 state->worker, pg_rusage_show(&state->ru_start));
1366
1367 switch (state->status)
1368 {
1369 case TSS_INITIAL:
1370
1371 /*
1372 * We were able to accumulate all the tuples within the allowed
1373 * amount of memory, or leader to take over worker tapes
1374 */
1375 if (SERIAL(state))
1376 {
1377 /* Just qsort 'em and we're done */
1378 tuplesort_sort_memtuples(state);
1379 state->status = TSS_SORTEDINMEM;
1380 }
1381 else if (WORKER(state))
1382 {
1383 /*
1384 * Parallel workers must still dump out tuples to tape. No
1385 * merge is required to produce single output run, though.
1386 */
1387 inittapes(state, false);
1388 dumptuples(state, true);
1389 worker_nomergeruns(state);
1390 state->status = TSS_SORTEDONTAPE;
1391 }
1392 else
1393 {
1394 /*
1395 * Leader will take over worker tapes and merge worker runs.
1396 * Note that mergeruns sets the correct state->status.
1397 */
1398 leader_takeover_tapes(state);
1399 mergeruns(state);
1400 }
1401 state->current = 0;
1402 state->eof_reached = false;
1403 state->markpos_block = 0L;
1404 state->markpos_offset = 0;
1405 state->markpos_eof = false;
1406 break;
1407
1408 case TSS_BOUNDED:
1409
1410 /*
1411 * We were able to accumulate all the tuples required for output
1412 * in memory, using a heap to eliminate excess tuples. Now we
1413 * have to transform the heap to a properly-sorted array. Note
1414 * that sort_bounded_heap sets the correct state->status.
1415 */
1416 sort_bounded_heap(state);
1417 state->current = 0;
1418 state->eof_reached = false;
1419 state->markpos_offset = 0;
1420 state->markpos_eof = false;
1421 break;
1422
1423 case TSS_BUILDRUNS:
1424
1425 /*
1426 * Finish tape-based sort. First, flush all tuples remaining in
1427 * memory out to tape; then merge until we have a single remaining
1428 * run (or, if !randomAccess and !WORKER(), one run per tape).
1429 * Note that mergeruns sets the correct state->status.
1430 */
1431 dumptuples(state, true);
1432 mergeruns(state);
1433 state->eof_reached = false;
1434 state->markpos_block = 0L;
1435 state->markpos_offset = 0;
1436 state->markpos_eof = false;
1437 break;
1438
1439 default:
1440 elog(ERROR, "invalid tuplesort state");
1441 break;
1442 }
1443
1444 if (trace_sort)
1445 {
1446 if (state->status == TSS_FINALMERGE)
1447 elog(LOG, "performsort of worker %d done (except %d-way final merge): %s",
1448 state->worker, state->nInputTapes,
1449 pg_rusage_show(&state->ru_start));
1450 else
1451 elog(LOG, "performsort of worker %d done: %s",
1452 state->worker, pg_rusage_show(&state->ru_start));
1453 }
1454
1455 MemoryContextSwitchTo(oldcontext);
1456}
static void sort_bounded_heap(Tuplesortstate *state)
Definition: tuplesort.c:2632
static void leader_takeover_tapes(Tuplesortstate *state)
Definition: tuplesort.c:3063
static void inittapes(Tuplesortstate *state, bool mergeruns)
Definition: tuplesort.c:1861
static void worker_nomergeruns(Tuplesortstate *state)
Definition: tuplesort.c:3041
static void dumptuples(Tuplesortstate *state, bool alltuples)
Definition: tuplesort.c:2303

References dumptuples(), elog, ERROR, inittapes(), leader_takeover_tapes(), LOG, MemoryContextSwitchTo(), mergeruns(), pg_rusage_show(), SERIAL, sort_bounded_heap(), trace_sort, TSS_BOUNDED, TSS_BUILDRUNS, TSS_FINALMERGE, TSS_INITIAL, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, tuplesort_sort_memtuples(), WORKER, and worker_nomergeruns().

Referenced by _brin_parallel_merge(), _brin_parallel_scan_and_build(), _bt_leafbuild(), _bt_parallel_scan_and_sort(), _gin_parallel_merge(), _gin_parallel_scan_and_build(), _gin_process_worker_data(), _h_indexbuild(), array_sort_internal(), ExecIncrementalSort(), ExecSort(), gistbuild(), heapam_relation_copy_for_cluster(), hypothetical_dense_rank_final(), hypothetical_rank_common(), initialize_phase(), mode_final(), percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), percentile_disc_multi_final(), process_ordered_aggregate_multi(), process_ordered_aggregate_single(), switchToPresortedPrefixMode(), and validate_index().

◆ tuplesort_puttuple_common()

void tuplesort_puttuple_common ( Tuplesortstate state,
SortTuple tuple,
bool  useAbbrev,
Size  tuplen 
)

Definition at line 1165 of file tuplesort.c.

1167{
1168 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1169
1170 Assert(!LEADER(state));
1171
1172 /* account for the memory used for this tuple */
1173 USEMEM(state, tuplen);
1174 state->tupleMem += tuplen;
1175
1176 if (!useAbbrev)
1177 {
1178 /*
1179 * Leave ordinary Datum representation, or NULL value. If there is a
1180 * converter it won't expect NULL values, and cost model is not
1181 * required to account for NULL, so in that case we avoid calling
1182 * converter and just set datum1 to zeroed representation (to be
1183 * consistent, and to support cheap inequality tests for NULL
1184 * abbreviated keys).
1185 */
1186 }
1187 else if (!consider_abort_common(state))
1188 {
1189 /* Store abbreviated key representation */
1190 tuple->datum1 = state->base.sortKeys->abbrev_converter(tuple->datum1,
1191 state->base.sortKeys);
1192 }
1193 else
1194 {
1195 /*
1196 * Set state to be consistent with never trying abbreviation.
1197 *
1198 * Alter datum1 representation in already-copied tuples, so as to
1199 * ensure a consistent representation (current tuple was just
1200 * handled). It does not matter if some dumped tuples are already
1201 * sorted on tape, since serialized tuples lack abbreviated keys
1202 * (TSS_BUILDRUNS state prevents control reaching here in any case).
1203 */
1204 REMOVEABBREV(state, state->memtuples, state->memtupcount);
1205 }
1206
1207 switch (state->status)
1208 {
1209 case TSS_INITIAL:
1210
1211 /*
1212 * Save the tuple into the unsorted array. First, grow the array
1213 * as needed. Note that we try to grow the array when there is
1214 * still one free slot remaining --- if we fail, there'll still be
1215 * room to store the incoming tuple, and then we'll switch to
1216 * tape-based operation.
1217 */
1218 if (state->memtupcount >= state->memtupsize - 1)
1219 {
1220 (void) grow_memtuples(state);
1221 Assert(state->memtupcount < state->memtupsize);
1222 }
1223 state->memtuples[state->memtupcount++] = *tuple;
1224
1225 /*
1226 * Check if it's time to switch over to a bounded heapsort. We do
1227 * so if the input tuple count exceeds twice the desired tuple
1228 * count (this is a heuristic for where heapsort becomes cheaper
1229 * than a quicksort), or if we've just filled workMem and have
1230 * enough tuples to meet the bound.
1231 *
1232 * Note that once we enter TSS_BOUNDED state we will always try to
1233 * complete the sort that way. In the worst case, if later input
1234 * tuples are larger than earlier ones, this might cause us to
1235 * exceed workMem significantly.
1236 */
1237 if (state->bounded &&
1238 (state->memtupcount > state->bound * 2 ||
1239 (state->memtupcount > state->bound && LACKMEM(state))))
1240 {
1241 if (trace_sort)
1242 elog(LOG, "switching to bounded heapsort at %d tuples: %s",
1243 state->memtupcount,
1244 pg_rusage_show(&state->ru_start));
1245 make_bounded_heap(state);
1246 MemoryContextSwitchTo(oldcontext);
1247 return;
1248 }
1249
1250 /*
1251 * Done if we still fit in available memory and have array slots.
1252 */
1253 if (state->memtupcount < state->memtupsize && !LACKMEM(state))
1254 {
1255 MemoryContextSwitchTo(oldcontext);
1256 return;
1257 }
1258
1259 /*
1260 * Nope; time to switch to tape-based operation.
1261 */
1262 inittapes(state, true);
1263
1264 /*
1265 * Dump all tuples.
1266 */
1267 dumptuples(state, false);
1268 break;
1269
1270 case TSS_BOUNDED:
1271
1272 /*
1273 * We don't want to grow the array here, so check whether the new
1274 * tuple can be discarded before putting it in. This should be a
1275 * good speed optimization, too, since when there are many more
1276 * input tuples than the bound, most input tuples can be discarded
1277 * with just this one comparison. Note that because we currently
1278 * have the sort direction reversed, we must check for <= not >=.
1279 */
1280 if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
1281 {
1282 /* new tuple <= top of the heap, so we can discard it */
1283 free_sort_tuple(state, tuple);
1284 CHECK_FOR_INTERRUPTS();
1285 }
1286 else
1287 {
1288 /* discard top of heap, replacing it with the new tuple */
1289 free_sort_tuple(state, &state->memtuples[0]);
1290 tuplesort_heap_replace_top(state, tuple);
1291 }
1292 break;
1293
1294 case TSS_BUILDRUNS:
1295
1296 /*
1297 * Save the tuple into the unsorted array (there must be space)
1298 */
1299 state->memtuples[state->memtupcount++] = *tuple;
1300
1301 /*
1302 * If we are over the memory limit, dump all tuples.
1303 */
1304 dumptuples(state, false);
1305 break;
1306
1307 default:
1308 elog(ERROR, "invalid tuplesort state");
1309 break;
1310 }
1311 MemoryContextSwitchTo(oldcontext);
1312}
Datum datum1
Definition: tuplesort.h:151
#define REMOVEABBREV(state, stup, count)
Definition: tuplesort.c:395
static bool grow_memtuples(Tuplesortstate *state)
Definition: tuplesort.c:1048
static void make_bounded_heap(Tuplesortstate *state)
Definition: tuplesort.c:2583
static bool consider_abort_common(Tuplesortstate *state)
Definition: tuplesort.c:1315

References Assert(), CHECK_FOR_INTERRUPTS, COMPARETUP, consider_abort_common(), SortTuple::datum1, dumptuples(), elog, ERROR, free_sort_tuple(), grow_memtuples(), inittapes(), LACKMEM, LEADER, LOG, make_bounded_heap(), MemoryContextSwitchTo(), pg_rusage_show(), REMOVEABBREV, trace_sort, TSS_BOUNDED, TSS_BUILDRUNS, TSS_INITIAL, tuplesort_heap_replace_top(), and USEMEM.

Referenced by tuplesort_putbrintuple(), tuplesort_putdatum(), tuplesort_putgintuple(), tuplesort_putheaptuple(), tuplesort_putindextuplevalues(), and tuplesort_puttupleslot().
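
As a worked example of the TSS_INITIAL switch heuristic: with a bound of 100, the sort converts to a bounded heap either when the 201st tuple arrives (memtupcount > bound * 2), or earlier, as soon as workMem fills while more than 100 tuples are in memory (memtupcount > bound with LACKMEM true).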

◆ tuplesort_readtup_alloc()

void * tuplesort_readtup_alloc ( Tuplesortstate state,
Size  tuplen 
)

Definition at line 2877 of file tuplesort.c.

2878{
2879 SlabSlot *buf;
2880
2881 /*
2882 * We pre-allocate enough slots in the slab arena that we should never run
2883 * out.
2884 */
2885 Assert(state->slabFreeHead);
2886
2887 if (tuplen > SLAB_SLOT_SIZE || !state->slabFreeHead)
2888 return MemoryContextAlloc(state->base.sortcontext, tuplen);
2889 else
2890 {
2891 buf = state->slabFreeHead;
2892 /* Reuse this slot */
2893 state->slabFreeHead = buf->nextfree;
2894
2895 return buf;
2896 }
2897}

References Assert(), buf, MemoryContextAlloc(), and SLAB_SLOT_SIZE.

Referenced by readtup_cluster(), readtup_datum(), readtup_heap(), readtup_index(), readtup_index_brin(), and readtup_index_gin().

◆ tuplesort_rescan()

void tuplesort_rescan ( Tuplesortstate state)

Definition at line 2398 of file tuplesort.c.

2399{
2400 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2401
2402 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2403
2404 switch (state->status)
2405 {
2406 case TSS_SORTEDINMEM:
2407 state->current = 0;
2408 state->eof_reached = false;
2409 state->markpos_offset = 0;
2410 state->markpos_eof = false;
2411 break;
2412 case TSS_SORTEDONTAPE:
2413 LogicalTapeRewindForRead(state->result_tape, 0);
2414 state->eof_reached = false;
2415 state->markpos_block = 0L;
2416 state->markpos_offset = 0;
2417 state->markpos_eof = false;
2418 break;
2419 default:
2420 elog(ERROR, "invalid tuplesort state");
2421 break;
2422 }
2423
2424 MemoryContextSwitchTo(oldcontext);
2425}

References Assert(), elog, ERROR, LogicalTapeRewindForRead(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecReScanSort(), mode_final(), percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), and percentile_disc_multi_final().

◆ tuplesort_reset()

void tuplesort_reset ( Tuplesortstate state)

Definition at line 1015 of file tuplesort.c.

1016{
1017 tuplesort_updatemax(state);
1018 tuplesort_free(state);
1019
1020 /*
1021 * After we've freed up per-batch memory, re-setup all of the state common
1022 * to both the first batch and any subsequent batch.
1023 */
1024 tuplesort_begin_batch(state);
1025
1026 state->lastReturnedTuple = NULL;
1027 state->slabMemoryBegin = NULL;
1028 state->slabMemoryEnd = NULL;
1029 state->slabFreeHead = NULL;
1030}

References tuplesort_begin_batch(), tuplesort_free(), and tuplesort_updatemax().

Referenced by ExecIncrementalSort(), ExecReScanIncrementalSort(), and switchToPresortedPrefixMode().
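
A hedged sketch of the batch-reuse pattern the incremental-sort callers above rely on (loop structure and the ts/done names are assumptions):

for (;;)
{
    /* ... feed one batch of tuples with tuplesort_puttupleslot() ... */
    tuplesort_performsort(ts);
    /* ... drain the batch with tuplesort_gettupleslot() ... */
    if (done)
        break;
    tuplesort_reset(ts);    /* keep contexts and memtuples for the next batch */
}
tuplesort_end(ts);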

◆ tuplesort_restorepos()

void tuplesort_restorepos ( Tuplesortstate state)

Definition at line 2462 of file tuplesort.c.

2463{
2464 MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
2465
2466 Assert(state->base.sortopt & TUPLESORT_RANDOMACCESS);
2467
2468 switch (state->status)
2469 {
2470 case TSS_SORTEDINMEM:
2471 state->current = state->markpos_offset;
2472 state->eof_reached = state->markpos_eof;
2473 break;
2474 case TSS_SORTEDONTAPE:
2475 LogicalTapeSeek(state->result_tape,
2476 state->markpos_block,
2477 state->markpos_offset);
2478 state->eof_reached = state->markpos_eof;
2479 break;
2480 default:
2481 elog(ERROR, "invalid tuplesort state");
2482 break;
2483 }
2484
2485 MemoryContextSwitchTo(oldcontext);
2486}
void LogicalTapeSeek(LogicalTape *lt, int64 blocknum, int offset)
Definition: logtape.c:1133

References Assert(), elog, ERROR, LogicalTapeSeek(), MemoryContextSwitchTo(), TSS_SORTEDINMEM, TSS_SORTEDONTAPE, and TUPLESORT_RANDOMACCESS.

Referenced by ExecSortRestrPos().

◆ tuplesort_set_bound()

void tuplesort_set_bound ( Tuplesortstate state,
int64  bound 
)

Definition at line 834 of file tuplesort.c.

835{
836 /* Assert we're called before loading any tuples */
837 Assert(state->status == TSS_INITIAL && state->memtupcount == 0);
838 /* Assert we allow bounded sorts */
839 Assert(state->base.sortopt & TUPLESORT_ALLOWBOUNDED);
840 /* Can't set the bound twice, either */
841 Assert(!state->bounded);
842 /* Also, this shouldn't be called in a parallel worker */
843 Assert(!WORKER(state));
844
845 /* Parallel leader allows but ignores hint */
846 if (LEADER(state))
847 return;
848
849#ifdef DEBUG_BOUNDED_SORT
850 /* Honor GUC setting that disables the feature (for easy testing) */
851 if (!optimize_bounded_sort)
852 return;
853#endif
854
855 /* We want to be able to compute bound * 2, so limit the setting */
856 if (bound > (int64) (INT_MAX / 2))
857 return;
858
859 state->bounded = true;
860 state->bound = (int) bound;
861
862 /*
863 * Bounded sorts are not an effective target for abbreviated key
864 * optimization. Disable by setting state to be consistent with no
865 * abbreviation support.
866 */
867 state->base.sortKeys->abbrev_converter = NULL;
868 if (state->base.sortKeys->abbrev_full_comparator)
869 state->base.sortKeys->comparator = state->base.sortKeys->abbrev_full_comparator;
870
871 /* Not strictly necessary, but be tidy */
872 state->base.sortKeys->abbrev_abort = NULL;
873 state->base.sortKeys->abbrev_full_comparator = NULL;
874}
#define TUPLESORT_ALLOWBOUNDED
Definition: tuplesort.h:100

References Assert(), LEADER, TSS_INITIAL, TUPLESORT_ALLOWBOUNDED, and WORKER.

Referenced by ExecIncrementalSort(), ExecSort(), and switchToPresortedPrefixMode().
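
A minimal sketch of a LIMIT-style caller (the limit_* names are assumptions; note that the sort must have been created with TUPLESORT_ALLOWBOUNDED for this call to be legal):

ts = tuplesort_begin_heap(tupDesc, nkeys, attNums, sortOperators,
                          sortCollations, nullsFirstFlags,
                          work_mem, NULL, TUPLESORT_ALLOWBOUNDED);
if (limit_known)
    tuplesort_set_bound(ts, limit_count);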

◆ tuplesort_skiptuples()

bool tuplesort_skiptuples ( Tuplesortstate state,
int64  ntuples,
bool  forward 
)

Definition at line 1706 of file tuplesort.c.

1707{
1708 MemoryContext oldcontext;
1709
1710 /*
1711 * We don't actually support backwards skip yet, because no callers need
1712 * it. The API is designed to allow for that later, though.
1713 */
1714 Assert(forward);
1715 Assert(ntuples >= 0);
1716 Assert(!WORKER(state));
1717
1718 switch (state->status)
1719 {
1720 case TSS_SORTEDINMEM:
1721 if (state->memtupcount - state->current >= ntuples)
1722 {
1723 state->current += ntuples;
1724 return true;
1725 }
1726 state->current = state->memtupcount;
1727 state->eof_reached = true;
1728
1729 /*
1730 * Complain if caller tries to retrieve more tuples than
1731 * originally asked for in a bounded sort. This is because
1732 * returning EOF here might be the wrong thing.
1733 */
1734 if (state->bounded && state->current >= state->bound)
1735 elog(ERROR, "retrieved too many tuples in a bounded sort");
1736
1737 return false;
1738
1739 case TSS_SORTEDONTAPE:
1740 case TSS_FINALMERGE:
1741
1742 /*
1743 * We could probably optimize these cases better, but for now it's
1744 * not worth the trouble.
1745 */
1746 oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
1747 while (ntuples-- > 0)
1748 {
1749 SortTuple stup;
1750
1751 if (!tuplesort_gettuple_common(state, forward, &stup))
1752 {
1753 MemoryContextSwitchTo(oldcontext);
1754 return false;
1755 }
1756 CHECK_FOR_INTERRUPTS();
1757 }
1758 MemoryContextSwitchTo(oldcontext);
1759 return true;
1760
1761 default:
1762 elog(ERROR, "invalid tuplesort state");
1763 return false; /* keep compiler quiet */
1764 }
1765}
bool tuplesort_gettuple_common(Tuplesortstate *state, bool forward, SortTuple *stup)
Definition: tuplesort.c:1466

References Assert(), CHECK_FOR_INTERRUPTS, elog, ERROR, MemoryContextSwitchTo(), TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, tuplesort_gettuple_common(), and WORKER.

Referenced by percentile_cont_final_common(), percentile_cont_multi_final_common(), percentile_disc_final(), and percentile_disc_multi_final().
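
A hedged sketch in the style of the percentile callers above: position on the k'th row (0-based) after tuplesort_performsort(), assuming a datum sort and the master tuplesort_getdatum() signature:

Datum   val;
bool    isnull;

if (k > 0 && !tuplesort_skiptuples(ts, k, true))
    elog(ERROR, "missing row in sorted result");
if (!tuplesort_getdatum(ts, true, true, &val, &isnull, NULL))
    elog(ERROR, "missing row in sorted result");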

◆ tuplesort_sort_memtuples()

static void tuplesort_sort_memtuples ( Tuplesortstate state)
static

Definition at line 2672 of file tuplesort.c.

2673{
2674 Assert(!LEADER(state));
2675
2676 if (state->memtupcount > 1)
2677 {
2678 /*
2679 * Do we have the leading column's value or abbreviation in datum1,
2680 * and is there a specialization for its comparator?
2681 */
2682 if (state->base.haveDatum1 && state->base.sortKeys)
2683 {
2684 if (state->base.sortKeys[0].comparator == ssup_datum_unsigned_cmp)
2685 {
2686 qsort_tuple_unsigned(state->memtuples,
2687 state->memtupcount,
2688 state);
2689 return;
2690 }
2691 else if (state->base.sortKeys[0].comparator == ssup_datum_signed_cmp)
2692 {
2693 qsort_tuple_signed(state->memtuples,
2694 state->memtupcount,
2695 state);
2696 return;
2697 }
2698 else if (state->base.sortKeys[0].comparator == ssup_datum_int32_cmp)
2699 {
2700 qsort_tuple_int32(state->memtuples,
2701 state->memtupcount,
2702 state);
2703 return;
2704 }
2705 }
2706
2707 /* Can we use the single-key sort function? */
2708 if (state->base.onlyKey != NULL)
2709 {
2710 qsort_ssup(state->memtuples, state->memtupcount,
2711 state->base.onlyKey);
2712 }
2713 else
2714 {
2715 qsort_tuple(state->memtuples,
2716 state->memtupcount,
2717 state->base.comparetup,
2718 state);
2719 }
2720 }
2721}
int ssup_datum_signed_cmp(Datum x, Datum y, SortSupport ssup)
Definition: tuplesort.c:3144
int ssup_datum_unsigned_cmp(Datum x, Datum y, SortSupport ssup)
Definition: tuplesort.c:3133
int ssup_datum_int32_cmp(Datum x, Datum y, SortSupport ssup)
Definition: tuplesort.c:3158

References Assert(), LEADER, ssup_datum_int32_cmp(), ssup_datum_signed_cmp(), and ssup_datum_unsigned_cmp().

Referenced by dumptuples(), and tuplesort_performsort().

◆ tuplesort_space_type_name()

const char * tuplesort_space_type_name ( TuplesortSpaceType  t)

Definition at line 2562 of file tuplesort.c.

2563{
2565 return t == SORT_SPACE_TYPE_DISK ? "Disk" : "Memory";
2566}

References Assert(), SORT_SPACE_TYPE_DISK, and SORT_SPACE_TYPE_MEMORY.

Referenced by show_incremental_sort_group_info(), and show_sort_info().

◆ tuplesort_updatemax()

static void tuplesort_updatemax ( Tuplesortstate state)
static

Definition at line 964 of file tuplesort.c.

965{
966 int64 spaceUsed;
967 bool isSpaceDisk;
968
969 /*
970 * Note: it might seem we should provide both memory and disk usage for a
971 * disk-based sort. However, the current code doesn't track memory space
972 * accurately once we have begun to return tuples to the caller (since we
973 * don't account for pfree's the caller is expected to do), so we cannot
974 * rely on availMem in a disk sort. This does not seem worth the overhead
975 * to fix. Is it worth creating an API for the memory context code to
976 * tell us how much is actually used in sortcontext?
977 */
978 if (state->tapeset)
979 {
980 isSpaceDisk = true;
981 spaceUsed = LogicalTapeSetBlocks(state->tapeset) * BLCKSZ;
982 }
983 else
984 {
985 isSpaceDisk = false;
986 spaceUsed = state->allowedMem - state->availMem;
987 }
988
989 /*
990 * Sort evicts data to the disk when it wasn't able to fit that data into
991 * main memory. This is why we assume space used on the disk to be more
992 * important for tracking resource usage than space used in memory. Note
993 * that the amount of space occupied by some tupleset on the disk might be
994 * less than amount of space occupied by the same tupleset in memory due
995 * to more compact representation.
996 */
997 if ((isSpaceDisk && !state->isMaxSpaceDisk) ||
998 (isSpaceDisk == state->isMaxSpaceDisk && spaceUsed > state->maxSpace))
999 {
1000 state->maxSpace = spaceUsed;
1001 state->isMaxSpaceDisk = isSpaceDisk;
1002 state->maxSpaceStatus = state->status;
1003 }
1004}

References LogicalTapeSetBlocks().

Referenced by tuplesort_get_stats(), and tuplesort_reset().

◆ tuplesort_used_bound()

bool tuplesort_used_bound ( Tuplesortstate state)

Definition at line 882 of file tuplesort.c.

883{
884 return state->boundUsed;
885}

Referenced by ExecIncrementalSort().

◆ worker_freeze_result_tape()

static void worker_freeze_result_tape ( Tuplesortstate state)
static

Definition at line 3003 of file tuplesort.c.

3004{
3005 Sharedsort *shared = state->shared;
3006 TapeShare output;
3007
3008 Assert(WORKER(state));
3009 Assert(state->result_tape != NULL);
3010 Assert(state->memtupcount == 0);
3011
3012 /*
3013 * Free most remaining memory, in case caller is sensitive to our holding
3014 * on to it. memtuples may not be a tiny merge heap at this point.
3015 */
3016 pfree(state->memtuples);
3017 /* Be tidy */
3018 state->memtuples = NULL;
3019 state->memtupsize = 0;
3020
3021 /*
3022 * Parallel worker requires result tape metadata, which is to be stored in
3023 * shared memory for leader
3024 */
3025 LogicalTapeFreeze(state->result_tape, &output);
3026
3027 /* Store properties of output tape, and update finished worker count */
3028 SpinLockAcquire(&shared->mutex);
3029 shared->tapes[state->worker] = output;
3030 shared->workersFinished++;
3031 SpinLockRelease(&shared->mutex);
3032}
FILE * output

References Assert(), LogicalTapeFreeze(), Sharedsort::mutex, output, pfree(), SpinLockAcquire, SpinLockRelease, Sharedsort::tapes, WORKER, and Sharedsort::workersFinished.

Referenced by mergeruns(), and worker_nomergeruns().

◆ worker_get_identifier()

static int worker_get_identifier ( Tuplesortstate state)
static

Definition at line 2975 of file tuplesort.c.

2976{
2977 Sharedsort *shared = state->shared;
2978 int worker;
2979
2980 Assert(WORKER(state));
2981
2982 SpinLockAcquire(&shared->mutex);
2983 worker = shared->currentWorker++;
2984 SpinLockRelease(&shared->mutex);
2985
2986 return worker;
2987}

References Assert(), Sharedsort::currentWorker, Sharedsort::mutex, SpinLockAcquire, SpinLockRelease, and WORKER.

Referenced by tuplesort_begin_common().

◆ worker_nomergeruns()

static void worker_nomergeruns ( Tuplesortstate state)
static

Definition at line 3041 of file tuplesort.c.

3042{
3043 Assert(WORKER(state));
3044 Assert(state->result_tape == NULL);
3045 Assert(state->nOutputRuns == 1);
3046
3047 state->result_tape = state->destTape;
3048 worker_freeze_result_tape(state);
3049}

References Assert(), WORKER, and worker_freeze_result_tape().

Referenced by tuplesort_performsort().

Variable Documentation

◆ trace_sort