/*
 * Initial number of entries in the memtuples[] array: at least 1024, and
 * at least enough SortTuples that the array allocation is larger than
 * ALLOCSET_SEPARATE_THRESHOLD bytes (note the +1 after the division).
 */
118#define INITIAL_MEMTUPSIZE Max(1024, \
119 ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1)
124#ifdef DEBUG_BOUNDED_SORT
125bool optimize_bounded_sort =
true;
/* Size in bytes of one slot in the slab allocator arena (see SlabSlot). */
140#define SLAB_SLOT_SIZE 1024

/* Assumed per-tape buffer overhead, in bytes: one disk block. */
176#define TAPE_BUFFER_OVERHEAD BLCKSZ
/*
 * Memory assumed per input tape while merging (32 blocks).
 * NOTE(review): presumably the target read-buffer size per input tape —
 * confirm against merge_read_buffer_size().
 */
177#define MERGE_BUFFER_SIZE (BLCKSZ * 32)
/*
 * True iff "tuple" points into the slab arena, i.e. lies within
 * [slabMemoryBegin, slabMemoryEnd).  Used to tell slab-allocated tuples
 * apart from ordinary palloc'd ones.
 */
373#define IS_SLAB_SLOT(state, tuple) \
374 ((char *) (tuple) >= (state)->slabMemoryBegin && \
375 (char *) (tuple) < (state)->slabMemoryEnd)
381#define RELEASE_SLAB_SLOT(state, tuple) \
383 SlabSlot *buf = (SlabSlot *) tuple; \
385 if (IS_SLAB_SLOT((state), buf)) \
387 buf->nextfree = (state)->slabFreeHead; \
388 (state)->slabFreeHead = buf; \
/*
 * Convenience wrappers for the per-sort-variant callbacks stored in
 * state->base, plus the memory-accounting and parallel-role helpers.
 */
393#define REMOVEABBREV(state,stup,count) ((*(state)->base.removeabbrev) (state, stup, count))
394#define COMPARETUP(state,a,b) ((*(state)->base.comparetup) (a, b, state))
395#define WRITETUP(state,tape,stup) ((*(state)->base.writetup) (state, tape, stup))
396#define READTUP(state,stup,tape,len) ((*(state)->base.readtup) (state, stup, tape, len))
/* freestate is optional; do nothing when the callback is NULL */
397#define FREESTATE(state) ((state)->base.freestate ? (*(state)->base.freestate) (state) : (void) 0)
/*
 * True when the memory budget is exhausted (availMem has gone negative).
 * Never true once the slab allocator is in use, since slab memory is not
 * tracked through availMem.
 */
398#define LACKMEM(state) ((state)->availMem < 0 && !(state)->slabAllocatorUsed)
/* Charge / refund "amt" bytes against the tracked memory budget. */
399#define USEMEM(state,amt) ((state)->availMem -= (amt))
400#define FREEMEM(state,amt) ((state)->availMem += (amt))
/*
 * Role of this Tuplesortstate: SERIAL = no shared state (non-parallel);
 * WORKER = parallel participant with a worker number; LEADER = parallel
 * coordinator (worker == -1).
 */
401#define SERIAL(state) ((state)->shared == NULL)
402#define WORKER(state) ((state)->shared && (state)->worker != -1)
403#define LEADER(state) ((state)->shared && (state)->worker == -1)
498 b->datum1,
b->isnull1,
499 &
state->base.sortKeys[0]);
507 if (
state->base.onlyKey != NULL)
520 b->datum1,
b->isnull1,
521 &
state->base.sortKeys[0]);
530 if (
state->base.onlyKey != NULL)
543 b->datum1,
b->isnull1,
544 &
state->base.sortKeys[0]);
553 if (
state->base.onlyKey != NULL)
/*
 * Parameter sets for generated, specialized qsort routines in the style of
 * lib/sort_template.h.  Each group configures one specialization; the
 * template #include that instantiates each variant (and the matching
 * #undefs) is not visible in this chunk of the file.
 */
/* Specialization using qsort_tuple_unsigned_compare() on SortTuple. */
568#define ST_SORT qsort_tuple_unsigned
569#define ST_ELEMENT_TYPE SortTuple
570#define ST_COMPARE(a, b, state) qsort_tuple_unsigned_compare(a, b, state)
571#define ST_COMPARE_ARG_TYPE Tuplesortstate
572#define ST_CHECK_FOR_INTERRUPTS
573#define ST_SCOPE static
/* Specialization using qsort_tuple_signed_compare() on SortTuple. */
577#define ST_SORT qsort_tuple_signed
578#define ST_ELEMENT_TYPE SortTuple
579#define ST_COMPARE(a, b, state) qsort_tuple_signed_compare(a, b, state)
580#define ST_COMPARE_ARG_TYPE Tuplesortstate
581#define ST_CHECK_FOR_INTERRUPTS
582#define ST_SCOPE static
/* Specialization using qsort_tuple_int32_compare() on SortTuple. */
586#define ST_SORT qsort_tuple_int32
587#define ST_ELEMENT_TYPE SortTuple
588#define ST_COMPARE(a, b, state) qsort_tuple_int32_compare(a, b, state)
589#define ST_COMPARE_ARG_TYPE Tuplesortstate
590#define ST_CHECK_FOR_INTERRUPTS
591#define ST_SCOPE static
/* Generic variant: comparator supplied at runtime via a function pointer. */
595#define ST_SORT qsort_tuple
596#define ST_ELEMENT_TYPE SortTuple
597#define ST_COMPARE_RUNTIME_POINTER
598#define ST_COMPARE_ARG_TYPE Tuplesortstate
599#define ST_CHECK_FOR_INTERRUPTS
600#define ST_SCOPE static
/*
 * Single-key variant: compares datum1/isnull1 directly through a
 * SortSupport comparator, bypassing the full comparetup callback.
 */
605#define ST_SORT qsort_ssup
606#define ST_ELEMENT_TYPE SortTuple
607#define ST_COMPARE(a, b, ssup) \
608 ApplySortComparator((a)->datum1, (a)->isnull1, \
609 (b)->datum1, (b)->isnull1, (ssup))
610#define ST_COMPARE_ARG_TYPE SortSupportData
611#define ST_CHECK_FOR_INTERRUPTS
612#define ST_SCOPE static
645 elog(
ERROR,
"random access disallowed under parallel sort");
679 state->base.sortopt = sortopt;
680 state->base.tuples =
true;
681 state->abbrevNext = 10;
690 state->base.sortcontext = sortcontext;
691 state->base.maincontext = maincontext;
694 state->memtuples = NULL;
709 state->shared = NULL;
711 state->nParticipants = -1;
718 state->nParticipants = -1;
772 state->bounded =
false;
773 state->boundUsed =
false;
777 state->tapeset = NULL;
779 state->memtupcount = 0;
781 state->growmemtuples =
true;
782 state->slabAllocatorUsed =
false;
786 state->memtuples = NULL;
789 if (
state->memtuples == NULL)
797 elog(
ERROR,
"insufficient memory allowed for sort");
799 state->currentRun = 0;
806 state->result_tape = NULL;
839#ifdef DEBUG_BOUNDED_SORT
841 if (!optimize_bounded_sort)
846 if (bound > (
int64) (INT_MAX / 2))
849 state->bounded =
true;
850 state->bound = (int) bound;
857 state->base.sortKeys->abbrev_converter = NULL;
858 if (
state->base.sortKeys->abbrev_full_comparator)
859 state->base.sortKeys->comparator =
state->base.sortKeys->abbrev_full_comparator;
862 state->base.sortKeys->abbrev_abort = NULL;
863 state->base.sortKeys->abbrev_full_comparator = NULL;
874 return state->boundUsed;
892 spaceUsed = (
state->allowedMem -
state->availMem + 1023) / 1024;
907 elog(
LOG,
"%s of worker %d ended, %" PRId64
" disk blocks used: %s",
908 SERIAL(
state) ?
"external sort" :
"parallel external sort",
911 elog(
LOG,
"%s of worker %d ended, %" PRId64
" KB used: %s",
912 SERIAL(
state) ?
"internal sort" :
"unperformed parallel sort",
916 TRACE_POSTGRESQL_SORT_DONE(
state->tapeset != NULL, spaceUsed);
976 spaceUsed =
state->allowedMem -
state->availMem;
987 if ((isSpaceDisk && !
state->isMaxSpaceDisk) ||
988 (isSpaceDisk ==
state->isMaxSpaceDisk && spaceUsed >
state->maxSpace))
990 state->maxSpace = spaceUsed;
991 state->isMaxSpaceDisk = isSpaceDisk;
1016 state->lastReturnedTuple = NULL;
1017 state->slabMemoryBegin = NULL;
1018 state->slabMemoryEnd = NULL;
1019 state->slabFreeHead = NULL;
1041 int memtupsize =
state->memtupsize;
1045 if (!
state->growmemtuples)
1049 if (memNowUsed <= state->availMem)
1055 if (memtupsize < INT_MAX / 2)
1056 newmemtupsize = memtupsize * 2;
1059 newmemtupsize = INT_MAX;
1060 state->growmemtuples =
false;
1093 grow_ratio = (double)
state->allowedMem / (
double) memNowUsed;
1094 if (memtupsize * grow_ratio < INT_MAX)
1095 newmemtupsize = (int) (memtupsize * grow_ratio);
1097 newmemtupsize = INT_MAX;
1100 state->growmemtuples =
false;
1104 if (newmemtupsize <= memtupsize)
1117 state->growmemtuples =
false;
1136 state->memtupsize = newmemtupsize;
1142 elog(
ERROR,
"unexpected out-of-memory situation in tuplesort");
1147 state->growmemtuples =
false;
1156 bool useAbbrev,
Size tuplen)
1164 state->tupleMem += tuplen;
1181 state->base.sortKeys);
1197 switch (
state->status)
1208 if (
state->memtupcount >=
state->memtupsize - 1)
1213 state->memtuples[
state->memtupcount++] = *tuple;
1227 if (
state->bounded &&
1232 elog(
LOG,
"switching to bounded heapsort at %d tuples: %s",
1289 state->memtuples[
state->memtupcount++] = *tuple;
1307 Assert(
state->base.sortKeys[0].abbrev_converter != NULL);
1308 Assert(
state->base.sortKeys[0].abbrev_abort != NULL);
1309 Assert(
state->base.sortKeys[0].abbrev_full_comparator != NULL);
1318 state->abbrevNext *= 2;
1324 if (!
state->base.sortKeys->abbrev_abort(
state->memtupcount,
1325 state->base.sortKeys))
1332 state->base.sortKeys[0].comparator =
state->base.sortKeys[0].abbrev_full_comparator;
1333 state->base.sortKeys[0].abbrev_converter = NULL;
1335 state->base.sortKeys[0].abbrev_abort = NULL;
1336 state->base.sortKeys[0].abbrev_full_comparator = NULL;
1354 elog(
LOG,
"performsort of worker %d starting: %s",
1357 switch (
state->status)
1392 state->eof_reached =
false;
1393 state->markpos_block = 0L;
1394 state->markpos_offset = 0;
1395 state->markpos_eof =
false;
1408 state->eof_reached =
false;
1409 state->markpos_offset = 0;
1410 state->markpos_eof =
false;
1423 state->eof_reached =
false;
1424 state->markpos_block = 0L;
1425 state->markpos_offset = 0;
1426 state->markpos_eof =
false;
1437 elog(
LOG,
"performsort of worker %d done (except %d-way final merge): %s",
1441 elog(
LOG,
"performsort of worker %d done: %s",
1459 unsigned int tuplen;
1464 switch (
state->status)
1476 state->eof_reached =
true;
1484 elog(
ERROR,
"retrieved too many tuples in a bounded sort");
1490 if (
state->current <= 0)
1497 if (
state->eof_reached)
1498 state->eof_reached =
false;
1502 if (
state->current <= 0)
1505 *stup =
state->memtuples[
state->current - 1];
1518 if (
state->lastReturnedTuple)
1521 state->lastReturnedTuple = NULL;
1526 if (
state->eof_reached)
1529 if ((tuplen =
getlen(
state->result_tape,
true)) != 0)
1544 state->eof_reached =
true;
1555 if (
state->eof_reached)
1563 2 *
sizeof(
unsigned int));
1566 else if (nmoved != 2 *
sizeof(
unsigned int))
1567 elog(
ERROR,
"unexpected tape position");
1568 state->eof_reached =
false;
1577 sizeof(
unsigned int));
1580 else if (nmoved !=
sizeof(
unsigned int))
1581 elog(
ERROR,
"unexpected tape position");
1588 tuplen + 2 *
sizeof(
unsigned int));
1589 if (nmoved == tuplen +
sizeof(
unsigned int))
1600 else if (nmoved != tuplen + 2 *
sizeof(
unsigned int))
1601 elog(
ERROR,
"bogus tuple length in backward scan");
1613 if (nmoved != tuplen)
1614 elog(
ERROR,
"bogus tuple length in backward scan");
1634 if (
state->lastReturnedTuple)
1637 state->lastReturnedTuple = NULL;
1643 if (
state->memtupcount > 0)
1645 int srcTapeIndex =
state->memtuples[0].srctape;
1649 *stup =
state->memtuples[0];
1668 state->nInputRuns--;
1677 newtup.
srctape = srcTapeIndex;
1708 switch (
state->status)
1711 if (
state->memtupcount -
state->current >= ntuples)
1713 state->current += ntuples;
1717 state->eof_reached =
true;
1725 elog(
ERROR,
"retrieved too many tuples in a bounded sort");
1737 while (ntuples-- > 0)
1789 mOrder = allowedMem /
1830 nOutputRuns = (nInputRuns + nInputTapes - 1) / nInputTapes;
1832 nOutputTapes =
Min(nOutputRuns, maxOutputTapes);
1868 elog(
LOG,
"worker %d switching to external sort with %d tapes: %s",
1875 state->shared ? &
state->shared->fileset : NULL,
1878 state->currentRun = 0;
1883 state->inputTapes = NULL;
1884 state->nInputTapes = 0;
1885 state->nInputRuns = 0;
1888 state->nOutputTapes = 0;
1889 state->nOutputRuns = 0;
1949 state->nOutputTapes++;
1950 state->nOutputRuns++;
1959 state->nOutputRuns++;
1975 state->slabMemoryEnd =
state->slabMemoryBegin +
1980 p =
state->slabMemoryBegin;
1981 for (
i = 0;
i < numSlots - 1;
i++)
1990 state->slabMemoryBegin =
state->slabMemoryEnd = NULL;
1991 state->slabFreeHead = NULL;
1993 state->slabAllocatorUsed =
true;
2010 if (
state->base.sortKeys != NULL &&
state->base.sortKeys->abbrev_converter != NULL)
2018 state->base.sortKeys->abbrev_converter = NULL;
2019 state->base.sortKeys->comparator =
state->base.sortKeys->abbrev_full_comparator;
2022 state->base.sortKeys->abbrev_abort = NULL;
2023 state->base.sortKeys->abbrev_full_comparator = NULL;
2038 state->memtuples = NULL;
2053 if (
state->base.tuples)
2079 elog(
LOG,
"worker %d using %zu KB of memory for tape buffers",
2080 state->worker,
state->tape_buffer_mem / 1024);
2090 if (
state->nInputRuns == 0)
2092 int64 input_buffer_size;
2095 if (
state->nInputTapes > 0)
2097 for (tapenum = 0; tapenum <
state->nInputTapes; tapenum++)
2113 state->nOutputTapes = 0;
2114 state->nOutputRuns = 0;
2126 elog(
LOG,
"starting merge pass of %d input runs on %d tapes, " INT64_FORMAT " KB of memory for each input tape: %s",
2127 state->nInputRuns,
state->nInputTapes, input_buffer_size / 1024,
2131 for (tapenum = 0; tapenum <
state->nInputTapes; tapenum++)
2163 if (
state->nInputRuns == 0 &&
state->nOutputRuns <= 1)
2178 for (tapenum = 0; tapenum <
state->nInputTapes; tapenum++)
2204 while (
state->memtupcount > 0)
2209 srcTapeIndex =
state->memtuples[0].srctape;
2210 srcTape =
state->inputTapes[srcTapeIndex];
2214 if (
state->memtuples[0].tuple)
2229 state->nInputRuns--;
2256 for (srcTapeIndex = 0; srcTapeIndex < activeTapes; srcTapeIndex++)
2276 unsigned int tuplen;
2279 if ((tuplen =
getlen(srcTape,
true)) == 0)
2314 if (
state->memtupcount == 0 &&
state->currentRun > 0)
2323 if (
state->currentRun == INT_MAX)
2325 (
errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
2326 errmsg(
"cannot have more than %d runs for an external sort",
2329 if (
state->currentRun > 0)
2332 state->currentRun++;
2335 elog(
LOG,
"worker %d starting quicksort of run %d: %s",
2346 elog(
LOG,
"worker %d finished quicksort of run %d: %s",
2350 memtupwrite =
state->memtupcount;
2351 for (
i = 0;
i < memtupwrite;
i++)
2358 state->memtupcount = 0;
2374 state->tupleMem = 0;
2379 elog(
LOG,
"worker %d finished writing run %d to tape %d: %s",
2394 switch (
state->status)
2398 state->eof_reached =
false;
2399 state->markpos_offset = 0;
2400 state->markpos_eof =
false;
2404 state->eof_reached =
false;
2405 state->markpos_block = 0L;
2406 state->markpos_offset = 0;
2407 state->markpos_eof =
false;
2427 switch (
state->status)
2435 &
state->markpos_block,
2436 &
state->markpos_offset);
2458 switch (
state->status)
2466 state->markpos_block,
2467 state->markpos_offset);
2499 if (
state->isMaxSpaceDisk)
2505 switch (
state->maxSpaceStatus)
2508 if (
state->boundUsed)
2534 return "still in progress";
2536 return "top-N heapsort";
2540 return "external sort";
2542 return "external merge";
2575 int tupcount =
state->memtupcount;
2586 state->memtupcount = 0;
2587 for (
i = 0;
i < tupcount;
i++)
2624 int tupcount =
state->memtupcount;
2636 while (
state->memtupcount > 1)
2644 state->memtupcount = tupcount;
2653 state->boundUsed =
true;
2666 if (
state->memtupcount > 1)
2672 if (
state->base.haveDatum1 &&
state->base.sortKeys)
2676 qsort_tuple_unsigned(
state->memtuples,
2683 qsort_tuple_signed(
state->memtuples,
2690 qsort_tuple_int32(
state->memtuples,
2698 if (
state->base.onlyKey != NULL)
2700 qsort_ssup(
state->memtuples,
state->memtupcount,
2701 state->base.onlyKey);
2705 qsort_tuple(
state->memtuples,
2707 state->base.comparetup,
2728 memtuples =
state->memtuples;
2737 j =
state->memtupcount++;
2740 int i = (
j - 1) >> 1;
2744 memtuples[
j] = memtuples[
i];
2747 memtuples[
j] = *tuple;
2763 if (--
state->memtupcount <= 0)
2770 tuple = &memtuples[
state->memtupcount];
2797 n =
state->memtupcount;
2801 unsigned int j = 2 *
i + 1;
2810 memtuples[
i] = memtuples[
j];
2813 memtuples[
i] = *tuple;
2827 for (nkey = 0; nkey <
state->base.nKeys; nkey++, sortKey++)
2847 if (
len == 0 && !eofOK)
2855 unsigned int len = 0;
2883 state->slabFreeHead =
buf->nextfree;
2932 shared->
nTapes = nWorkers;
2933 for (
i = 0;
i < nWorkers;
i++)
3008 state->memtuples = NULL;
3009 state->memtupsize = 0;
3056 int nParticipants =
state->nParticipants;
3057 int workersFinished;
3061 Assert(nParticipants >= 1);
3067 if (nParticipants != workersFinished)
3068 elog(
ERROR,
"cannot take over tapes before all workers finish");
3082 state->currentRun = nParticipants;
3092 state->inputTapes = NULL;
3093 state->nInputTapes = 0;
3094 state->nInputRuns = 0;
3097 state->nOutputTapes = nParticipants;
3098 state->nOutputRuns = nParticipants;
3100 for (
j = 0;
j < nParticipants;
j++)
void PrepareTempTablespaces(void)
MemoryContext BumpContextCreate(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
#define FLEXIBLE_ARRAY_MEMBER
#define pg_attribute_always_inline
int errcode(int sqlerrcode)
int errmsg(const char *fmt,...)
#define ereport(elevel,...)
#define palloc0_object(type)
static int compare(const void *arg1, const void *arg2)
Assert(PointerIsAligned(start, uint64))
void LogicalTapeRewindForRead(LogicalTape *lt, size_t buffer_size)
void LogicalTapeSetForgetFreeSpace(LogicalTapeSet *lts)
size_t LogicalTapeBackspace(LogicalTape *lt, size_t size)
size_t LogicalTapeRead(LogicalTape *lt, void *ptr, size_t size)
int64 LogicalTapeSetBlocks(LogicalTapeSet *lts)
void LogicalTapeClose(LogicalTape *lt)
void LogicalTapeSetClose(LogicalTapeSet *lts)
void LogicalTapeSeek(LogicalTape *lt, int64 blocknum, int offset)
LogicalTapeSet * LogicalTapeSetCreate(bool preallocate, SharedFileSet *fileset, int worker)
void LogicalTapeTell(LogicalTape *lt, int64 *blocknum, int *offset)
void LogicalTapeWrite(LogicalTape *lt, const void *ptr, size_t size)
LogicalTape * LogicalTapeCreate(LogicalTapeSet *lts)
void LogicalTapeFreeze(LogicalTape *lt, TapeShare *share)
LogicalTape * LogicalTapeImport(LogicalTapeSet *lts, int worker, TapeShare *shared)
void * MemoryContextAlloc(MemoryContext context, Size size)
void MemoryContextReset(MemoryContext context)
void pfree(void *pointer)
Size GetMemoryChunkSpace(void *pointer)
void * palloc0(Size size)
MemoryContext CurrentMemoryContext
void MemoryContextDelete(MemoryContext context)
void * repalloc_huge(void *pointer, Size size)
void MemoryContextResetOnly(MemoryContext context)
#define AllocSetContextCreate
#define ALLOCSET_DEFAULT_SIZES
#define CHECK_FOR_INTERRUPTS()
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
const char * pg_rusage_show(const PGRUsage *ru0)
void pg_rusage_init(PGRUsage *ru0)
static char buf[DEFAULT_XLOG_SEG_SIZE]
static int64 DatumGetInt64(Datum X)
static int32 DatumGetInt32(Datum X)
void SharedFileSetAttach(SharedFileSet *fileset, dsm_segment *seg)
void SharedFileSetInit(SharedFileSet *fileset, dsm_segment *seg)
Size add_size(Size s1, Size s2)
Size mul_size(Size s1, Size s2)
static int ApplySignedSortComparator(Datum datum1, bool isNull1, Datum datum2, bool isNull2, SortSupport ssup)
static int ApplyUnsignedSortComparator(Datum datum1, bool isNull1, Datum datum2, bool isNull2, SortSupport ssup)
static int ApplyInt32SortComparator(Datum datum1, bool isNull1, Datum datum2, bool isNull2, SortSupport ssup)
#define SpinLockInit(lock)
#define SpinLockRelease(lock)
#define SpinLockAcquire(lock)
TapeShare tapes[FLEXIBLE_ARRAY_MEMBER]
TuplesortMethod sortMethod
TuplesortSpaceType spaceType
LogicalTape ** inputTapes
LogicalTape ** outputTapes
TupSortStatus maxSpaceStatus
LogicalTape * result_tape
void tuplesort_rescan(Tuplesortstate *state)
void tuplesort_performsort(Tuplesortstate *state)
int tuplesort_merge_order(int64 allowedMem)
#define TAPE_BUFFER_OVERHEAD
static void tuplesort_heap_delete_top(Tuplesortstate *state)
#define INITIAL_MEMTUPSIZE
static unsigned int getlen(LogicalTape *tape, bool eofOK)
void tuplesort_initialize_shared(Sharedsort *shared, int nWorkers, dsm_segment *seg)
#define COMPARETUP(state, a, b)
static void selectnewtape(Tuplesortstate *state)
void tuplesort_reset(Tuplesortstate *state)
static void markrunend(LogicalTape *tape)
bool tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
#define REMOVEABBREV(state, stup, count)
static void reversedirection(Tuplesortstate *state)
#define USEMEM(state, amt)
static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple)
int ssup_datum_signed_cmp(Datum x, Datum y, SortSupport ssup)
static bool grow_memtuples(Tuplesortstate *state)
int ssup_datum_unsigned_cmp(Datum x, Datum y, SortSupport ssup)
static void beginmerge(Tuplesortstate *state)
static void make_bounded_heap(Tuplesortstate *state)
bool tuplesort_used_bound(Tuplesortstate *state)
#define WRITETUP(state, tape, stup)
static void sort_bounded_heap(Tuplesortstate *state)
static int worker_get_identifier(Tuplesortstate *state)
static void mergeonerun(Tuplesortstate *state)
#define FREEMEM(state, amt)
static void inittapestate(Tuplesortstate *state, int maxTapes)
static void leader_takeover_tapes(Tuplesortstate *state)
Size tuplesort_estimate_shared(int nWorkers)
void tuplesort_get_stats(Tuplesortstate *state, TuplesortInstrumentation *stats)
Tuplesortstate * tuplesort_begin_common(int workMem, SortCoordinate coordinate, int sortopt)
static void tuplesort_sort_memtuples(Tuplesortstate *state)
void tuplesort_end(Tuplesortstate *state)
static void inittapes(Tuplesortstate *state, bool mergeruns)
void tuplesort_markpos(Tuplesortstate *state)
void tuplesort_puttuple_common(Tuplesortstate *state, SortTuple *tuple, bool useAbbrev, Size tuplen)
const char * tuplesort_space_type_name(TuplesortSpaceType t)
#define MERGE_BUFFER_SIZE
#define READTUP(state, stup, tape, len)
int ssup_datum_int32_cmp(Datum x, Datum y, SortSupport ssup)
bool tuplesort_gettuple_common(Tuplesortstate *state, bool forward, SortTuple *stup)
static int64 merge_read_buffer_size(int64 avail_mem, int nInputTapes, int nInputRuns, int maxOutputTapes)
static bool mergereadnext(Tuplesortstate *state, LogicalTape *srcTape, SortTuple *stup)
static void tuplesort_updatemax(Tuplesortstate *state)
static void worker_freeze_result_tape(Tuplesortstate *state)
static pg_attribute_always_inline int qsort_tuple_signed_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
#define RELEASE_SLAB_SLOT(state, tuple)
void tuplesort_attach_shared(Sharedsort *shared, dsm_segment *seg)
static void worker_nomergeruns(Tuplesortstate *state)
const char * tuplesort_method_name(TuplesortMethod m)
static pg_attribute_always_inline int qsort_tuple_unsigned_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
static void tuplesort_heap_replace_top(Tuplesortstate *state, SortTuple *tuple)
void tuplesort_restorepos(Tuplesortstate *state)
static pg_attribute_always_inline int qsort_tuple_int32_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
static void mergeruns(Tuplesortstate *state)
void * tuplesort_readtup_alloc(Tuplesortstate *state, Size tuplen)
static void tuplesort_begin_batch(Tuplesortstate *state)
void tuplesort_set_bound(Tuplesortstate *state, int64 bound)
static void init_slab_allocator(Tuplesortstate *state, int numSlots)
static bool consider_abort_common(Tuplesortstate *state)
static void tuplesort_free(Tuplesortstate *state)
static void dumptuples(Tuplesortstate *state, bool alltuples)
#define TupleSortUseBumpTupleCxt(opt)
#define TUPLESORT_RANDOMACCESS
#define TUPLESORT_ALLOWBOUNDED
@ SORT_TYPE_EXTERNAL_SORT
@ SORT_TYPE_TOP_N_HEAPSORT
@ SORT_TYPE_STILL_IN_PROGRESS
@ SORT_TYPE_EXTERNAL_MERGE
char buffer[SLAB_SLOT_SIZE]
union SlabSlot * nextfree