/*
 * Initial number of entries in the memtuples[] array.  Sized so the
 * array allocation comes out larger than ALLOCSET_SEPARATE_THRESHOLD
 * bytes (hence the "+ 1"), but never fewer than 1024 entries.
 */
120 #define INITIAL_MEMTUPSIZE Max(1024, \
121 ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1)
126 #ifdef DEBUG_BOUNDED_SORT
127 bool optimize_bounded_sort =
true;
/* Size in bytes of each fixed slab-allocator slot (see SlabSlot's buffer[]). */
142 #define SLAB_SLOT_SIZE 1024
/*
 * Memory-budgeting constants for the merge phase, in bytes:
 * TAPE_BUFFER_OVERHEAD is the per-tape bookkeeping overhead assumed when
 * dividing workMem among tapes; MERGE_BUFFER_SIZE is the target read
 * buffer per input tape.  Both are expressed in units of BLCKSZ.
 */
178 #define TAPE_BUFFER_OVERHEAD BLCKSZ
179 #define MERGE_BUFFER_SIZE (BLCKSZ * 32)
/*
 * True iff "tuple" points into the slab allocator's arena, i.e. its
 * address lies within [slabMemoryBegin, slabMemoryEnd).  Used to decide
 * whether a tuple's memory came from the slab or from palloc.
 */
375 #define IS_SLAB_SLOT(state, tuple) \
376 ((char *) (tuple) >= (state)->slabMemoryBegin && \
377 (char *) (tuple) < (state)->slabMemoryEnd)
/*
 * Return a tuple's memory to its source: if the pointer falls inside the
 * slab arena (IS_SLAB_SLOT), the slot is pushed onto the slabFreeHead
 * free list for reuse.
 * NOTE(review): several continuation lines of this macro (the do/while
 * wrapper, braces, and the non-slab fallback path) are not visible in
 * this excerpt -- confirm against the full source before modifying.
 */
383 #define RELEASE_SLAB_SLOT(state, tuple) \
385 SlabSlot *buf = (SlabSlot *) tuple; \
387 if (IS_SLAB_SLOT((state), buf)) \
389 buf->nextfree = (state)->slabFreeHead; \
390 (state)->slabFreeHead = buf; \
/*
 * Convenience wrappers that dispatch through the per-sort-variant
 * function pointers stored in state->base (removeabbrev, comparetup,
 * writetup, readtup, freestate -- the last is optional and a no-op when
 * NULL), plus simple memory-accounting and parallel-role predicates:
 *
 * LACKMEM  - true when the availMem budget has gone negative, but only
 *            while the slab allocator is not in use (slab memory is not
 *            tracked through availMem).
 * USEMEM / FREEMEM - debit / credit "amt" bytes against availMem.
 * SERIAL / WORKER / LEADER - classify this process's role in a parallel
 *            sort; worker == -1 marks the leader, shared == NULL means a
 *            plain serial sort.
 */
395 #define REMOVEABBREV(state,stup,count) ((*(state)->base.removeabbrev) (state, stup, count))
396 #define COMPARETUP(state,a,b) ((*(state)->base.comparetup) (a, b, state))
397 #define WRITETUP(state,tape,stup) ((*(state)->base.writetup) (state, tape, stup))
398 #define READTUP(state,stup,tape,len) ((*(state)->base.readtup) (state, stup, tape, len))
399 #define FREESTATE(state) ((state)->base.freestate ? (*(state)->base.freestate) (state) : (void) 0)
400 #define LACKMEM(state) ((state)->availMem < 0 && !(state)->slabAllocatorUsed)
401 #define USEMEM(state,amt) ((state)->availMem -= (amt))
402 #define FREEMEM(state,amt) ((state)->availMem += (amt))
403 #define SERIAL(state) ((state)->shared == NULL)
404 #define WORKER(state) ((state)->shared && (state)->worker != -1)
405 #define LEADER(state) ((state)->shared && (state)->worker == -1)
500 b->datum1,
b->isnull1,
501 &
state->base.sortKeys[0]);
509 if (
state->base.onlyKey != NULL)
515 #if SIZEOF_DATUM >= 8
522 compare = ApplySignedSortComparator(
a->datum1,
a->isnull1,
523 b->datum1,
b->isnull1,
524 &
state->base.sortKeys[0]);
533 if (
state->base.onlyKey != NULL)
547 b->datum1,
b->isnull1,
548 &
state->base.sortKeys[0]);
557 if (
state->base.onlyKey != NULL)
/*
 * Template parameters generating qsort_tuple_unsigned: a specialized
 * quicksort over SortTuple arrays whose leading key compares via
 * qsort_tuple_unsigned_compare.  Presumably consumed by the sort
 * template header (the #include line is not visible in this excerpt).
 */
572 #define ST_SORT qsort_tuple_unsigned
573 #define ST_ELEMENT_TYPE SortTuple
574 #define ST_COMPARE(a, b, state) qsort_tuple_unsigned_compare(a, b, state)
575 #define ST_COMPARE_ARG_TYPE Tuplesortstate
576 #define ST_CHECK_FOR_INTERRUPTS
577 #define ST_SCOPE static
/*
 * Template parameters generating qsort_tuple_signed, the signed-datum
 * counterpart of qsort_tuple_unsigned.  Only built when a Datum is wide
 * enough (>= 8 bytes) to hold a 64-bit signed key by value.
 */
581 #if SIZEOF_DATUM >= 8
582 #define ST_SORT qsort_tuple_signed
583 #define ST_ELEMENT_TYPE SortTuple
584 #define ST_COMPARE(a, b, state) qsort_tuple_signed_compare(a, b, state)
585 #define ST_COMPARE_ARG_TYPE Tuplesortstate
586 #define ST_CHECK_FOR_INTERRUPTS
587 #define ST_SCOPE static
/*
 * Template parameters generating qsort_tuple_int32: specialization for
 * leading keys compared with qsort_tuple_int32_compare.
 */
592 #define ST_SORT qsort_tuple_int32
593 #define ST_ELEMENT_TYPE SortTuple
594 #define ST_COMPARE(a, b, state) qsort_tuple_int32_compare(a, b, state)
595 #define ST_COMPARE_ARG_TYPE Tuplesortstate
596 #define ST_CHECK_FOR_INTERRUPTS
597 #define ST_SCOPE static
/*
 * Template parameters generating the generic qsort_tuple: the comparator
 * is supplied at runtime as a function pointer (ST_COMPARE_RUNTIME_POINTER)
 * rather than baked in at compile time, so this handles any sort variant.
 */
601 #define ST_SORT qsort_tuple
602 #define ST_ELEMENT_TYPE SortTuple
603 #define ST_COMPARE_RUNTIME_POINTER
604 #define ST_COMPARE_ARG_TYPE Tuplesortstate
605 #define ST_CHECK_FOR_INTERRUPTS
606 #define ST_SCOPE static
/*
 * Template parameters generating qsort_ssup: the fast path for sorts
 * with a single key stored entirely in datum1/isnull1, comparing
 * directly through ApplySortComparator with a SortSupportData argument
 * instead of the full Tuplesortstate.
 */
611 #define ST_SORT qsort_ssup
612 #define ST_ELEMENT_TYPE SortTuple
613 #define ST_COMPARE(a, b, ssup) \
614 ApplySortComparator((a)->datum1, (a)->isnull1, \
615 (b)->datum1, (b)->isnull1, (ssup))
616 #define ST_COMPARE_ARG_TYPE SortSupportData
617 #define ST_CHECK_FOR_INTERRUPTS
618 #define ST_SCOPE static
651 elog(
ERROR,
"random access disallowed under parallel sort");
685 state->base.sortopt = sortopt;
686 state->base.tuples =
true;
687 state->abbrevNext = 10;
695 state->allowedMem =
Max(workMem, 64) * (int64) 1024;
696 state->base.sortcontext = sortcontext;
697 state->base.maincontext = maincontext;
704 state->memtuples = NULL;
719 state->shared = NULL;
721 state->nParticipants = -1;
728 state->nParticipants = -1;
782 state->bounded =
false;
783 state->boundUsed =
false;
787 state->tapeset = NULL;
789 state->memtupcount = 0;
795 state->growmemtuples =
true;
796 state->slabAllocatorUsed =
false;
800 state->memtuples = NULL;
803 if (
state->memtuples == NULL)
811 elog(
ERROR,
"insufficient memory allowed for sort");
813 state->currentRun = 0;
820 state->result_tape = NULL;
853 #ifdef DEBUG_BOUNDED_SORT
855 if (!optimize_bounded_sort)
860 if (bound > (int64) (INT_MAX / 2))
863 state->bounded =
true;
864 state->bound = (int) bound;
871 state->base.sortKeys->abbrev_converter = NULL;
872 if (
state->base.sortKeys->abbrev_full_comparator)
873 state->base.sortKeys->comparator =
state->base.sortKeys->abbrev_full_comparator;
876 state->base.sortKeys->abbrev_abort = NULL;
877 state->base.sortKeys->abbrev_full_comparator = NULL;
888 return state->boundUsed;
906 spaceUsed = (
state->allowedMem -
state->availMem + 1023) / 1024;
921 elog(
LOG,
"%s of worker %d ended, %lld disk blocks used: %s",
922 SERIAL(
state) ?
"external sort" :
"parallel external sort",
925 elog(
LOG,
"%s of worker %d ended, %lld KB used: %s",
926 SERIAL(
state) ?
"internal sort" :
"unperformed parallel sort",
930 TRACE_POSTGRESQL_SORT_DONE(
state->tapeset != NULL, spaceUsed);
990 spaceUsed =
state->allowedMem -
state->availMem;
1001 if ((isSpaceDisk && !
state->isMaxSpaceDisk) ||
1002 (isSpaceDisk ==
state->isMaxSpaceDisk && spaceUsed >
state->maxSpace))
1004 state->maxSpace = spaceUsed;
1005 state->isMaxSpaceDisk = isSpaceDisk;
1030 state->lastReturnedTuple = NULL;
1031 state->slabMemoryBegin = NULL;
1032 state->slabMemoryEnd = NULL;
1033 state->slabFreeHead = NULL;
1055 int memtupsize =
state->memtupsize;
1056 int64 memNowUsed =
state->allowedMem -
state->availMem;
1059 if (!
state->growmemtuples)
1063 if (memNowUsed <= state->availMem)
1069 if (memtupsize < INT_MAX / 2)
1070 newmemtupsize = memtupsize * 2;
1073 newmemtupsize = INT_MAX;
1074 state->growmemtuples =
false;
1107 grow_ratio = (double)
state->allowedMem / (
double) memNowUsed;
1108 if (memtupsize * grow_ratio < INT_MAX)
1109 newmemtupsize = (int) (memtupsize * grow_ratio);
1111 newmemtupsize = INT_MAX;
1114 state->growmemtuples =
false;
1118 if (newmemtupsize <= memtupsize)
1131 state->growmemtuples =
false;
1145 if (
state->availMem < (int64) ((newmemtupsize - memtupsize) *
sizeof(
SortTuple)))
1150 state->memtupsize = newmemtupsize;
1156 elog(
ERROR,
"unexpected out-of-memory situation in tuplesort");
1161 state->growmemtuples =
false;
1170 bool useAbbrev,
Size tuplen)
1178 state->tupleMem += tuplen;
1195 state->base.sortKeys);
1211 switch (
state->status)
1222 if (
state->memtupcount >=
state->memtupsize - 1)
1227 state->memtuples[
state->memtupcount++] = *tuple;
1241 if (
state->bounded &&
1246 elog(
LOG,
"switching to bounded heapsort at %d tuples: %s",
1303 state->memtuples[
state->memtupcount++] = *tuple;
1321 Assert(
state->base.sortKeys[0].abbrev_converter != NULL);
1322 Assert(
state->base.sortKeys[0].abbrev_abort != NULL);
1323 Assert(
state->base.sortKeys[0].abbrev_full_comparator != NULL);
1332 state->abbrevNext *= 2;
1338 if (!
state->base.sortKeys->abbrev_abort(
state->memtupcount,
1339 state->base.sortKeys))
1346 state->base.sortKeys[0].comparator =
state->base.sortKeys[0].abbrev_full_comparator;
1347 state->base.sortKeys[0].abbrev_converter = NULL;
1349 state->base.sortKeys[0].abbrev_abort = NULL;
1350 state->base.sortKeys[0].abbrev_full_comparator = NULL;
1368 elog(
LOG,
"performsort of worker %d starting: %s",
1371 switch (
state->status)
1406 state->eof_reached =
false;
1407 state->markpos_block = 0L;
1408 state->markpos_offset = 0;
1409 state->markpos_eof =
false;
1422 state->eof_reached =
false;
1423 state->markpos_offset = 0;
1424 state->markpos_eof =
false;
1437 state->eof_reached =
false;
1438 state->markpos_block = 0L;
1439 state->markpos_offset = 0;
1440 state->markpos_eof =
false;
1451 elog(
LOG,
"performsort of worker %d done (except %d-way final merge): %s",
1455 elog(
LOG,
"performsort of worker %d done: %s",
1473 unsigned int tuplen;
1478 switch (
state->status)
1490 state->eof_reached =
true;
1498 elog(
ERROR,
"retrieved too many tuples in a bounded sort");
1504 if (
state->current <= 0)
1511 if (
state->eof_reached)
1512 state->eof_reached =
false;
1516 if (
state->current <= 0)
1519 *stup =
state->memtuples[
state->current - 1];
1532 if (
state->lastReturnedTuple)
1535 state->lastReturnedTuple = NULL;
1540 if (
state->eof_reached)
1543 if ((tuplen =
getlen(
state->result_tape,
true)) != 0)
1558 state->eof_reached =
true;
1569 if (
state->eof_reached)
1577 2 *
sizeof(
unsigned int));
1580 else if (nmoved != 2 *
sizeof(
unsigned int))
1581 elog(
ERROR,
"unexpected tape position");
1582 state->eof_reached =
false;
1591 sizeof(
unsigned int));
1594 else if (nmoved !=
sizeof(
unsigned int))
1595 elog(
ERROR,
"unexpected tape position");
1602 tuplen + 2 *
sizeof(
unsigned int));
1603 if (nmoved == tuplen +
sizeof(
unsigned int))
1614 else if (nmoved != tuplen + 2 *
sizeof(
unsigned int))
1615 elog(
ERROR,
"bogus tuple length in backward scan");
1627 if (nmoved != tuplen)
1628 elog(
ERROR,
"bogus tuple length in backward scan");
1648 if (
state->lastReturnedTuple)
1651 state->lastReturnedTuple = NULL;
1657 if (
state->memtupcount > 0)
1659 int srcTapeIndex =
state->memtuples[0].srctape;
1663 *stup =
state->memtuples[0];
1682 state->nInputRuns--;
1691 newtup.
srctape = srcTapeIndex;
1722 switch (
state->status)
1725 if (
state->memtupcount -
state->current >= ntuples)
1727 state->current += ntuples;
1731 state->eof_reached =
true;
1739 elog(
ERROR,
"retrieved too many tuples in a bounded sort");
1751 while (ntuples-- > 0)
1803 mOrder = allowedMem /
1844 nOutputRuns = (nInputRuns + nInputTapes - 1) / nInputTapes;
1846 nOutputTapes =
Min(nOutputRuns, maxOutputTapes);
1882 elog(
LOG,
"worker %d switching to external sort with %d tapes: %s",
1889 state->shared ? &
state->shared->fileset : NULL,
1892 state->currentRun = 0;
1897 state->inputTapes = NULL;
1898 state->nInputTapes = 0;
1899 state->nInputRuns = 0;
1902 state->nOutputTapes = 0;
1903 state->nOutputRuns = 0;
1963 state->nOutputTapes++;
1964 state->nOutputRuns++;
1973 state->nOutputRuns++;
1989 state->slabMemoryEnd =
state->slabMemoryBegin +
1994 p =
state->slabMemoryBegin;
1995 for (
i = 0;
i < numSlots - 1;
i++)
2004 state->slabMemoryBegin =
state->slabMemoryEnd = NULL;
2005 state->slabFreeHead = NULL;
2007 state->slabAllocatorUsed =
true;
2024 if (
state->base.sortKeys != NULL &&
state->base.sortKeys->abbrev_converter != NULL)
2032 state->base.sortKeys->abbrev_converter = NULL;
2033 state->base.sortKeys->comparator =
state->base.sortKeys->abbrev_full_comparator;
2036 state->base.sortKeys->abbrev_abort = NULL;
2037 state->base.sortKeys->abbrev_full_comparator = NULL;
2052 state->memtuples = NULL;
2067 if (
state->base.tuples)
2093 elog(
LOG,
"worker %d using %zu KB of memory for tape buffers",
2094 state->worker,
state->tape_buffer_mem / 1024);
2104 if (
state->nInputRuns == 0)
2106 int64 input_buffer_size;
2109 if (
state->nInputTapes > 0)
2111 for (tapenum = 0; tapenum <
state->nInputTapes; tapenum++)
2127 state->nOutputTapes = 0;
2128 state->nOutputRuns = 0;
2140 elog(
LOG,
"starting merge pass of %d input runs on %d tapes, " INT64_FORMAT " KB of memory for each input tape: %s",
2141 state->nInputRuns,
state->nInputTapes, input_buffer_size / 1024,
2145 for (tapenum = 0; tapenum <
state->nInputTapes; tapenum++)
2177 if (
state->nInputRuns == 0 &&
state->nOutputRuns <= 1)
2192 for (tapenum = 0; tapenum <
state->nInputTapes; tapenum++)
2218 while (
state->memtupcount > 0)
2223 srcTapeIndex =
state->memtuples[0].srctape;
2224 srcTape =
state->inputTapes[srcTapeIndex];
2228 if (
state->memtuples[0].tuple)
2243 state->nInputRuns--;
2270 for (srcTapeIndex = 0; srcTapeIndex < activeTapes; srcTapeIndex++)
2290 unsigned int tuplen;
2293 if ((tuplen =
getlen(srcTape,
true)) == 0)
2328 if (
state->memtupcount == 0 &&
state->currentRun > 0)
2337 if (
state->currentRun == INT_MAX)
2339 (
errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
2340 errmsg(
"cannot have more than %d runs for an external sort",
2343 if (
state->currentRun > 0)
2346 state->currentRun++;
2349 elog(
LOG,
"worker %d starting quicksort of run %d: %s",
2360 elog(
LOG,
"worker %d finished quicksort of run %d: %s",
2364 memtupwrite =
state->memtupcount;
2365 for (
i = 0;
i < memtupwrite;
i++)
2372 state->memtupcount = 0;
2388 state->tupleMem = 0;
2393 elog(
LOG,
"worker %d finished writing run %d to tape %d: %s",
2408 switch (
state->status)
2412 state->eof_reached =
false;
2413 state->markpos_offset = 0;
2414 state->markpos_eof =
false;
2418 state->eof_reached =
false;
2419 state->markpos_block = 0L;
2420 state->markpos_offset = 0;
2421 state->markpos_eof =
false;
2441 switch (
state->status)
2449 &
state->markpos_block,
2450 &
state->markpos_offset);
2472 switch (
state->status)
2480 state->markpos_block,
2481 state->markpos_offset);
2513 if (
state->isMaxSpaceDisk)
2519 switch (
state->maxSpaceStatus)
2522 if (
state->boundUsed)
2548 return "still in progress";
2550 return "top-N heapsort";
2554 return "external sort";
2556 return "external merge";
2589 int tupcount =
state->memtupcount;
2600 state->memtupcount = 0;
2601 for (
i = 0;
i < tupcount;
i++)
2638 int tupcount =
state->memtupcount;
2650 while (
state->memtupcount > 1)
2658 state->memtupcount = tupcount;
2667 state->boundUsed =
true;
2680 if (
state->memtupcount > 1)
2686 if (
state->base.haveDatum1 &&
state->base.sortKeys)
2690 qsort_tuple_unsigned(
state->memtuples,
2695 #if SIZEOF_DATUM >= 8
2696 else if (
state->base.sortKeys[0].comparator == ssup_datum_signed_cmp)
2698 qsort_tuple_signed(
state->memtuples,
2706 qsort_tuple_int32(
state->memtuples,
2714 if (
state->base.onlyKey != NULL)
2716 qsort_ssup(
state->memtuples,
state->memtupcount,
2717 state->base.onlyKey);
2721 qsort_tuple(
state->memtuples,
2723 state->base.comparetup,
2744 memtuples =
state->memtuples;
2753 j =
state->memtupcount++;
2756 int i = (
j - 1) >> 1;
2760 memtuples[
j] = memtuples[
i];
2763 memtuples[
j] = *tuple;
2779 if (--
state->memtupcount <= 0)
2786 tuple = &memtuples[
state->memtupcount];
2813 n =
state->memtupcount;
2817 unsigned int j = 2 *
i + 1;
2826 memtuples[
i] = memtuples[
j];
2829 memtuples[
i] = *tuple;
2843 for (nkey = 0; nkey <
state->base.nKeys; nkey++, sortKey++)
2863 if (
len == 0 && !eofOK)
2871 unsigned int len = 0;
2899 state->slabFreeHead =
buf->nextfree;
2948 shared->
nTapes = nWorkers;
2949 for (
i = 0;
i < nWorkers;
i++)
3024 state->memtuples = NULL;
3025 state->memtupsize = 0;
3072 int nParticipants =
state->nParticipants;
3073 int workersFinished;
3077 Assert(nParticipants >= 1);
3083 if (nParticipants != workersFinished)
3084 elog(
ERROR,
"cannot take over tapes before all workers finish");
3098 state->currentRun = nParticipants;
3108 state->inputTapes = NULL;
3109 state->nInputTapes = 0;
3110 state->nInputRuns = 0;
3113 state->nOutputTapes = nParticipants;
3114 state->nOutputRuns = nParticipants;
3116 for (
j = 0;
j < nParticipants;
j++)
3149 #if SIZEOF_DATUM >= 8
void PrepareTempTablespaces(void)
MemoryContext BumpContextCreate(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
#define Assert(condition)
#define FLEXIBLE_ARRAY_MEMBER
#define pg_attribute_always_inline
int errcode(int sqlerrcode)
int errmsg(const char *fmt,...)
#define ereport(elevel,...)
static int compare(const void *arg1, const void *arg2)
LogicalTape * LogicalTapeCreate(LogicalTapeSet *lts)
void LogicalTapeRewindForRead(LogicalTape *lt, size_t buffer_size)
void LogicalTapeSetForgetFreeSpace(LogicalTapeSet *lts)
size_t LogicalTapeBackspace(LogicalTape *lt, size_t size)
size_t LogicalTapeRead(LogicalTape *lt, void *ptr, size_t size)
int64 LogicalTapeSetBlocks(LogicalTapeSet *lts)
void LogicalTapeClose(LogicalTape *lt)
void LogicalTapeSetClose(LogicalTapeSet *lts)
void LogicalTapeSeek(LogicalTape *lt, int64 blocknum, int offset)
void LogicalTapeTell(LogicalTape *lt, int64 *blocknum, int *offset)
void LogicalTapeWrite(LogicalTape *lt, const void *ptr, size_t size)
LogicalTapeSet * LogicalTapeSetCreate(bool preallocate, SharedFileSet *fileset, int worker)
void LogicalTapeFreeze(LogicalTape *lt, TapeShare *share)
LogicalTape * LogicalTapeImport(LogicalTapeSet *lts, int worker, TapeShare *shared)
void MemoryContextReset(MemoryContext context)
void pfree(void *pointer)
Size GetMemoryChunkSpace(void *pointer)
void * palloc0(Size size)
MemoryContext CurrentMemoryContext
void * MemoryContextAlloc(MemoryContext context, Size size)
void MemoryContextDelete(MemoryContext context)
void MemoryContextResetOnly(MemoryContext context)
void * repalloc_huge(void *pointer, Size size)
#define AllocSetContextCreate
#define ALLOCSET_DEFAULT_SIZES
#define CHECK_FOR_INTERRUPTS()
const char * pg_rusage_show(const PGRUsage *ru0)
void pg_rusage_init(PGRUsage *ru0)
static int64 DatumGetInt64(Datum X)
static int32 DatumGetInt32(Datum X)
MemoryContextSwitchTo(old_ctx)
void SharedFileSetAttach(SharedFileSet *fileset, dsm_segment *seg)
void SharedFileSetInit(SharedFileSet *fileset, dsm_segment *seg)
Size add_size(Size s1, Size s2)
Size mul_size(Size s1, Size s2)
static int ApplyUnsignedSortComparator(Datum datum1, bool isNull1, Datum datum2, bool isNull2, SortSupport ssup)
static int ApplyInt32SortComparator(Datum datum1, bool isNull1, Datum datum2, bool isNull2, SortSupport ssup)
#define SpinLockInit(lock)
#define SpinLockRelease(lock)
#define SpinLockAcquire(lock)
TapeShare tapes[FLEXIBLE_ARRAY_MEMBER]
TuplesortMethod sortMethod
TuplesortSpaceType spaceType
LogicalTape ** inputTapes
LogicalTape ** outputTapes
TupSortStatus maxSpaceStatus
LogicalTape * result_tape
void tuplesort_rescan(Tuplesortstate *state)
void tuplesort_performsort(Tuplesortstate *state)
int tuplesort_merge_order(int64 allowedMem)
#define TAPE_BUFFER_OVERHEAD
static void tuplesort_heap_delete_top(Tuplesortstate *state)
#define INITIAL_MEMTUPSIZE
static unsigned int getlen(LogicalTape *tape, bool eofOK)
void tuplesort_initialize_shared(Sharedsort *shared, int nWorkers, dsm_segment *seg)
#define COMPARETUP(state, a, b)
Tuplesortstate * tuplesort_begin_common(int workMem, SortCoordinate coordinate, int sortopt)
static void selectnewtape(Tuplesortstate *state)
void tuplesort_reset(Tuplesortstate *state)
static void markrunend(LogicalTape *tape)
bool tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
#define REMOVEABBREV(state, stup, count)
static void reversedirection(Tuplesortstate *state)
#define USEMEM(state, amt)
static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple)
static bool grow_memtuples(Tuplesortstate *state)
int ssup_datum_unsigned_cmp(Datum x, Datum y, SortSupport ssup)
static void beginmerge(Tuplesortstate *state)
static void make_bounded_heap(Tuplesortstate *state)
bool tuplesort_used_bound(Tuplesortstate *state)
#define WRITETUP(state, tape, stup)
static void sort_bounded_heap(Tuplesortstate *state)
static int worker_get_identifier(Tuplesortstate *state)
static void mergeonerun(Tuplesortstate *state)
#define FREEMEM(state, amt)
const char * tuplesort_space_type_name(TuplesortSpaceType t)
static void inittapestate(Tuplesortstate *state, int maxTapes)
static void leader_takeover_tapes(Tuplesortstate *state)
Size tuplesort_estimate_shared(int nWorkers)
void tuplesort_get_stats(Tuplesortstate *state, TuplesortInstrumentation *stats)
static void tuplesort_sort_memtuples(Tuplesortstate *state)
void tuplesort_end(Tuplesortstate *state)
static void inittapes(Tuplesortstate *state, bool mergeruns)
void tuplesort_markpos(Tuplesortstate *state)
void tuplesort_puttuple_common(Tuplesortstate *state, SortTuple *tuple, bool useAbbrev, Size tuplen)
#define MERGE_BUFFER_SIZE
#define READTUP(state, stup, tape, len)
int ssup_datum_int32_cmp(Datum x, Datum y, SortSupport ssup)
bool tuplesort_gettuple_common(Tuplesortstate *state, bool forward, SortTuple *stup)
static int64 merge_read_buffer_size(int64 avail_mem, int nInputTapes, int nInputRuns, int maxOutputTapes)
static bool mergereadnext(Tuplesortstate *state, LogicalTape *srcTape, SortTuple *stup)
static void tuplesort_updatemax(Tuplesortstate *state)
static void worker_freeze_result_tape(Tuplesortstate *state)
#define RELEASE_SLAB_SLOT(state, tuple)
void tuplesort_attach_shared(Sharedsort *shared, dsm_segment *seg)
static void worker_nomergeruns(Tuplesortstate *state)
static pg_attribute_always_inline int qsort_tuple_unsigned_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
static void tuplesort_heap_replace_top(Tuplesortstate *state, SortTuple *tuple)
void tuplesort_restorepos(Tuplesortstate *state)
static pg_attribute_always_inline int qsort_tuple_int32_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
static void mergeruns(Tuplesortstate *state)
void * tuplesort_readtup_alloc(Tuplesortstate *state, Size tuplen)
static void tuplesort_begin_batch(Tuplesortstate *state)
void tuplesort_set_bound(Tuplesortstate *state, int64 bound)
static void init_slab_allocator(Tuplesortstate *state, int numSlots)
const char * tuplesort_method_name(TuplesortMethod m)
static bool consider_abort_common(Tuplesortstate *state)
static void tuplesort_free(Tuplesortstate *state)
static void dumptuples(Tuplesortstate *state, bool alltuples)
#define TupleSortUseBumpTupleCxt(opt)
#define TUPLESORT_RANDOMACCESS
#define TUPLESORT_ALLOWBOUNDED
@ SORT_TYPE_EXTERNAL_SORT
@ SORT_TYPE_TOP_N_HEAPSORT
@ SORT_TYPE_STILL_IN_PROGRESS
@ SORT_TYPE_EXTERNAL_MERGE
char buffer[SLAB_SLOT_SIZE]
union SlabSlot * nextfree