#define INITIAL_MEMTUPSIZE Max(1024, \
		ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1)
#ifdef DEBUG_BOUNDED_SORT
bool		optimize_bounded_sort = true;
#endif
#define SLAB_SLOT_SIZE 1024

#define TAPE_BUFFER_OVERHEAD		BLCKSZ
#define MERGE_BUFFER_SIZE			(BLCKSZ * 32)
#define IS_SLAB_SLOT(state, tuple) \
	((char *) (tuple) >= (state)->slabMemoryBegin && \
	 (char *) (tuple) < (state)->slabMemoryEnd)
#define RELEASE_SLAB_SLOT(state, tuple) \
	do { \
		SlabSlot *buf = (SlabSlot *) tuple; \
		\
		if (IS_SLAB_SLOT((state), buf)) \
		{ \
			buf->nextfree = (state)->slabFreeHead; \
			(state)->slabFreeHead = buf; \
		} \
		else \
			pfree(buf); \
	} while(0)
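/*
 * A minimal sketch (not part of the original file; the guard macro and
 * function name below are hypothetical) of how the slab free list behaves:
 * released slots are pushed onto a singly linked list threaded through the
 * slots themselves, and the next allocation pops from that list in O(1).
 */
#ifdef TUPLESORT_EXAMPLE_SKETCHES
static void *
example_slab_pop(Tuplesortstate *state)
{
	SlabSlot   *slot = state->slabFreeHead;

	if (slot != NULL)
		state->slabFreeHead = slot->nextfree;	/* reuse a recycled slot */
	return slot;
}
#endif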
#define REMOVEABBREV(state,stup,count)	((*(state)->base.removeabbrev) (state, stup, count))
#define COMPARETUP(state,a,b)	((*(state)->base.comparetup) (a, b, state))
#define WRITETUP(state,tape,stup)	((*(state)->base.writetup) (state, tape, stup))
#define READTUP(state,stup,tape,len) ((*(state)->base.readtup) (state, stup, tape, len))
#define FREESTATE(state)	((state)->base.freestate ? (*(state)->base.freestate) (state) : (void) 0)
#define LACKMEM(state)		((state)->availMem < 0 && !(state)->slabAllocatorUsed)
#define USEMEM(state,amt)	((state)->availMem -= (amt))
#define FREEMEM(state,amt)	((state)->availMem += (amt))
#define SERIAL(state)		((state)->shared == NULL)
#define WORKER(state)		((state)->shared && (state)->worker != -1)
#define LEADER(state)		((state)->shared && (state)->worker == -1)
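/*
 * Illustrative use of the accounting macros (a sketch under a hypothetical
 * guard, not called anywhere): out-of-line tuple memory is charged against
 * availMem with USEMEM and credited back with FREEMEM; LACKMEM() turning
 * true is what eventually forces a spill to tape.
 */
#ifdef TUPLESORT_EXAMPLE_SKETCHES
static void
example_charge_tuple(Tuplesortstate *state, Size tuplen)
{
	USEMEM(state, tuplen);		/* charge the new tuple */
	if (LACKMEM(state))
		elog(DEBUG1, "work_mem exhausted; the next step would spill to tapes");
	FREEMEM(state, tuplen);		/* credit it back when the tuple is freed */
}
#endif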
	/* qsort_tuple_unsigned_compare() */
	compare = ApplyUnsignedSortComparator(a->datum1, a->isnull1,
										  b->datum1, b->isnull1,
										  &state->base.sortKeys[0]);
	if (compare != 0)
		return compare;

	/* no need to call the tiebreak function when there are no other keys */
	if (state->base.onlyKey != NULL)
		return 0;
#if SIZEOF_DATUM >= 8
	/* qsort_tuple_signed_compare() */
	compare = ApplySignedSortComparator(a->datum1, a->isnull1,
										b->datum1, b->isnull1,
										&state->base.sortKeys[0]);
	if (compare != 0)
		return compare;

	/* no need to call the tiebreak function when there are no other keys */
	if (state->base.onlyKey != NULL)
		return 0;
#endif
	/* qsort_tuple_int32_compare() */
	compare = ApplyInt32SortComparator(a->datum1, a->isnull1,
									   b->datum1, b->isnull1,
									   &state->base.sortKeys[0]);
	if (compare != 0)
		return compare;

	/* no need to call the tiebreak function when there are no other keys */
	if (state->base.onlyKey != NULL)
		return 0;
#define ST_SORT qsort_tuple_unsigned
#define ST_ELEMENT_TYPE SortTuple
#define ST_COMPARE(a, b, state) qsort_tuple_unsigned_compare(a, b, state)
#define ST_COMPARE_ARG_TYPE Tuplesortstate
#define ST_CHECK_FOR_INTERRUPTS
#define ST_SCOPE static
#define ST_DEFINE
#include "lib/sort_template.h"

#if SIZEOF_DATUM >= 8
#define ST_SORT qsort_tuple_signed
#define ST_ELEMENT_TYPE SortTuple
#define ST_COMPARE(a, b, state) qsort_tuple_signed_compare(a, b, state)
#define ST_COMPARE_ARG_TYPE Tuplesortstate
#define ST_CHECK_FOR_INTERRUPTS
#define ST_SCOPE static
#define ST_DEFINE
#include "lib/sort_template.h"
#endif

#define ST_SORT qsort_tuple_int32
#define ST_ELEMENT_TYPE SortTuple
#define ST_COMPARE(a, b, state) qsort_tuple_int32_compare(a, b, state)
#define ST_COMPARE_ARG_TYPE Tuplesortstate
#define ST_CHECK_FOR_INTERRUPTS
#define ST_SCOPE static
#define ST_DEFINE
#include "lib/sort_template.h"

#define ST_SORT qsort_tuple
#define ST_ELEMENT_TYPE SortTuple
#define ST_COMPARE_RUNTIME_POINTER
#define ST_COMPARE_ARG_TYPE Tuplesortstate
#define ST_CHECK_FOR_INTERRUPTS
#define ST_SCOPE static
#define ST_DEFINE
#include "lib/sort_template.h"

#define ST_SORT qsort_ssup
#define ST_ELEMENT_TYPE SortTuple
#define ST_COMPARE(a, b, ssup) \
	ApplySortComparator((a)->datum1, (a)->isnull1, \
						(b)->datum1, (b)->isnull1, (ssup))
#define ST_COMPARE_ARG_TYPE SortSupportData
#define ST_CHECK_FOR_INTERRUPTS
#define ST_SCOPE static
#define ST_DEFINE
#include "lib/sort_template.h"
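/*
 * Illustrative note (not part of the original file): each ST_* block above is
 * expanded by lib/sort_template.h into a specialized in-place quicksort whose
 * signature follows the template parameters, roughly
 *
 *		static void qsort_tuple_unsigned(SortTuple *data, size_t n,
 *										 Tuplesortstate *arg);
 *
 * so the comparator is inlined at its call sites instead of being reached
 * through a function pointer, which is what the generic qsort_tuple
 * (ST_COMPARE_RUNTIME_POINTER) variant has to do.
 */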
	if (coordinate && (sortopt & TUPLESORT_RANDOMACCESS))
		elog(ERROR, "random access disallowed under parallel sort");
	state->base.sortopt = sortopt;
	state->base.tuples = true;
	state->abbrevNext = 10;

	/* workMem is given in kilobytes; convert to bytes, enforcing a 64 KB floor */
	state->allowedMem = Max(workMem, 64) * (int64) 1024;
	state->base.sortcontext = sortcontext;
	state->base.maincontext = maincontext;

	state->memtuples = NULL;

	/* serial sort: no coordination info supplied */
	state->shared = NULL;
	state->nParticipants = -1;

	/* parallel worker: produces exactly one final run */
	state->nParticipants = -1;
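	/*
	 * Worked example (numbers are illustrative, not from the original file):
	 * with work_mem = 4096 (KB), allowedMem = Max(4096, 64) * 1024 =
	 * 4194304 bytes; the Max() keeps a pathological setting from dropping
	 * the budget below 64 KB.
	 */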
	state->bounded = false;
	state->boundUsed = false;

	state->tapeset = NULL;

	state->memtupcount = 0;

	state->growmemtuples = true;
	state->slabAllocatorUsed = false;

	state->memtuples = NULL;

	if (state->memtuples == NULL)
	{
		state->memtupsize = INITIAL_MEMTUPSIZE;
		state->memtuples = (SortTuple *)
			MemoryContextAlloc(state->base.maincontext,
							   state->memtupsize * sizeof(SortTuple));
		USEMEM(state, GetMemoryChunkSpace(state->memtuples));
	}

	/* workMem must be large enough for the minimal memtuples array */
	if (LACKMEM(state))
		elog(ERROR, "insufficient memory allowed for sort");

	state->currentRun = 0;

	state->result_tape = NULL;
#ifdef DEBUG_BOUNDED_SORT
	/* Honor GUC setting that disables the feature (for easy testing) */
	if (!optimize_bounded_sort)
		return;
#endif

	/* We want to be able to compute bound * 2, so limit the setting */
	if (bound > (int64) (INT_MAX / 2))
		return;

	state->bounded = true;
	state->bound = (int) bound;

	/*
	 * Bounded sorts are not an effective target for abbreviated key
	 * optimization; disable it, as if no abbreviation support existed.
	 */
	state->base.sortKeys->abbrev_converter = NULL;
	if (state->base.sortKeys->abbrev_full_comparator)
		state->base.sortKeys->comparator = state->base.sortKeys->abbrev_full_comparator;

	/* Not strictly necessary, but be tidy */
	state->base.sortKeys->abbrev_abort = NULL;
	state->base.sortKeys->abbrev_full_comparator = NULL;

	/* tuplesort_used_bound() reports whether the bound was actually applied */
	return state->boundUsed;
	if (state->tapeset)
		spaceUsed = LogicalTapeSetBlocks(state->tapeset);
	else
		spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;

#ifdef TRACE_SORT
	if (trace_sort)
	{
		if (state->tapeset)
			elog(LOG, "%s of worker %d ended, %ld disk blocks used: %s",
				 SERIAL(state) ? "external sort" : "parallel external sort",
				 state->worker, spaceUsed, pg_rusage_show(&state->ru_start));
		else
			elog(LOG, "%s of worker %d ended, %ld KB used: %s",
				 SERIAL(state) ? "internal sort" : "unperformed parallel sort",
				 state->worker, spaceUsed, pg_rusage_show(&state->ru_start));
	}

	TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
#else
	TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
#endif
	if (state->tapeset)
	{
		isSpaceDisk = true;
		spaceUsed = LogicalTapeSetBlocks(state->tapeset) * BLCKSZ;
	}
	else
	{
		isSpaceDisk = false;
		spaceUsed = state->allowedMem - state->availMem;
	}

	/*
	 * Track the high-water mark; disk usage is considered to dominate
	 * memory usage for reporting purposes.
	 */
	if ((isSpaceDisk && !state->isMaxSpaceDisk) ||
		(isSpaceDisk == state->isMaxSpaceDisk && spaceUsed > state->maxSpace))
	{
		state->maxSpace = spaceUsed;
		state->isMaxSpaceDisk = isSpaceDisk;
	}
	state->lastReturnedTuple = NULL;
	state->slabMemoryBegin = NULL;
	state->slabMemoryEnd = NULL;
	state->slabFreeHead = NULL;
	int			newmemtupsize;
	int			memtupsize = state->memtupsize;
	int64		memNowUsed = state->allowedMem - state->availMem;
	double		grow_ratio;

	/* Forget it if we've already maxed out memtuples, per comment above */
	if (!state->growmemtuples)
		return false;

	/* Select new value of memtupsize */
	if (memNowUsed <= state->availMem)
	{
		/* Less than half of allowedMem is used: just double the array */
		if (memtupsize < INT_MAX / 2)
			newmemtupsize = memtupsize * 2;
		else
		{
			newmemtupsize = INT_MAX;
			state->growmemtuples = false;
		}
	}
	else
	{
		/*
		 * This will be the last increment: grow in proportion to the
		 * remaining memory budget, then shut off further growth.
		 */
		grow_ratio = (double) state->allowedMem / (double) memNowUsed;
		if (memtupsize * grow_ratio < INT_MAX)
			newmemtupsize = (int) (memtupsize * grow_ratio);
		else
			newmemtupsize = INT_MAX;

		/* We won't make any further enlargement attempts */
		state->growmemtuples = false;
	}

	/* Must enlarge array by at least one element, else report failure */
	if (newmemtupsize <= memtupsize)
		goto noalloc;

	/* Clamp to MaxAllocHugeSize, and stop growing once we hit it */
	if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
	{
		newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
		state->growmemtuples = false;
	}

	/* Be sure the projected growth cannot make LACKMEM become true */
	if (state->availMem < (int64) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
		goto noalloc;

	/* OK, do it */
	FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
	state->memtupsize = newmemtupsize;
	state->memtuples = (SortTuple *)
		repalloc_huge(state->memtuples,
					  state->memtupsize * sizeof(SortTuple));
	USEMEM(state, GetMemoryChunkSpace(state->memtuples));
	if (LACKMEM(state))
		elog(ERROR, "unexpected out-of-memory situation in tuplesort");
	return true;

noalloc:
	/* If for any reason we didn't realloc, shut off future attempts */
	state->growmemtuples = false;
	return false;
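	/*
	 * Worked example (illustrative numbers, not from the original file): if
	 * allowedMem is 64 MB and 40 MB is already in use when the array fills,
	 * grow_ratio = 64/40 = 1.6, so a 100000-slot memtuples array grows to
	 * about 160000 slots in a single repalloc, and growmemtuples is then
	 * turned off so no further enlargement is attempted.
	 */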
	/* Count the size of the out-of-line data */
	if (tuple->tuple != NULL)
		USEMEM(state, GetMemoryChunkSpace(tuple->tuple));

	/* Store abbreviated key representation */
	tuple->datum1 = state->base.sortKeys->abbrev_converter(tuple->datum1,
															state->base.sortKeys);
	switch (state->status)
	{
		case TSS_INITIAL:

			/* Grow the array as needed before adding another tuple */
			if (state->memtupcount >= state->memtupsize - 1)
				(void) grow_memtuples(state);
			state->memtuples[state->memtupcount++] = *tuple;

			/* Check if it's time to switch over to a bounded heapsort */
			if (state->bounded &&
				(state->memtupcount > state->bound * 2 ||
				 (state->memtupcount > state->bound && LACKMEM(state))))
			{
				elog(LOG, "switching to bounded heapsort at %d tuples: %s",
					 state->memtupcount,
					 pg_rusage_show(&state->ru_start));
				make_bounded_heap(state);
			}
			break;

		case TSS_BUILDRUNS:

			/* Append to the unsorted portion of the current run */
			state->memtuples[state->memtupcount++] = *tuple;

			/* If we are over the memory limit, dump all tuples */
			dumptuples(state, false);
			break;
	}
	Assert(state->base.sortKeys[0].abbrev_converter != NULL);
	Assert(state->base.sortKeys[0].abbrev_abort != NULL);
	Assert(state->base.sortKeys[0].abbrev_full_comparator != NULL);

	/* Double the interval before the next check */
	state->abbrevNext *= 2;

	/* Ask the opclass-supplied abort routine whether to keep abbreviating */
	if (!state->base.sortKeys->abbrev_abort(state->memtupcount,
											state->base.sortKeys))
		return false;

	/*
	 * Abort abbreviation: switch to the authoritative comparator and clear
	 * the abbreviation fields so the optimization cannot be re-enabled.
	 */
	state->base.sortKeys[0].comparator = state->base.sortKeys[0].abbrev_full_comparator;
	state->base.sortKeys[0].abbrev_converter = NULL;
	state->base.sortKeys[0].abbrev_abort = NULL;
	state->base.sortKeys[0].abbrev_full_comparator = NULL;
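	/*
	 * Worked note (not part of the original file): abbrevNext doubles on
	 * every check (10, 20, 40, 80, ...), so for N input tuples the
	 * abbrev_abort callback is consulted only O(log N) times, and once it
	 * says "abort" the full comparator is used for the rest of the sort.
	 */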
	elog(LOG, "performsort of worker %d starting: %s",
		 state->worker, pg_rusage_show(&state->ru_start));

	switch (state->status)
	{
		case TSS_INITIAL:

			/* Everything fit in memory: just quicksort the array */
			tuplesort_sort_memtuples(state);
			state->eof_reached = false;
			state->markpos_block = 0L;
			state->markpos_offset = 0;
			state->markpos_eof = false;
			break;

		case TSS_BOUNDED:

			/* Convert the bounded heap into a properly sorted array */
			sort_bounded_heap(state);
			state->eof_reached = false;
			state->markpos_offset = 0;
			state->markpos_eof = false;
			break;

		case TSS_BUILDRUNS:

			/* Flush remaining tuples to tape, then merge the runs */
			dumptuples(state, true);
			mergeruns(state);
			state->eof_reached = false;
			state->markpos_block = 0L;
			state->markpos_offset = 0;
			state->markpos_eof = false;
			break;
	}

	if (state->status == TSS_FINALMERGE)
		elog(LOG, "performsort of worker %d done (except %d-way final merge): %s",
			 state->worker, state->nInputTapes,
			 pg_rusage_show(&state->ru_start));
	else
		elog(LOG, "performsort of worker %d done: %s",
			 state->worker, pg_rusage_show(&state->ru_start));
	unsigned int tuplen;

	switch (state->status)
	{
		case TSS_SORTEDINMEM:
			if (forward)
			{
				if (state->current < state->memtupcount)
				{
					*stup = state->memtuples[state->current++];
					return true;
				}
				state->eof_reached = true;

				/* don't let a bounded sort read past its bound */
				if (state->bounded && state->current >= state->bound)
					elog(ERROR, "retrieved too many tuples in a bounded sort");
				return false;
			}

			/* backward scan within the in-memory array */
			if (state->current <= 0)
				return false;

			/*
			 * If all tuples were already fetched, return the last tuple;
			 * otherwise the tuple before the last one returned.
			 */
			if (state->eof_reached)
				state->eof_reached = false;
			else
			{
				state->current--;
				if (state->current <= 0)
					return false;
			}
			*stup = state->memtuples[state->current - 1];
			return true;
		case TSS_SORTEDONTAPE:

			/* recycle the slab slot of the previously-returned tuple */
			if (state->lastReturnedTuple)
			{
				RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
				state->lastReturnedTuple = NULL;
			}

			if (forward)
			{
				if (state->eof_reached)
					return false;

				if ((tuplen = getlen(state->result_tape, true)) != 0)
				{
					READTUP(state, stup, state->result_tape, tuplen);
					state->lastReturnedTuple = stup->tuple;
					return true;
				}
				else
				{
					state->eof_reached = true;
					return false;
				}
			}

			/*
			 * Backward scan: each tuple on tape is bracketed by length
			 * words, so backspace over the trailing length word(s) to find
			 * the previous tuple.
			 */
			if (state->eof_reached)
			{
				nmoved = LogicalTapeBackspace(state->result_tape,
											  2 * sizeof(unsigned int));
				if (nmoved == 0)
					return false;
				else if (nmoved != 2 * sizeof(unsigned int))
					elog(ERROR, "unexpected tape position");
				state->eof_reached = false;
			}
			else
			{
				nmoved = LogicalTapeBackspace(state->result_tape,
											  sizeof(unsigned int));
				if (nmoved == 0)
					return false;
				else if (nmoved != sizeof(unsigned int))
					elog(ERROR, "unexpected tape position");
			}
			tuplen = getlen(state->result_tape, false);

			/* back up over the previous tuple and its ending length word */
			nmoved = LogicalTapeBackspace(state->result_tape,
										  tuplen + 2 * sizeof(unsigned int));
			if (nmoved == tuplen + sizeof(unsigned int))
			{
				/* the previous tuple is the first one in the file */
			}
			else if (nmoved != tuplen + 2 * sizeof(unsigned int))
				elog(ERROR, "bogus tuple length in backward scan");

			/* reposition just past the initial length word and read the tuple */
			nmoved = LogicalTapeBackspace(state->result_tape, tuplen);
			if (nmoved != tuplen)
				elog(ERROR, "bogus tuple length in backward scan");
		case TSS_FINALMERGE:

			/* recycle the slab slot of the previously-returned tuple */
			if (state->lastReturnedTuple)
			{
				RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
				state->lastReturnedTuple = NULL;
			}

			/* This code should match the inner loop of mergeonerun(). */
			if (state->memtupcount > 0)
			{
				int			srcTapeIndex = state->memtuples[0].srctape;

				*stup = state->memtuples[0];

				/* when the source run is exhausted, one fewer input run remains */
				state->nInputRuns--;

				/* otherwise refill the heap from the same source tape */
				newtup.srctape = srcTapeIndex;
			}
	switch (state->status)
	{
		case TSS_SORTEDINMEM:
			if (state->memtupcount - state->current >= ntuples)
			{
				state->current += ntuples;
				return true;
			}
			state->current = state->memtupcount;
			state->eof_reached = true;

			/* don't let a bounded sort read past its bound */
			if (state->bounded && state->current >= state->bound)
				elog(ERROR, "retrieved too many tuples in a bounded sort");
			return false;

		case TSS_SORTEDONTAPE:
		case TSS_FINALMERGE:
			/* just fetch and discard the requested number of tuples */
			while (ntuples-- > 0)
	mOrder = allowedMem /
		(MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);

	nOutputRuns = (nInputRuns + nInputTapes - 1) / nInputTapes;

	nOutputTapes = Min(nOutputRuns, maxOutputTapes);
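	/*
	 * Worked example (illustrative, not from the original file): with
	 * BLCKSZ = 8192, each input tape needs MERGE_BUFFER_SIZE +
	 * TAPE_BUFFER_OVERHEAD = 33 blocks, about 264 KB, so a 4 MB budget
	 * gives a merge order of about 15 (the real code additionally clamps
	 * the result between MINORDER and MAXORDER).
	 */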
	elog(LOG, "worker %d switching to external sort with %d tapes: %s",
		 state->worker, state->maxTapes, pg_rusage_show(&state->ru_start));

	/* Create the tape set, shared with other workers if this is a parallel sort */
	state->tapeset = LogicalTapeSetCreate(false,
										  state->shared ? &state->shared->fileset : NULL,
										  state->worker);

	state->currentRun = 0;

	/* The merge starts with no input tapes; runs are written to output tapes first */
	state->inputTapes = NULL;
	state->nInputTapes = 0;
	state->nInputRuns = 0;

	state->nOutputTapes = 0;
	state->nOutputRuns = 0;
	if (state->nOutputTapes < state->maxTapes)
	{
		/* start a new output tape for the next run */
		state->nOutputTapes++;
		state->nOutputRuns++;
	}
	else
	{
		/* all tapes in use: append the run to an existing tape, round robin */
		state->nOutputRuns++;
	}
	if (numSlots > 0)
	{
		state->slabMemoryBegin = palloc(numSlots * SLAB_SLOT_SIZE);
		state->slabMemoryEnd = state->slabMemoryBegin +
			numSlots * SLAB_SLOT_SIZE;
		state->slabFreeHead = (SlabSlot *) state->slabMemoryBegin;

		/* chain all the slots onto the free list */
		p = state->slabMemoryBegin;
		for (i = 0; i < numSlots - 1; i++)
		{
			((SlabSlot *) p)->nextfree = (SlabSlot *) (p + SLAB_SLOT_SIZE);
			p += SLAB_SLOT_SIZE;
		}
		((SlabSlot *) p)->nextfree = NULL;
	}
	else
	{
		state->slabMemoryBegin = state->slabMemoryEnd = NULL;
		state->slabFreeHead = NULL;
	}
	state->slabAllocatorUsed = true;
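	/*
	 * Worked picture (not part of the original file): with numSlots = 3 the
	 * single arena is carved into three SLAB_SLOT_SIZE chunks,
	 * slot0 -> slot1 -> slot2 -> NULL, with slabFreeHead pointing at slot0,
	 * so tuplesort_readtup_alloc() and RELEASE_SLAB_SLOT() can pop and push
	 * in O(1) without any palloc/pfree traffic during the merge.
	 */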
	if (state->base.sortKeys != NULL &&
		state->base.sortKeys->abbrev_converter != NULL)
	{
		/*
		 * Once we reach the merge phase, abbreviated keys are no longer
		 * useful; revert to the full comparator for the rest of the sort.
		 */
		state->base.sortKeys->abbrev_converter = NULL;
		state->base.sortKeys->comparator = state->base.sortKeys->abbrev_full_comparator;

		/* Not strictly necessary, but be tidy */
		state->base.sortKeys->abbrev_abort = NULL;
		state->base.sortKeys->abbrev_full_comparator = NULL;
	}
	/* Release the memtuples array; the merge heap is rebuilt below */
	pfree(state->memtuples);
	state->memtuples = NULL;

	/* The slab allocator is only needed if tuples carry out-of-line data */
	if (state->base.tuples)
		init_slab_allocator(state, state->nOutputTapes + 1);
	else
		init_slab_allocator(state, 0);

	elog(LOG, "worker %d using %zu KB of memory for tape buffers",
		 state->worker, state->tape_buffer_mem / 1024);
		if (state->nInputRuns == 0)
		{
			int64		input_buffer_size;

			/* Close the old, emptied, input tapes */
			if (state->nInputTapes > 0)
			{
				for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)
					LogicalTapeClose(state->inputTapes[tapenum]);
				pfree(state->inputTapes);
			}

			/* The previous pass's output tapes become the next pass's inputs */
			state->nOutputTapes = 0;
			state->nOutputRuns = 0;

			elog(LOG, "starting merge pass of %d input runs on %d tapes, " INT64_FORMAT " KB of memory for each input tape: %s",
				 state->nInputRuns, state->nInputTapes, input_buffer_size / 1024,
				 pg_rusage_show(&state->ru_start));

			/* Prepare the new input tapes for the merge pass */
			for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)
				LogicalTapeRewindForRead(state->inputTapes[tapenum], input_buffer_size);
		}
		/* Done when the input is exhausted and at most one run remains */
		if (state->nInputRuns == 0 && state->nOutputRuns <= 1)
			break;

	/* Close the now-exhausted input tapes */
	for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)
		LogicalTapeClose(state->inputTapes[tapenum]);
	/* mergeonerun(): repeatedly write out the heap's smallest tuple */
	while (state->memtupcount > 0)
	{
		srcTapeIndex = state->memtuples[0].srctape;
		srcTape = state->inputTapes[srcTapeIndex];

		/* recycle the slab slot of the tuple we just wrote out */
		if (state->memtuples[0].tuple)
			RELEASE_SLAB_SLOT(state, state->memtuples[0].tuple);

		/* when the source run is exhausted, one fewer input run remains */
		state->nInputRuns--;
	}

	/* beginmerge(): load the first tuple from each input tape into the heap */
	for (srcTapeIndex = 0; srcTapeIndex < activeTapes; srcTapeIndex++)
	{
		if (mergereadnext(state, state->inputTapes[srcTapeIndex], &tup))
			tuplesort_heap_insert(state, &tup);
	}
	unsigned int tuplen;

	/* read next tuple, if any */
	if ((tuplen = getlen(srcTape, true)) == 0)
		return false;
	READTUP(state, stup, srcTape, tuplen);

	return true;
	/* Don't bother creating a completely empty run */
	if (state->memtupcount == 0 && state->currentRun > 0)
		return;

	if (state->currentRun == INT_MAX)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("cannot have more than %d runs for an external sort",
						INT_MAX)));

	if (state->currentRun > 0)
		selectnewtape(state);

	state->currentRun++;

	elog(LOG, "worker %d starting quicksort of run %d: %s",
		 state->worker, state->currentRun,
		 pg_rusage_show(&state->ru_start));

	/* Sort all tuples accumulated for this run */
	tuplesort_sort_memtuples(state);

	elog(LOG, "worker %d finished quicksort of run %d: %s",
		 state->worker, state->currentRun,
		 pg_rusage_show(&state->ru_start));

	memtupwrite = state->memtupcount;
	for (i = 0; i < memtupwrite; i++)
	{
		SortTuple  *stup = &state->memtuples[i];

		WRITETUP(state, state->destTape, stup);

		/* account for the out-of-line data being released */
		if (stup->tuple != NULL)
			FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
	}
	state->memtupcount = 0;

	markrunend(state->destTape);

	elog(LOG, "worker %d finished writing run %d to tape %d: %s",
	switch (state->status)
	{
		case TSS_SORTEDINMEM:
			state->current = 0;
			state->eof_reached = false;
			state->markpos_offset = 0;
			state->markpos_eof = false;
			break;
		case TSS_SORTEDONTAPE:
			LogicalTapeRewindForRead(state->result_tape, 0);
			state->eof_reached = false;
			state->markpos_block = 0L;
			state->markpos_offset = 0;
			state->markpos_eof = false;
			break;
		default:
			elog(ERROR, "invalid tuplesort state");
			break;
	}
	switch (state->status)
	{
		case TSS_SORTEDONTAPE:
			LogicalTapeTell(state->result_tape,
							&state->markpos_block,
							&state->markpos_offset);
			state->markpos_eof = state->eof_reached;
			break;
	}
	switch (state->status)
	{
		case TSS_SORTEDONTAPE:
			LogicalTapeSeek(state->result_tape,
							state->markpos_block,
							state->markpos_offset);
			state->eof_reached = state->markpos_eof;
			break;
	}
	if (state->isMaxSpaceDisk)
		stats->spaceType = SORT_SPACE_TYPE_DISK;
	else
		stats->spaceType = SORT_SPACE_TYPE_MEMORY;

	switch (state->maxSpaceStatus)
	{
		case TSS_SORTEDINMEM:
			if (state->boundUsed)
				stats->sortMethod = SORT_TYPE_TOP_N_HEAPSORT;
			else
				stats->sortMethod = SORT_TYPE_QUICKSORT;
			break;
	}
		case SORT_TYPE_STILL_IN_PROGRESS:
			return "still in progress";
		case SORT_TYPE_TOP_N_HEAPSORT:
			return "top-N heapsort";
		case SORT_TYPE_EXTERNAL_SORT:
			return "external sort";
		case SORT_TYPE_EXTERNAL_MERGE:
			return "external merge";
	int			tupcount = state->memtupcount;

	/*
	 * Rebuild memtuples[] as a bounded heap: tuples are inserted until the
	 * heap holds "bound" entries, after which the heap's top (the current
	 * worst tuple) is replaced only when a better tuple arrives.
	 */
	state->memtupcount = 0;		/* make the heap empty */
	for (i = 0; i < tupcount; i++)
	int			tupcount = state->memtupcount;

	/*
	 * Unheapify in place: repeatedly move the heap's top into the slot
	 * freed at the end of the array, leaving a sorted array behind.
	 */
	while (state->memtupcount > 1)
	{
		SortTuple	stup = state->memtuples[0];

		tuplesort_heap_delete_top(state);
		state->memtuples[state->memtupcount] = stup;
	}
	state->memtupcount = tupcount;

	state->boundUsed = true;
	if (state->memtupcount > 1)
	{
		/*
		 * Do we have the leading column's value or abbreviation in datum1,
		 * and is there a specialization for its comparator?
		 */
		if (state->base.haveDatum1 && state->base.sortKeys)
		{
			if (state->base.sortKeys[0].comparator == ssup_datum_unsigned_cmp)
			{
				qsort_tuple_unsigned(state->memtuples,
									 state->memtupcount,
									 state);
				return;
			}
#if SIZEOF_DATUM >= 8
			else if (state->base.sortKeys[0].comparator == ssup_datum_signed_cmp)
			{
				qsort_tuple_signed(state->memtuples,
								   state->memtupcount,
								   state);
				return;
			}
#endif
			else if (state->base.sortKeys[0].comparator == ssup_datum_int32_cmp)
			{
				qsort_tuple_int32(state->memtuples,
								  state->memtupcount,
								  state);
				return;
			}
		}

		/* Can we use the single-key sort function? */
		if (state->base.onlyKey != NULL)
			qsort_ssup(state->memtuples, state->memtupcount,
					   state->base.onlyKey);
		else
			qsort_tuple(state->memtuples,
						state->memtupcount,
						state->base.comparetup,
						state);
	}
	memtuples = state->memtuples;

	CHECK_FOR_INTERRUPTS();

	/*
	 * Sift-up the new entry, per Knuth 5.2.3 exercise 16.  Note that Knuth
	 * uses 1-based array indexes, not 0-based.
	 */
	j = state->memtupcount++;
	while (j > 0)
	{
		int			i = (j - 1) >> 1;

		if (COMPARETUP(state, tuple, &memtuples[i]) >= 0)
			break;
		memtuples[j] = memtuples[i];
		j = i;
	}
	memtuples[j] = *tuple;
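	/*
	 * Worked example (not part of the original file): inserting at j = 8
	 * compares the new tuple against its ancestors at (8-1)>>1 = 3, then
	 * (3-1)>>1 = 1, then 0, copying each losing parent down one level, so
	 * an insert costs O(log n) comparisons.
	 */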
	/* tuplesort_heap_delete_top() */
	if (--state->memtupcount <= 0)
		return;

	/* re-insert the heap's last tuple in place of the removed top */
	tuple = &memtuples[state->memtupcount];
	tuplesort_heap_replace_top(state, tuple);

	/* tuplesort_heap_replace_top(): sift-down from the root */
	n = state->memtupcount;
	i = 0;						/* i is where the "hole" is */
	for (;;)
	{
		unsigned int j = 2 * i + 1;

		if (j >= n)
			break;
		if (j + 1 < n &&
			COMPARETUP(state, &memtuples[j], &memtuples[j + 1]) > 0)
			j++;
		if (COMPARETUP(state, tuple, &memtuples[j]) <= 0)
			break;
		memtuples[i] = memtuples[j];
		i = j;
	}
	memtuples[i] = *tuple;
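	/*
	 * Worked example (not part of the original file): sifting down from
	 * i = 0 in a 7-element heap visits children 1 and 2, picks whichever
	 * sorts first, then continues with that child's children (3/4 or 5/6),
	 * so delete-top and replace-top also cost O(log n) comparisons.
	 */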
	/* reversedirection(): flip the sort direction of every key */
	for (nkey = 0; nkey < state->base.nKeys; nkey++, sortKey++)
	{
		sortKey->ssup_reverse = !sortKey->ssup_reverse;
		sortKey->ssup_nulls_first = !sortKey->ssup_nulls_first;
	}
	if (len == 0 && !eofOK)
		elog(ERROR, "unexpected end of data");

	/* markrunend(): a run's end is marked on tape by a zero length word */
	unsigned int len = 0;
	buf = state->slabFreeHead;
	/* Reuse this slot */
	state->slabFreeHead = buf->nextfree;
	return buf;
	shared->nTapes = nWorkers;
	for (i = 0; i < nWorkers; i++)
	{
		shared->tapes[i].firstblocknumber = 0L;
	}
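/*
 * Worked note (not part of the original file): tuplesort_estimate_shared()
 * sizes this structure as offsetof(Sharedsort, tapes) plus nWorkers *
 * sizeof(TapeShare), using add_size()/mul_size() to guard against overflow,
 * so the per-worker tapes[] entries initialized above have exactly one slot
 * per worker.
 */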
	/* free most remaining memory; be tidy */
	pfree(state->memtuples);
	state->memtuples = NULL;
	state->memtupsize = 0;
	int			nParticipants = state->nParticipants;
	int			workersFinished;

	Assert(nParticipants >= 1);

	SpinLockAcquire(&shared->mutex);
	workersFinished = shared->workersFinished;
	SpinLockRelease(&shared->mutex);

	if (nParticipants != workersFinished)
		elog(ERROR, "cannot take over tapes before all workers finish");

	/* currentRun reflects the number of worker runs we will merge */
	state->currentRun = nParticipants;

	/* make the state look as if we had just built the initial runs */
	state->inputTapes = NULL;
	state->nInputTapes = 0;
	state->nInputRuns = 0;

	state->nOutputTapes = nParticipants;
	state->nOutputRuns = nParticipants;

	/* import each worker's frozen result tape as one pre-sorted run */
	for (j = 0; j < nParticipants; j++)
		state->outputTapes[j] = LogicalTapeImport(state->tapeset, j, &shared->tapes[j]);
#if SIZEOF_DATUM >= 8			/* ssup_datum_signed_cmp() is only built for 8-byte Datums */