#define INITIAL_MEMTUPSIZE Max(1024, \
        ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1)
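/*
 * Illustrative arithmetic (assuming a 64-bit build where sizeof(SortTuple)
 * is 24 bytes and ALLOCSET_SEPARATE_THRESHOLD is 8192): 8192 / 24 + 1 = 342,
 * so Max(1024, 342) leaves INITIAL_MEMTUPSIZE at 1024 entries; the threshold
 * term would only win if SortTuple were much smaller.
 */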
#ifdef DEBUG_BOUNDED_SORT
bool        optimize_bounded_sort = true;
#endif
#define SLAB_SLOT_SIZE 1024
#define TAPE_BUFFER_OVERHEAD        BLCKSZ
#define MERGE_BUFFER_SIZE           (BLCKSZ * 32)
#define IS_SLAB_SLOT(state, tuple) \
    ((char *) (tuple) >= (state)->slabMemoryBegin && \
     (char *) (tuple) < (state)->slabMemoryEnd)
#define RELEASE_SLAB_SLOT(state, tuple) \
    do { \
        SlabSlot *buf = (SlabSlot *) tuple; \
        \
        if (IS_SLAB_SLOT((state), buf)) \
        { \
            buf->nextfree = (state)->slabFreeHead; \
            (state)->slabFreeHead = buf; \
        } \
        else \
            pfree(buf); \
    } while(0)
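/*
 * Usage sketch (illustrative): once a tuple read from tape is no longer
 * needed, its slot is returned to the slab free list, or pfree'd if it was
 * an oversized palloc'd chunk that never came from the slab:
 *
 *      if (stup.tuple != NULL)
 *          RELEASE_SLAB_SLOT(state, stup.tuple);
 */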
#define REMOVEABBREV(state,stup,count) ((*(state)->base.removeabbrev) (state, stup, count))
#define COMPARETUP(state,a,b)   ((*(state)->base.comparetup) (a, b, state))
#define WRITETUP(state,tape,stup)   ((*(state)->base.writetup) (state, tape, stup))
#define READTUP(state,stup,tape,len) ((*(state)->base.readtup) (state, stup, tape, len))
#define FREESTATE(state)    ((state)->base.freestate ? (*(state)->base.freestate) (state) : (void) 0)
#define LACKMEM(state)      ((state)->availMem < 0 && !(state)->slabAllocatorUsed)
#define USEMEM(state,amt)   ((state)->availMem -= (amt))
#define FREEMEM(state,amt)  ((state)->availMem += (amt))
#define SERIAL(state)       ((state)->shared == NULL)
#define WORKER(state)       ((state)->shared && (state)->worker != -1)
#define LEADER(state)       ((state)->shared && (state)->worker == -1)
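/*
 * Illustrative pairing of the memory-accounting macros (a sketch of the
 * pattern used throughout this file, not a complete excerpt): each
 * allocation charged against workMem is bracketed by USEMEM/FREEMEM using
 * the chunk's actual size, and LACKMEM() reports when the budget is
 * exhausted:
 *
 *      state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));
 *      USEMEM(state, GetMemoryChunkSpace(state->memtuples));
 *      ...
 *      FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
 *      pfree(state->memtuples);
 */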
    /* qsort_tuple_unsigned_compare: compare the leading datum1 keys */
    compare = ApplyUnsignedSortComparator(a->datum1, a->isnull1,
                                          b->datum1, b->isnull1,
                                          &state->base.sortKeys[0]);

    /* No other keys to sort on, so no tiebreak is needed */
    if (state->base.onlyKey != NULL)
        return 0;

#if SIZEOF_DATUM >= 8
    /* qsort_tuple_signed_compare: compare the leading datum1 keys */
    compare = ApplySignedSortComparator(a->datum1, a->isnull1,
                                        b->datum1, b->isnull1,
                                        &state->base.sortKeys[0]);

    if (state->base.onlyKey != NULL)
        return 0;
#endif

    /* qsort_tuple_int32_compare: compare the leading datum1 keys */
    compare = ApplyInt32SortComparator(a->datum1, a->isnull1,
                                       b->datum1, b->isnull1,
                                       &state->base.sortKeys[0]);

    if (state->base.onlyKey != NULL)
        return 0;
#define ST_SORT qsort_tuple_unsigned
#define ST_ELEMENT_TYPE SortTuple
#define ST_COMPARE(a, b, state) qsort_tuple_unsigned_compare(a, b, state)
#define ST_COMPARE_ARG_TYPE Tuplesortstate
#define ST_CHECK_FOR_INTERRUPTS
#define ST_SCOPE static
#define ST_DEFINE
#include "lib/sort_template.h"

#if SIZEOF_DATUM >= 8
#define ST_SORT qsort_tuple_signed
#define ST_ELEMENT_TYPE SortTuple
#define ST_COMPARE(a, b, state) qsort_tuple_signed_compare(a, b, state)
#define ST_COMPARE_ARG_TYPE Tuplesortstate
#define ST_CHECK_FOR_INTERRUPTS
#define ST_SCOPE static
#define ST_DEFINE
#include "lib/sort_template.h"
#endif

#define ST_SORT qsort_tuple_int32
#define ST_ELEMENT_TYPE SortTuple
#define ST_COMPARE(a, b, state) qsort_tuple_int32_compare(a, b, state)
#define ST_COMPARE_ARG_TYPE Tuplesortstate
#define ST_CHECK_FOR_INTERRUPTS
#define ST_SCOPE static
#define ST_DEFINE
#include "lib/sort_template.h"

#define ST_SORT qsort_tuple
#define ST_ELEMENT_TYPE SortTuple
#define ST_COMPARE_RUNTIME_POINTER
#define ST_COMPARE_ARG_TYPE Tuplesortstate
#define ST_CHECK_FOR_INTERRUPTS
#define ST_SCOPE static
#define ST_DECLARE
#define ST_DEFINE
#include "lib/sort_template.h"

#define ST_SORT qsort_ssup
#define ST_ELEMENT_TYPE SortTuple
#define ST_COMPARE(a, b, ssup) \
    ApplySortComparator((a)->datum1, (a)->isnull1, \
                        (b)->datum1, (b)->isnull1, (ssup))
#define ST_COMPARE_ARG_TYPE SortSupportData
#define ST_CHECK_FOR_INTERRUPTS
#define ST_SCOPE static
#define ST_DEFINE
#include "lib/sort_template.h"
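/*
 * Each inclusion of lib/sort_template.h above expands into a specialized
 * qsort implementation named by ST_SORT.  Illustrative call (the same form
 * appears later in this file, in tuplesort_sort_memtuples): sorting the
 * in-memory array with the single-key SortSupport comparator baked in at
 * compile time:
 *
 *      qsort_ssup(state->memtuples, state->memtupcount,
 *                 state->base.onlyKey);
 */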
        elog(ERROR, "random access disallowed under parallel sort");
    state->base.sortopt = sortopt;
    state->base.tuples = true;
    state->abbrevNext = 10;

    state->allowedMem = Max(workMem, 64) * (int64) 1024;
    state->base.sortcontext = sortcontext;
    state->base.maincontext = maincontext;

    state->memtuples = NULL;

    /* serial sort */
    state->shared = NULL;
    state->nParticipants = -1;

    /* parallel worker */
    state->nParticipants = -1;
    state->bounded = false;
    state->boundUsed = false;

    state->tapeset = NULL;

    state->memtupcount = 0;

    state->growmemtuples = true;
    state->slabAllocatorUsed = false;

    state->memtuples = NULL;

    if (state->memtuples == NULL)

        elog(ERROR, "insufficient memory allowed for sort");

    state->currentRun = 0;

    state->result_tape = NULL;
#ifdef DEBUG_BOUNDED_SORT
    /* Honor the GUC that disables bounded sorts (for easy testing) */
    if (!optimize_bounded_sort)
        return;
#endif

    /* bound * 2 must not overflow int, so limit the setting */
    if (bound > (int64) (INT_MAX / 2))
        return;

    state->bounded = true;
    state->bound = (int) bound;
    state->base.sortKeys->abbrev_converter = NULL;
    if (state->base.sortKeys->abbrev_full_comparator)
        state->base.sortKeys->comparator = state->base.sortKeys->abbrev_full_comparator;

    state->base.sortKeys->abbrev_abort = NULL;
    state->base.sortKeys->abbrev_full_comparator = NULL;
    return state->boundUsed;
        spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;

        elog(LOG, "%s of worker %d ended, %ld disk blocks used: %s",
             SERIAL(state) ? "external sort" : "parallel external sort",

        elog(LOG, "%s of worker %d ended, %ld KB used: %s",
             SERIAL(state) ? "internal sort" : "unperformed parallel sort",

    TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);

    TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
        isSpaceDisk = false;
        spaceUsed = state->allowedMem - state->availMem;

    if ((isSpaceDisk && !state->isMaxSpaceDisk) ||
        (isSpaceDisk == state->isMaxSpaceDisk && spaceUsed > state->maxSpace))

        state->maxSpace = spaceUsed;
        state->isMaxSpaceDisk = isSpaceDisk;
    state->lastReturnedTuple = NULL;
    state->slabMemoryBegin = NULL;
    state->slabMemoryEnd = NULL;
    state->slabFreeHead = NULL;
    int         memtupsize = state->memtupsize;
    int64       memNowUsed = state->allowedMem - state->availMem;

    if (!state->growmemtuples)

    if (memNowUsed <= state->availMem)

        if (memtupsize < INT_MAX / 2)
            newmemtupsize = memtupsize * 2;

            newmemtupsize = INT_MAX;
            state->growmemtuples = false;

        grow_ratio = (double) state->allowedMem / (double) memNowUsed;
        if (memtupsize * grow_ratio < INT_MAX)
            newmemtupsize = (int) (memtupsize * grow_ratio);

            newmemtupsize = INT_MAX;
            state->growmemtuples = false;

    if (newmemtupsize <= memtupsize)

        state->growmemtuples = false;

    if (state->availMem < (int64) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))

    state->memtupsize = newmemtupsize;

        elog(ERROR, "unexpected out-of-memory situation in tuplesort");

    state->growmemtuples = false;
    if (tuple->tuple != NULL)

        state->base.sortKeys);
    switch (state->status)

            if (state->memtupcount >= state->memtupsize - 1)

            state->memtuples[state->memtupcount++] = *tuple;

            if (state->bounded &&

                    elog(LOG, "switching to bounded heapsort at %d tuples: %s",

            state->memtuples[state->memtupcount++] = *tuple;

    Assert(state->base.sortKeys[0].abbrev_converter != NULL);
    Assert(state->base.sortKeys[0].abbrev_abort != NULL);
    Assert(state->base.sortKeys[0].abbrev_full_comparator != NULL);

    state->abbrevNext *= 2;

    if (!state->base.sortKeys->abbrev_abort(state->memtupcount,
                                            state->base.sortKeys))

    state->base.sortKeys[0].comparator = state->base.sortKeys[0].abbrev_full_comparator;
    state->base.sortKeys[0].abbrev_converter = NULL;

    state->base.sortKeys[0].abbrev_abort = NULL;
    state->base.sortKeys[0].abbrev_full_comparator = NULL;
        elog(LOG, "performsort of worker %d starting: %s",

    switch (state->status)

            state->eof_reached = false;
            state->markpos_block = 0L;
            state->markpos_offset = 0;
            state->markpos_eof = false;

            state->eof_reached = false;
            state->markpos_offset = 0;
            state->markpos_eof = false;

            state->eof_reached = false;
            state->markpos_block = 0L;
            state->markpos_offset = 0;
            state->markpos_eof = false;

            elog(LOG, "performsort of worker %d done (except %d-way final merge): %s",

            elog(LOG, "performsort of worker %d done: %s",
    unsigned int tuplen;

    switch (state->status)

                state->eof_reached = true;

                    elog(ERROR, "retrieved too many tuples in a bounded sort");

                if (state->current <= 0)

                if (state->eof_reached)
                    state->eof_reached = false;

                    if (state->current <= 0)

                *stup = state->memtuples[state->current - 1];

            if (state->lastReturnedTuple)

                state->lastReturnedTuple = NULL;

            if (state->eof_reached)

            if ((tuplen = getlen(state->result_tape, true)) != 0)

                state->eof_reached = true;

            if (state->eof_reached)

                                          2 * sizeof(unsigned int));

            else if (nmoved != 2 * sizeof(unsigned int))
                elog(ERROR, "unexpected tape position");
            state->eof_reached = false;

                                          sizeof(unsigned int));

            else if (nmoved != sizeof(unsigned int))
                elog(ERROR, "unexpected tape position");

                                      tuplen + 2 * sizeof(unsigned int));
            if (nmoved == tuplen + sizeof(unsigned int))

            else if (nmoved != tuplen + 2 * sizeof(unsigned int))
                elog(ERROR, "bogus tuple length in backward scan");

            if (nmoved != tuplen)
                elog(ERROR, "bogus tuple length in backward scan");

            if (state->lastReturnedTuple)

                state->lastReturnedTuple = NULL;

            if (state->memtupcount > 0)

                int         srcTapeIndex = state->memtuples[0].srctape;

                *stup = state->memtuples[0];

                    state->nInputRuns--;

                    newtup.srctape = srcTapeIndex;
    switch (state->status)

            if (state->memtupcount - state->current >= ntuples)
                state->current += ntuples;

            state->eof_reached = true;

                elog(ERROR, "retrieved too many tuples in a bounded sort");

            while (ntuples-- > 0)
    mOrder = allowedMem / (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
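/*
 * Worked example (assuming the default BLCKSZ of 8192): each input tape
 * costs MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD = 32 * 8192 + 8192 =
 * 270336 bytes, so a 64 MB allowedMem yields roughly
 * 67108864 / 270336 = 248 input tapes before the result is clamped to the
 * supported minimum/maximum merge order.
 */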
    nOutputRuns = (nInputRuns + nInputTapes - 1) / nInputTapes;

    nOutputTapes = Min(nOutputRuns, maxOutputTapes);
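/*
 * Illustrative arithmetic: merging 35 input runs with 16 input tapes gives
 * nOutputRuns = (35 + 16 - 1) / 16 = 3 output runs for this pass, so only
 * Min(3, maxOutputTapes) output tapes need buffers.
 */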
        elog(LOG, "worker %d switching to external sort with %d tapes: %s",

                                          state->shared ? &state->shared->fileset : NULL,

    state->currentRun = 0;

    state->inputTapes = NULL;
    state->nInputTapes = 0;
    state->nInputRuns = 0;

    state->nOutputTapes = 0;
    state->nOutputRuns = 0;
        state->nOutputTapes++;
        state->nOutputRuns++;

        state->nOutputRuns++;
        state->slabMemoryEnd = state->slabMemoryBegin +
            numSlots * SLAB_SLOT_SIZE;

        p = state->slabMemoryBegin;
        for (i = 0; i < numSlots - 1; i++)

        state->slabMemoryBegin = state->slabMemoryEnd = NULL;
        state->slabFreeHead = NULL;

    state->slabAllocatorUsed = true;
    if (state->base.sortKeys != NULL &&
        state->base.sortKeys->abbrev_converter != NULL)
    {
        state->base.sortKeys->abbrev_converter = NULL;
        state->base.sortKeys->comparator = state->base.sortKeys->abbrev_full_comparator;

        state->base.sortKeys->abbrev_abort = NULL;
        state->base.sortKeys->abbrev_full_comparator = NULL;
    }
    state->memtuples = NULL;

    if (state->base.tuples)

        elog(LOG, "worker %d using %zu KB of memory for tape buffers",
             state->worker, state->tape_buffer_mem / 1024);
        if (state->nInputRuns == 0)

            int64       input_buffer_size;

            if (state->nInputTapes > 0)

                for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)

            state->nOutputTapes = 0;
            state->nOutputRuns = 0;

                elog(LOG, "starting merge pass of %d input runs on %d tapes, " INT64_FORMAT " KB of memory for each input tape: %s",
                     state->nInputRuns, state->nInputTapes, input_buffer_size / 1024,

            for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)

        if (state->nInputRuns == 0 && state->nOutputRuns <= 1)

    for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)
    while (state->memtupcount > 0)

        srcTapeIndex = state->memtuples[0].srctape;
        srcTape = state->inputTapes[srcTapeIndex];

        if (state->memtuples[0].tuple)

            state->nInputRuns--;
    for (srcTapeIndex = 0; srcTapeIndex < activeTapes; srcTapeIndex++)

    unsigned int tuplen;

    if ((tuplen = getlen(srcTape, true)) == 0)
    if (state->memtupcount == 0 && state->currentRun > 0)

    if (state->currentRun == INT_MAX)
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("cannot have more than %d runs for an external sort",
                        INT_MAX)));

    if (state->currentRun > 0)

    state->currentRun++;

        elog(LOG, "worker %d starting quicksort of run %d: %s",

        elog(LOG, "worker %d finished quicksort of run %d: %s",

    memtupwrite = state->memtupcount;
    for (i = 0; i < memtupwrite; i++)

        if (stup->tuple != NULL)

    state->memtupcount = 0;

        elog(LOG, "worker %d finished writing run %d to tape %d: %s",
    switch (state->status)

            state->eof_reached = false;
            state->markpos_offset = 0;
            state->markpos_eof = false;

            state->eof_reached = false;
            state->markpos_block = 0L;
            state->markpos_offset = 0;
            state->markpos_eof = false;

    switch (state->status)

            LogicalTapeTell(state->result_tape,
                            &state->markpos_block,
                            &state->markpos_offset);

    switch (state->status)

            LogicalTapeSeek(state->result_tape,
                            state->markpos_block,
                            state->markpos_offset);
    if (state->isMaxSpaceDisk)

    switch (state->maxSpaceStatus)

            if (state->boundUsed)

            return "still in progress";

            return "top-N heapsort";

            return "external sort";

            return "external merge";
    int         tupcount = state->memtupcount;

    state->memtupcount = 0;
    for (i = 0; i < tupcount; i++)

    int         tupcount = state->memtupcount;

    while (state->memtupcount > 1)

    state->memtupcount = tupcount;

    state->boundUsed = true;
    if (state->memtupcount > 1)

        if (state->base.haveDatum1 && state->base.sortKeys)

                qsort_tuple_unsigned(state->memtuples,

#if SIZEOF_DATUM >= 8
            else if (state->base.sortKeys[0].comparator == ssup_datum_signed_cmp)

                qsort_tuple_signed(state->memtuples,
#endif

                qsort_tuple_int32(state->memtuples,

        if (state->base.onlyKey != NULL)

            qsort_ssup(state->memtuples, state->memtupcount,
                       state->base.onlyKey);

            qsort_tuple(state->memtuples,
                        state->base.comparetup,
    memtuples = state->memtuples;

    j = state->memtupcount++;

        int         i = (j - 1) >> 1;

        memtuples[j] = memtuples[i];

    memtuples[j] = *tuple;

    if (--state->memtupcount <= 0)

    tuple = &memtuples[state->memtupcount];

    n = state->memtupcount;

        unsigned int j = 2 * i + 1;

        memtuples[i] = memtuples[j];

    memtuples[i] = *tuple;
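/*
 * Index arithmetic used by the binary heap above (0-based array): node j's
 * parent is (j - 1) >> 1 and node i's children are 2*i + 1 and 2*i + 2.
 * For example, the element at index 4 has its parent at index 1 and its
 * children at indexes 9 and 10.
 */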
    for (nkey = 0; nkey < state->base.nKeys; nkey++, sortKey++)
    if (len == 0 && !eofOK)

    unsigned int len = 0;

        state->slabFreeHead = buf->nextfree;
    shared->nTapes = nWorkers;
    for (i = 0; i < nWorkers; i++)
    state->memtuples = NULL;
    state->memtupsize = 0;
    int         nParticipants = state->nParticipants;
    int         workersFinished;

    Assert(nParticipants >= 1);

    if (nParticipants != workersFinished)
        elog(ERROR, "cannot take over tapes before all workers finish");

    state->currentRun = nParticipants;

    state->inputTapes = NULL;
    state->nInputTapes = 0;
    state->nInputRuns = 0;

    state->nOutputTapes = nParticipants;
    state->nOutputRuns = nParticipants;

    for (j = 0; j < nParticipants; j++)
#if SIZEOF_DATUM >= 8