/*
 * Initial allocated size of the memtuples[] array, in SortTuple slots.
 * Sized as at least 1024, and at least one slot past
 * ALLOCSET_SEPARATE_THRESHOLD bytes' worth of SortTuples -- presumably so
 * the array is large enough to be allocated as its own block rather than
 * from a pool chunk (NOTE(review): confirm against memutils.h semantics).
 */
119 #define INITIAL_MEMTUPSIZE Max(1024, \
120 ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1)
/*
 * Debug switch: when built with DEBUG_BOUNDED_SORT, this flag lets the
 * bounded-sort (top-N heapsort) optimization be disabled at run time; it is
 * consulted by the "if (!optimize_bounded_sort)" guard in the bound-setting
 * code further below.  NOTE(review): the matching #endif lies outside the
 * visible span.
 */
127 #ifdef DEBUG_BOUNDED_SORT
128 bool optimize_bounded_sort =
true;
/*
 * SLAB_SLOT_SIZE: byte size of each fixed-size slab slot handed out by the
 * slab allocator used during merging (see IS_SLAB_SLOT / RELEASE_SLAB_SLOT).
 *
 * TAPE_BUFFER_OVERHEAD: memory charged per logical tape, one disk block.
 *
 * MERGE_BUFFER_SIZE: 32 disk blocks -- presumably the per-input-tape read
 * buffer budget during merge passes (NOTE(review): confirm at use sites,
 * which are outside this view).
 */
143 #define SLAB_SLOT_SIZE 1024
179 #define TAPE_BUFFER_OVERHEAD BLCKSZ
180 #define MERGE_BUFFER_SIZE (BLCKSZ * 32)
/*
 * True iff "tuple" points into the slab arena, i.e. its address lies in the
 * half-open range [slabMemoryBegin, slabMemoryEnd).  Evaluates "tuple" and
 * "state" more than once -- do not pass expressions with side effects.
 */
373 #define IS_SLAB_SLOT(state, tuple) \
374 ((char *) (tuple) >= (state)->slabMemoryBegin && \
375 (char *) (tuple) < (state)->slabMemoryEnd)
/*
 * Return a tuple's memory to the slab allocator: if the pointer came from
 * the slab arena (IS_SLAB_SLOT), push its slot onto the slabFreeHead
 * singly-linked free list.
 *
 * NOTE(review): the extraction has dropped several continuation lines of
 * this macro (original lines 382, 384, 386, 389-392) -- presumably the
 * do { } while(0) wrapper, braces, and a non-slab fallback branch; confirm
 * against the upstream file.  Code below kept byte-identical to what is
 * visible.
 */
381 #define RELEASE_SLAB_SLOT(state, tuple) \
383 SlabSlot *buf = (SlabSlot *) tuple; \
385 if (IS_SLAB_SLOT((state), buf)) \
387 buf->nextfree = (state)->slabFreeHead; \
388 (state)->slabFreeHead = buf; \
/*
 * Dispatch macros: invoke the per-tuple-kind callbacks stored in
 * state->base (removeabbrev/comparetup/writetup/readtup/freestate).
 * FREESTATE is a no-op when no freestate callback is registered.
 *
 * Memory accounting: USEMEM/FREEMEM adjust availMem; LACKMEM reports
 * over-budget only while the slab allocator is not in use.
 *
 * Parallel-sort role tests: SERIAL = no shared state; WORKER = shared state
 * with a worker number; LEADER = shared state with worker == -1.
 */
393 #define REMOVEABBREV(state,stup,count) ((*(state)->base.removeabbrev) (state, stup, count))
394 #define COMPARETUP(state,a,b) ((*(state)->base.comparetup) (a, b, state))
395 #define WRITETUP(state,tape,stup) ((*(state)->base.writetup) (state, tape, stup))
396 #define READTUP(state,stup,tape,len) ((*(state)->base.readtup) (state, stup, tape, len))
397 #define FREESTATE(state) ((state)->base.freestate ? (*(state)->base.freestate) (state) : (void) 0)
398 #define LACKMEM(state) ((state)->availMem < 0 && !(state)->slabAllocatorUsed)
399 #define USEMEM(state,amt) ((state)->availMem -= (amt))
400 #define FREEMEM(state,amt) ((state)->availMem += (amt))
401 #define SERIAL(state) ((state)->shared == NULL)
402 #define WORKER(state) ((state)->shared && (state)->worker != -1)
403 #define LEADER(state) ((state)->shared && (state)->worker == -1)
498 b->datum1,
b->isnull1,
499 &
state->base.sortKeys[0]);
507 if (
state->base.onlyKey != NULL)
513 #if SIZEOF_DATUM >= 8
520 compare = ApplySignedSortComparator(
a->datum1,
a->isnull1,
521 b->datum1,
b->isnull1,
522 &
state->base.sortKeys[0]);
531 if (
state->base.onlyKey != NULL)
545 b->datum1,
b->isnull1,
546 &
state->base.sortKeys[0]);
555 if (
state->base.onlyKey != NULL)
/*
 * Parameter sets for specialized qsort instantiations.  Each group defines
 * the ST_* macros consumed by an #include of the sort template header
 * (the #include and #undef lines are outside this view -- NOTE(review):
 * confirm against lib/sort_template.h).
 */

/* Specialization for a leading key compared as unsigned datums. */
570 #define ST_SORT qsort_tuple_unsigned
571 #define ST_ELEMENT_TYPE SortTuple
572 #define ST_COMPARE(a, b, state) qsort_tuple_unsigned_compare(a, b, state)
573 #define ST_COMPARE_ARG_TYPE Tuplesortstate
574 #define ST_CHECK_FOR_INTERRUPTS
575 #define ST_SCOPE static

/* Signed-datum specialization, only when Datum is at least 8 bytes. */
579 #if SIZEOF_DATUM >= 8
580 #define ST_SORT qsort_tuple_signed
581 #define ST_ELEMENT_TYPE SortTuple
582 #define ST_COMPARE(a, b, state) qsort_tuple_signed_compare(a, b, state)
583 #define ST_COMPARE_ARG_TYPE Tuplesortstate
584 #define ST_CHECK_FOR_INTERRUPTS
585 #define ST_SCOPE static

/* int32 leading-key specialization. */
590 #define ST_SORT qsort_tuple_int32
591 #define ST_ELEMENT_TYPE SortTuple
592 #define ST_COMPARE(a, b, state) qsort_tuple_int32_compare(a, b, state)
593 #define ST_COMPARE_ARG_TYPE Tuplesortstate
594 #define ST_CHECK_FOR_INTERRUPTS
595 #define ST_SCOPE static

/* General case: comparator supplied through a runtime function pointer. */
599 #define ST_SORT qsort_tuple
600 #define ST_ELEMENT_TYPE SortTuple
601 #define ST_COMPARE_RUNTIME_POINTER
602 #define ST_COMPARE_ARG_TYPE Tuplesortstate
603 #define ST_CHECK_FOR_INTERRUPTS
604 #define ST_SCOPE static

/*
 * Datum-only sort: applies the SortSupport comparator directly to
 * datum1/isnull1, bypassing the Tuplesortstate dispatch.
 */
609 #define ST_SORT qsort_ssup
610 #define ST_ELEMENT_TYPE SortTuple
611 #define ST_COMPARE(a, b, ssup) \
612 ApplySortComparator((a)->datum1, (a)->isnull1, \
613 (b)->datum1, (b)->isnull1, (ssup))
614 #define ST_COMPARE_ARG_TYPE SortSupportData
615 #define ST_CHECK_FOR_INTERRUPTS
616 #define ST_SCOPE static
649 elog(
ERROR,
"random access disallowed under parallel sort");
685 state->base.sortopt = sortopt;
686 state->base.tuples =
true;
687 state->abbrevNext = 10;
695 state->allowedMem =
Max(workMem, 64) * (int64) 1024;
696 state->base.sortcontext = sortcontext;
697 state->base.maincontext = maincontext;
704 state->memtuples = NULL;
719 state->shared = NULL;
721 state->nParticipants = -1;
728 state->nParticipants = -1;
782 state->bounded =
false;
783 state->boundUsed =
false;
787 state->tapeset = NULL;
789 state->memtupcount = 0;
795 state->growmemtuples =
true;
796 state->slabAllocatorUsed =
false;
800 state->memtuples = NULL;
803 if (
state->memtuples == NULL)
811 elog(
ERROR,
"insufficient memory allowed for sort");
813 state->currentRun = 0;
820 state->result_tape = NULL;
853 #ifdef DEBUG_BOUNDED_SORT
855 if (!optimize_bounded_sort)
860 if (bound > (int64) (INT_MAX / 2))
863 state->bounded =
true;
864 state->bound = (int) bound;
871 state->base.sortKeys->abbrev_converter = NULL;
872 if (
state->base.sortKeys->abbrev_full_comparator)
873 state->base.sortKeys->comparator =
state->base.sortKeys->abbrev_full_comparator;
876 state->base.sortKeys->abbrev_abort = NULL;
877 state->base.sortKeys->abbrev_full_comparator = NULL;
888 return state->boundUsed;
908 spaceUsed = (
state->allowedMem -
state->availMem + 1023) / 1024;
928 elog(
LOG,
"%s of worker %d ended, %lld disk blocks used: %s",
929 SERIAL(
state) ?
"external sort" :
"parallel external sort",
932 elog(
LOG,
"%s of worker %d ended, %lld KB used: %s",
933 SERIAL(
state) ?
"internal sort" :
"unperformed parallel sort",
937 TRACE_POSTGRESQL_SORT_DONE(
state->tapeset != NULL, spaceUsed);
944 TRACE_POSTGRESQL_SORT_DONE(
state->tapeset != NULL, 0L);
1004 isSpaceDisk =
false;
1005 spaceUsed =
state->allowedMem -
state->availMem;
1016 if ((isSpaceDisk && !
state->isMaxSpaceDisk) ||
1017 (isSpaceDisk ==
state->isMaxSpaceDisk && spaceUsed >
state->maxSpace))
1019 state->maxSpace = spaceUsed;
1020 state->isMaxSpaceDisk = isSpaceDisk;
1045 state->lastReturnedTuple = NULL;
1046 state->slabMemoryBegin = NULL;
1047 state->slabMemoryEnd = NULL;
1048 state->slabFreeHead = NULL;
1070 int memtupsize =
state->memtupsize;
1071 int64 memNowUsed =
state->allowedMem -
state->availMem;
1074 if (!
state->growmemtuples)
1078 if (memNowUsed <= state->availMem)
1084 if (memtupsize < INT_MAX / 2)
1085 newmemtupsize = memtupsize * 2;
1088 newmemtupsize = INT_MAX;
1089 state->growmemtuples =
false;
1122 grow_ratio = (double)
state->allowedMem / (
double) memNowUsed;
1123 if (memtupsize * grow_ratio < INT_MAX)
1124 newmemtupsize = (int) (memtupsize * grow_ratio);
1126 newmemtupsize = INT_MAX;
1129 state->growmemtuples =
false;
1133 if (newmemtupsize <= memtupsize)
1146 state->growmemtuples =
false;
1160 if (
state->availMem < (int64) ((newmemtupsize - memtupsize) *
sizeof(
SortTuple)))
1165 state->memtupsize = newmemtupsize;
1171 elog(
ERROR,
"unexpected out-of-memory situation in tuplesort");
1176 state->growmemtuples =
false;
1191 if (tuple->
tuple != NULL)
1209 state->base.sortKeys);
1225 switch (
state->status)
1236 if (
state->memtupcount >=
state->memtupsize - 1)
1241 state->memtuples[
state->memtupcount++] = *tuple;
1255 if (
state->bounded &&
1261 elog(
LOG,
"switching to bounded heapsort at %d tuples: %s",
1319 state->memtuples[
state->memtupcount++] = *tuple;
1337 Assert(
state->base.sortKeys[0].abbrev_converter != NULL);
1338 Assert(
state->base.sortKeys[0].abbrev_abort != NULL);
1339 Assert(
state->base.sortKeys[0].abbrev_full_comparator != NULL);
1348 state->abbrevNext *= 2;
1354 if (!
state->base.sortKeys->abbrev_abort(
state->memtupcount,
1355 state->base.sortKeys))
1362 state->base.sortKeys[0].comparator =
state->base.sortKeys[0].abbrev_full_comparator;
1363 state->base.sortKeys[0].abbrev_converter = NULL;
1365 state->base.sortKeys[0].abbrev_abort = NULL;
1366 state->base.sortKeys[0].abbrev_full_comparator = NULL;
1385 elog(
LOG,
"performsort of worker %d starting: %s",
1389 switch (
state->status)
1424 state->eof_reached =
false;
1425 state->markpos_block = 0L;
1426 state->markpos_offset = 0;
1427 state->markpos_eof =
false;
1440 state->eof_reached =
false;
1441 state->markpos_offset = 0;
1442 state->markpos_eof =
false;
1455 state->eof_reached =
false;
1456 state->markpos_block = 0L;
1457 state->markpos_offset = 0;
1458 state->markpos_eof =
false;
1470 elog(
LOG,
"performsort of worker %d done (except %d-way final merge): %s",
1474 elog(
LOG,
"performsort of worker %d done: %s",
1493 unsigned int tuplen;
1498 switch (
state->status)
1510 state->eof_reached =
true;
1518 elog(
ERROR,
"retrieved too many tuples in a bounded sort");
1524 if (
state->current <= 0)
1531 if (
state->eof_reached)
1532 state->eof_reached =
false;
1536 if (
state->current <= 0)
1539 *stup =
state->memtuples[
state->current - 1];
1552 if (
state->lastReturnedTuple)
1555 state->lastReturnedTuple = NULL;
1560 if (
state->eof_reached)
1563 if ((tuplen =
getlen(
state->result_tape,
true)) != 0)
1578 state->eof_reached =
true;
1589 if (
state->eof_reached)
1597 2 *
sizeof(
unsigned int));
1600 else if (nmoved != 2 *
sizeof(
unsigned int))
1601 elog(
ERROR,
"unexpected tape position");
1602 state->eof_reached =
false;
1611 sizeof(
unsigned int));
1614 else if (nmoved !=
sizeof(
unsigned int))
1615 elog(
ERROR,
"unexpected tape position");
1622 tuplen + 2 *
sizeof(
unsigned int));
1623 if (nmoved == tuplen +
sizeof(
unsigned int))
1634 else if (nmoved != tuplen + 2 *
sizeof(
unsigned int))
1635 elog(
ERROR,
"bogus tuple length in backward scan");
1647 if (nmoved != tuplen)
1648 elog(
ERROR,
"bogus tuple length in backward scan");
1668 if (
state->lastReturnedTuple)
1671 state->lastReturnedTuple = NULL;
1677 if (
state->memtupcount > 0)
1679 int srcTapeIndex =
state->memtuples[0].srctape;
1683 *stup =
state->memtuples[0];
1702 state->nInputRuns--;
1711 newtup.
srctape = srcTapeIndex;
1742 switch (
state->status)
1745 if (
state->memtupcount -
state->current >= ntuples)
1747 state->current += ntuples;
1751 state->eof_reached =
true;
1759 elog(
ERROR,
"retrieved too many tuples in a bounded sort");
1771 while (ntuples-- > 0)
1823 mOrder = allowedMem /
1864 nOutputRuns = (nInputRuns + nInputTapes - 1) / nInputTapes;
1866 nOutputTapes =
Min(nOutputRuns, maxOutputTapes);
1903 elog(
LOG,
"worker %d switching to external sort with %d tapes: %s",
1911 state->shared ? &
state->shared->fileset : NULL,
1914 state->currentRun = 0;
1919 state->inputTapes = NULL;
1920 state->nInputTapes = 0;
1921 state->nInputRuns = 0;
1924 state->nOutputTapes = 0;
1925 state->nOutputRuns = 0;
1985 state->nOutputTapes++;
1986 state->nOutputRuns++;
1995 state->nOutputRuns++;
2011 state->slabMemoryEnd =
state->slabMemoryBegin +
2016 p =
state->slabMemoryBegin;
2017 for (
i = 0;
i < numSlots - 1;
i++)
2026 state->slabMemoryBegin =
state->slabMemoryEnd = NULL;
2027 state->slabFreeHead = NULL;
2029 state->slabAllocatorUsed =
true;
2046 if (
state->base.sortKeys != NULL &&
state->base.sortKeys->abbrev_converter != NULL)
2054 state->base.sortKeys->abbrev_converter = NULL;
2055 state->base.sortKeys->comparator =
state->base.sortKeys->abbrev_full_comparator;
2058 state->base.sortKeys->abbrev_abort = NULL;
2059 state->base.sortKeys->abbrev_full_comparator = NULL;
2074 state->memtuples = NULL;
2089 if (
state->base.tuples)
2116 elog(
LOG,
"worker %d using %zu KB of memory for tape buffers",
2117 state->worker,
state->tape_buffer_mem / 1024);
2128 if (
state->nInputRuns == 0)
2130 int64 input_buffer_size;
2133 if (
state->nInputTapes > 0)
2135 for (tapenum = 0; tapenum <
state->nInputTapes; tapenum++)
2151 state->nOutputTapes = 0;
2152 state->nOutputRuns = 0;
2165 elog(
LOG,
"starting merge pass of %d input runs on %d tapes, " INT64_FORMAT " KB of memory for each input tape: %s",
2166 state->nInputRuns,
state->nInputTapes, input_buffer_size / 1024,
2171 for (tapenum = 0; tapenum <
state->nInputTapes; tapenum++)
2203 if (
state->nInputRuns == 0 &&
state->nOutputRuns <= 1)
2218 for (tapenum = 0; tapenum <
state->nInputTapes; tapenum++)
2244 while (
state->memtupcount > 0)
2249 srcTapeIndex =
state->memtuples[0].srctape;
2250 srcTape =
state->inputTapes[srcTapeIndex];
2254 if (
state->memtuples[0].tuple)
2269 state->nInputRuns--;
2296 for (srcTapeIndex = 0; srcTapeIndex < activeTapes; srcTapeIndex++)
2316 unsigned int tuplen;
2319 if ((tuplen =
getlen(srcTape,
true)) == 0)
2354 if (
state->memtupcount == 0 &&
state->currentRun > 0)
2363 if (
state->currentRun == INT_MAX)
2365 (
errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
2366 errmsg(
"cannot have more than %d runs for an external sort",
2369 if (
state->currentRun > 0)
2372 state->currentRun++;
2376 elog(
LOG,
"worker %d starting quicksort of run %d: %s",
2389 elog(
LOG,
"worker %d finished quicksort of run %d: %s",
2394 memtupwrite =
state->memtupcount;
2395 for (
i = 0;
i < memtupwrite;
i++)
2405 if (stup->
tuple != NULL)
2409 state->memtupcount = 0;
2424 elog(
LOG,
"worker %d finished writing run %d to tape %d: %s",
2440 switch (
state->status)
2444 state->eof_reached =
false;
2445 state->markpos_offset = 0;
2446 state->markpos_eof =
false;
2450 state->eof_reached =
false;
2451 state->markpos_block = 0L;
2452 state->markpos_offset = 0;
2453 state->markpos_eof =
false;
2473 switch (
state->status)
2481 &
state->markpos_block,
2482 &
state->markpos_offset);
2504 switch (
state->status)
2512 state->markpos_block,
2513 state->markpos_offset);
2545 if (
state->isMaxSpaceDisk)
2551 switch (
state->maxSpaceStatus)
2554 if (
state->boundUsed)
2580 return "still in progress";
2582 return "top-N heapsort";
2586 return "external sort";
2588 return "external merge";
2621 int tupcount =
state->memtupcount;
2632 state->memtupcount = 0;
2633 for (
i = 0;
i < tupcount;
i++)
2670 int tupcount =
state->memtupcount;
2682 while (
state->memtupcount > 1)
2690 state->memtupcount = tupcount;
2699 state->boundUsed =
true;
2712 if (
state->memtupcount > 1)
2718 if (
state->base.haveDatum1 &&
state->base.sortKeys)
2722 qsort_tuple_unsigned(
state->memtuples,
2727 #if SIZEOF_DATUM >= 8
2728 else if (
state->base.sortKeys[0].comparator == ssup_datum_signed_cmp)
2730 qsort_tuple_signed(
state->memtuples,
2738 qsort_tuple_int32(
state->memtuples,
2746 if (
state->base.onlyKey != NULL)
2748 qsort_ssup(
state->memtuples,
state->memtupcount,
2749 state->base.onlyKey);
2753 qsort_tuple(
state->memtuples,
2755 state->base.comparetup,
2776 memtuples =
state->memtuples;
2785 j =
state->memtupcount++;
2788 int i = (
j - 1) >> 1;
2792 memtuples[
j] = memtuples[
i];
2795 memtuples[
j] = *tuple;
2811 if (--
state->memtupcount <= 0)
2818 tuple = &memtuples[
state->memtupcount];
2845 n =
state->memtupcount;
2849 unsigned int j = 2 *
i + 1;
2858 memtuples[
i] = memtuples[
j];
2861 memtuples[
i] = *tuple;
2875 for (nkey = 0; nkey <
state->base.nKeys; nkey++, sortKey++)
2895 if (
len == 0 && !eofOK)
2903 unsigned int len = 0;
2931 state->slabFreeHead =
buf->nextfree;
2980 shared->
nTapes = nWorkers;
2981 for (
i = 0;
i < nWorkers;
i++)
3056 state->memtuples = NULL;
3057 state->memtupsize = 0;
3104 int nParticipants =
state->nParticipants;
3105 int workersFinished;
3109 Assert(nParticipants >= 1);
3115 if (nParticipants != workersFinished)
3116 elog(
ERROR,
"cannot take over tapes before all workers finish");
3130 state->currentRun = nParticipants;
3140 state->inputTapes = NULL;
3141 state->nInputTapes = 0;
3142 state->nInputRuns = 0;
3145 state->nOutputTapes = nParticipants;
3146 state->nOutputRuns = nParticipants;
3148 for (
j = 0;
j < nParticipants;
j++)
3181 #if SIZEOF_DATUM >= 8
void PrepareTempTablespaces(void)
#define FLEXIBLE_ARRAY_MEMBER
#define pg_attribute_always_inline
int errcode(int sqlerrcode)
int errmsg(const char *fmt,...)
#define ereport(elevel,...)
MemoryContext GenerationContextCreate(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
static int compare(const void *arg1, const void *arg2)
Assert(fmt[strlen(fmt) - 1] !='\n')
LogicalTape * LogicalTapeCreate(LogicalTapeSet *lts)
void LogicalTapeRewindForRead(LogicalTape *lt, size_t buffer_size)
void LogicalTapeSetForgetFreeSpace(LogicalTapeSet *lts)
size_t LogicalTapeBackspace(LogicalTape *lt, size_t size)
size_t LogicalTapeRead(LogicalTape *lt, void *ptr, size_t size)
int64 LogicalTapeSetBlocks(LogicalTapeSet *lts)
void LogicalTapeClose(LogicalTape *lt)
void LogicalTapeSetClose(LogicalTapeSet *lts)
void LogicalTapeSeek(LogicalTape *lt, int64 blocknum, int offset)
void LogicalTapeTell(LogicalTape *lt, int64 *blocknum, int *offset)
void LogicalTapeWrite(LogicalTape *lt, const void *ptr, size_t size)
LogicalTapeSet * LogicalTapeSetCreate(bool preallocate, SharedFileSet *fileset, int worker)
void LogicalTapeFreeze(LogicalTape *lt, TapeShare *share)
LogicalTape * LogicalTapeImport(LogicalTapeSet *lts, int worker, TapeShare *shared)
void MemoryContextReset(MemoryContext context)
void pfree(void *pointer)
Size GetMemoryChunkSpace(void *pointer)
void * palloc0(Size size)
MemoryContext CurrentMemoryContext
void * MemoryContextAlloc(MemoryContext context, Size size)
void MemoryContextDelete(MemoryContext context)
void MemoryContextResetOnly(MemoryContext context)
void * repalloc_huge(void *pointer, Size size)
#define AllocSetContextCreate
#define ALLOCSET_DEFAULT_SIZES
#define CHECK_FOR_INTERRUPTS()
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
const char * pg_rusage_show(const PGRUsage *ru0)
void pg_rusage_init(PGRUsage *ru0)
static int64 DatumGetInt64(Datum X)
static int32 DatumGetInt32(Datum X)
void SharedFileSetAttach(SharedFileSet *fileset, dsm_segment *seg)
void SharedFileSetInit(SharedFileSet *fileset, dsm_segment *seg)
Size add_size(Size s1, Size s2)
Size mul_size(Size s1, Size s2)
static int ApplyUnsignedSortComparator(Datum datum1, bool isNull1, Datum datum2, bool isNull2, SortSupport ssup)
static int ApplyInt32SortComparator(Datum datum1, bool isNull1, Datum datum2, bool isNull2, SortSupport ssup)
#define SpinLockInit(lock)
#define SpinLockRelease(lock)
#define SpinLockAcquire(lock)
TapeShare tapes[FLEXIBLE_ARRAY_MEMBER]
TuplesortMethod sortMethod
TuplesortSpaceType spaceType
LogicalTape ** inputTapes
LogicalTape ** outputTapes
TupSortStatus maxSpaceStatus
LogicalTape * result_tape
void tuplesort_rescan(Tuplesortstate *state)
void tuplesort_performsort(Tuplesortstate *state)
int tuplesort_merge_order(int64 allowedMem)
#define TAPE_BUFFER_OVERHEAD
static void tuplesort_heap_delete_top(Tuplesortstate *state)
#define INITIAL_MEMTUPSIZE
static unsigned int getlen(LogicalTape *tape, bool eofOK)
void tuplesort_initialize_shared(Sharedsort *shared, int nWorkers, dsm_segment *seg)
#define COMPARETUP(state, a, b)
Tuplesortstate * tuplesort_begin_common(int workMem, SortCoordinate coordinate, int sortopt)
static void selectnewtape(Tuplesortstate *state)
void tuplesort_reset(Tuplesortstate *state)
static void markrunend(LogicalTape *tape)
bool tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
#define REMOVEABBREV(state, stup, count)
static void reversedirection(Tuplesortstate *state)
#define USEMEM(state, amt)
static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple)
static bool grow_memtuples(Tuplesortstate *state)
int ssup_datum_unsigned_cmp(Datum x, Datum y, SortSupport ssup)
static void beginmerge(Tuplesortstate *state)
static void make_bounded_heap(Tuplesortstate *state)
bool tuplesort_used_bound(Tuplesortstate *state)
#define WRITETUP(state, tape, stup)
static void sort_bounded_heap(Tuplesortstate *state)
static int worker_get_identifier(Tuplesortstate *state)
static void mergeonerun(Tuplesortstate *state)
#define FREEMEM(state, amt)
const char * tuplesort_space_type_name(TuplesortSpaceType t)
static void inittapestate(Tuplesortstate *state, int maxTapes)
static void leader_takeover_tapes(Tuplesortstate *state)
Size tuplesort_estimate_shared(int nWorkers)
void tuplesort_get_stats(Tuplesortstate *state, TuplesortInstrumentation *stats)
static void tuplesort_sort_memtuples(Tuplesortstate *state)
void tuplesort_end(Tuplesortstate *state)
static void inittapes(Tuplesortstate *state, bool mergeruns)
void tuplesort_markpos(Tuplesortstate *state)
void tuplesort_puttuple_common(Tuplesortstate *state, SortTuple *tuple, bool useAbbrev)
#define MERGE_BUFFER_SIZE
#define READTUP(state, stup, tape, len)
int ssup_datum_int32_cmp(Datum x, Datum y, SortSupport ssup)
bool tuplesort_gettuple_common(Tuplesortstate *state, bool forward, SortTuple *stup)
static int64 merge_read_buffer_size(int64 avail_mem, int nInputTapes, int nInputRuns, int maxOutputTapes)
static bool mergereadnext(Tuplesortstate *state, LogicalTape *srcTape, SortTuple *stup)
static void tuplesort_updatemax(Tuplesortstate *state)
static void worker_freeze_result_tape(Tuplesortstate *state)
#define RELEASE_SLAB_SLOT(state, tuple)
void tuplesort_attach_shared(Sharedsort *shared, dsm_segment *seg)
static void worker_nomergeruns(Tuplesortstate *state)
static pg_attribute_always_inline int qsort_tuple_unsigned_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
static void tuplesort_heap_replace_top(Tuplesortstate *state, SortTuple *tuple)
void tuplesort_restorepos(Tuplesortstate *state)
static pg_attribute_always_inline int qsort_tuple_int32_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
static void mergeruns(Tuplesortstate *state)
void * tuplesort_readtup_alloc(Tuplesortstate *state, Size tuplen)
static void tuplesort_begin_batch(Tuplesortstate *state)
void tuplesort_set_bound(Tuplesortstate *state, int64 bound)
static void init_slab_allocator(Tuplesortstate *state, int numSlots)
const char * tuplesort_method_name(TuplesortMethod m)
static bool consider_abort_common(Tuplesortstate *state)
static void tuplesort_free(Tuplesortstate *state)
static void dumptuples(Tuplesortstate *state, bool alltuples)
#define TUPLESORT_RANDOMACCESS
#define TUPLESORT_ALLOWBOUNDED
@ SORT_TYPE_EXTERNAL_SORT
@ SORT_TYPE_TOP_N_HEAPSORT
@ SORT_TYPE_STILL_IN_PROGRESS
@ SORT_TYPE_EXTERNAL_MERGE
char buffer[SLAB_SLOT_SIZE]
union SlabSlot * nextfree