#define CLUSTER_SORT	3

/* 0 = serial sort, 1 = parallel worker, 2 = parallel leader (used by trace probes) */
#define PARALLEL_SORT(state)	((state)->shared == NULL ? 0 : \
								 (state)->worker >= 0 ? 1 : 2)

/* Initial size of the memtuples[] array, in number of SortTuples */
#define INITIAL_MEMTUPSIZE Max(1024, \
							   ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1)

/* GUC switch that lets the bounded-sort optimization be disabled for testing */
#ifdef DEBUG_BOUNDED_SORT
bool		optimize_bounded_sort = true;
#endif
#define SLAB_SLOT_SIZE		1024

#define TAPE_BUFFER_OVERHEAD		BLCKSZ
#define MERGE_BUFFER_SIZE			(BLCKSZ * 32)

	/* Tuplesortstate callback: read a stored tuple back from tape */
	void		(*readtup) (Tuplesortstate *state, SortTuple *stup,
							int tapenum, unsigned int len);
#define IS_SLAB_SLOT(state, tuple) \
	((char *) (tuple) >= (state)->slabMemoryBegin && \
	 (char *) (tuple) < (state)->slabMemoryEnd)

/*
 * Return the tuple's slot to the slab free list, or pfree it if it was
 * allocated outside the slab arena.
 */
#define RELEASE_SLAB_SLOT(state, tuple) \
	do { \
		SlabSlot *buf = (SlabSlot *) tuple; \
		\
		if (IS_SLAB_SLOT((state), buf)) \
		{ \
			buf->nextfree = (state)->slabFreeHead; \
			(state)->slabFreeHead = buf; \
		} else \
			pfree(buf); \
	} while(0)

#define COMPARETUP(state,a,b)	((*(state)->comparetup) (a, b, state))
#define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
#define WRITETUP(state,tape,stup)	((*(state)->writetup) (state, tape, stup))
#define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
#define LACKMEM(state)		((state)->availMem < 0 && !(state)->slabAllocatorUsed)
#define USEMEM(state,amt)	((state)->availMem -= (amt))
#define FREEMEM(state,amt)	((state)->availMem += (amt))
#define SERIAL(state)		((state)->shared == NULL)
#define WORKER(state)		((state)->shared && (state)->worker != -1)
#define LEADER(state)		((state)->shared && (state)->worker == -1)

#define LogicalTapeReadExact(tapeset, tapenum, ptr, len) \
	do { \
		if (LogicalTapeRead(tapeset, tapenum, ptr, len) != (size_t) (len)) \
			elog(ERROR, "unexpected end of data"); \
	} while(0)

static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
						 int tapenum, unsigned int len);
static void readtup_cluster(Tuplesortstate *state, SortTuple *stup,
							int tapenum, unsigned int len);
static void readtup_index(Tuplesortstate *state, SortTuple *stup,
						  int tapenum, unsigned int len);
static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
						  int tapenum, unsigned int len);
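/*
 * Illustrative sketch (not part of tuplesort.c): how the USEMEM/FREEMEM/
 * LACKMEM accounting pattern is typically used.  Every allocation charged
 * against availMem gets a matching credit when the memory is released; once
 * availMem goes negative, LACKMEM reports a deficit and the caller stops
 * accumulating tuples in memory (in tuplesort, puttuple_common() then spills
 * runs to tape).  The struct, macros, and functions below are hypothetical
 * stand-ins, not PostgreSQL APIs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct SketchSortState
{
	int64_t		availMem;			/* remaining memory budget, may go negative */
	bool		slabAllocatorUsed;	/* slab mode suspends the accounting check */
} SketchSortState;

#define SKETCH_USEMEM(state, amt)	((state)->availMem -= (int64_t) (amt))
#define SKETCH_FREEMEM(state, amt)	((state)->availMem += (int64_t) (amt))
#define SKETCH_LACKMEM(state) \
	((state)->availMem < 0 && !(state)->slabAllocatorUsed)

/* Allocate a buffer and charge it against the sort's memory budget. */
static void *
sketch_alloc(SketchSortState *state, size_t len)
{
	void	   *p = malloc(len);

	if (p != NULL)
		SKETCH_USEMEM(state, len);
	return p;
}

/* Release a buffer and credit its size back to the budget. */
static void
sketch_release(SketchSortState *state, void *p, size_t len)
{
	free(p);
	SKETCH_FREEMEM(state, len);
}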
#include "qsort_tuple.c"

/* tuplesort_begin_common() */
	if (coordinate && randomAccess)
		elog(ERROR, "random access disallowed under parallel sort");

/* tuplesort_begin_batch(): workMem must at least cover the initial memtuples array */
	if (LACKMEM(state))
		elog(ERROR, "insufficient memory allowed for sort");
Tuplesortstate *
tuplesort_begin_heap(TupleDesc tupDesc,
					 int nkeys, AttrNumber *attNums,
					 Oid *sortOperators, Oid *sortCollations,
					 bool *nullsFirstFlags,
					 int workMem, SortCoordinate coordinate, bool randomAccess)

		elog(LOG,
			 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
			 nkeys, workMem, randomAccess ? 't' : 'f');

	state->nKeys = nkeys;

	for (i = 0; i < nkeys; i++)
970 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
972 workMem, randomAccess ?
't' :
'f');
1017 for (i = 0; i < state->
nKeys; i++)
1039 pfree(indexScanKey);
1065 "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
1066 enforceUnique ?
't' :
'f',
1067 workMem, randomAccess ?
't' :
'f');
1095 for (i = 0; i < state->
nKeys; i++)
1117 pfree(indexScanKey);
1143 "begin index sort: high_mask = 0x%x, low_mask = 0x%x, " 1144 "max_buckets = 0x%x, workMem = %d, randomAccess = %c",
1148 workMem, randomAccess ?
't' :
'f');
1187 "begin index sort: workMem = %d, randomAccess = %c",
1188 workMem, randomAccess ?
't' :
'f');
1205 for (i = 0; i < state->
nKeys; i++)
Tuplesortstate *
tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
					  bool nullsFirstFlag, int workMem,
					  SortCoordinate coordinate, bool randomAccess)

		elog(LOG,
			 "begin datum sort: workMem = %d, randomAccess = %c",
			 workMem, randomAccess ? 't' : 'f');

	/* a pass-by-value datum lives entirely in SortTuple.datum1 */
	state->tuples = !typbyval;
/* tuplesort_set_bound() */
#ifdef DEBUG_BOUNDED_SORT
	/* honor the GUC that disables the optimization, for easier testing */
	if (!optimize_bounded_sort)
		return;
#endif

	/* we need to be able to compute bound * 2, so cap the accepted value */
	if (bound > (int64) (INT_MAX / 2))
		return;

	state->bound = (int) bound;
/* tuplesort_free(): trace_sort reporting */
		if (state->tapeset)
			elog(LOG, "%s of worker %d ended, %ld disk blocks used: %s",
				 SERIAL(state) ? "external sort" : "parallel external sort",
				 state->worker, spaceUsed, pg_rusage_show(&state->ru_start));
		else
			elog(LOG, "%s of worker %d ended, %ld KB used: %s",
				 SERIAL(state) ? "internal sort" : "unperformed parallel sort",
				 state->worker, spaceUsed, pg_rusage_show(&state->ru_start));

	TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);

	/* without TRACE_SORT, report zero space used to the probe */
	TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);

	if (state->estate != NULL)
		FreeExecutorState(state->estate);
	isSpaceDisk = false;
/* grow_memtuples(): choose a new size for the memtuples[] array */
	if (memNowUsed <= state->availMem)
	{
		/* still inside the memory budget: simply double the array size */
		if (memtupsize < INT_MAX / 2)
			newmemtupsize = memtupsize * 2;
		else
			newmemtupsize = INT_MAX;
	}
	else
	{
		/* past the budget: grow only in proportion to the total allowance */
		grow_ratio = (double) state->allowedMem / (double) memNowUsed;
		if (memtupsize * grow_ratio < INT_MAX)
			newmemtupsize = (int) (memtupsize * grow_ratio);
		else
			newmemtupsize = INT_MAX;
	}

	if (newmemtupsize <= memtupsize)
		goto noalloc;

	/* the enlargement itself must not drive availMem negative */
	if (state->availMem < (int64) ((newmemtupsize - memtupsize) *
								   sizeof(SortTuple)))
		goto noalloc;

	if (LACKMEM(state))
		elog(ERROR, "unexpected out-of-memory situation in tuplesort");
/* tuplesort_puttupleslot() */
	COPYTUP(state, &stup, (void *) slot);

/* tuplesort_putheaptuple() */
	COPYTUP(state, &stup, (void *) tup);

/* tuplesort_putindextuplevalues() */
	tuple->t_tid = *self;

	tuple = mtup->tuple;

/* tuplesort_putdatum() */
	if (isNull || !state->tuples)
/* puttuple_common(): switching to the bounded heap */
		elog(LOG, "switching to bounded heapsort at %d tuples: %s",
			 state->memtupcount,
			 pg_rusage_show(&state->ru_start));

/* tuplesort_performsort() */
		elog(LOG, "performsort of worker %d starting: %s",
			 state->worker, pg_rusage_show(&state->ru_start));

		elog(LOG, "performsort of worker %d done (except %d-way final merge): %s",
			 state->worker, state->activeTapes,
			 pg_rusage_show(&state->ru_start));

		elog(LOG, "performsort of worker %d done: %s",
			 state->worker, pg_rusage_show(&state->ru_start));
/* tuplesort_gettuple_common() */
	unsigned int tuplen;

	/* bounded sorts must not hand back more tuples than were requested */
	if (state->bounded && state->current >= state->bound)
		elog(ERROR, "retrieved too many tuples in a bounded sort");

	/* backward scan on tape (TSS_SORTEDONTAPE, randomAccess only) */
	if (state->eof_reached)
	{
		/* back up over the end-of-run marker and the last tuple's trailing length word */
		nmoved = LogicalTapeBackspace(state->tapeset,
									  state->result_tape,
									  2 * sizeof(unsigned int));
		if (nmoved == 0)
			return false;
		else if (nmoved != 2 * sizeof(unsigned int))
			elog(ERROR, "unexpected tape position");
		state->eof_reached = false;
	}
	else
	{
		/* back up over the previously returned tuple's trailing length word */
		nmoved = LogicalTapeBackspace(state->tapeset,
									  state->result_tape,
									  sizeof(unsigned int));
		if (nmoved == 0)
			return false;
		else if (nmoved != sizeof(unsigned int))
			elog(ERROR, "unexpected tape position");
		tuplen = getlen(state, state->result_tape, false);

		/* back up over that tuple plus the length word of the one before it */
		nmoved = LogicalTapeBackspace(state->tapeset,
									  state->result_tape,
									  tuplen + 2 * sizeof(unsigned int));
		if (nmoved == tuplen + sizeof(unsigned int))
		{
			/* no earlier length word: that tuple was the first in the file */
			return false;
		}
		else if (nmoved != tuplen + 2 * sizeof(unsigned int))
			elog(ERROR, "bogus tuple length in backward scan");
	}

	tuplen = getlen(state, state->result_tape, false);

	/* back up to just past the tuple's leading length word, then read it */
	nmoved = LogicalTapeBackspace(state->tapeset,
								  state->result_tape,
								  tuplen);
	if (nmoved != tuplen)
		elog(ERROR, "bogus tuple length in backward scan");
/* tuplesort_skiptuples() */
	if (state->bounded && state->current >= state->bound)
		elog(ERROR, "retrieved too many tuples in a bounded sort");

	while (ntuples-- > 0)

/* inittapes() */
		elog(LOG, "worker %d switching to external sort with %d tapes: %s",
			 state->worker, maxTapes, pg_rusage_show(&state->ru_start));

	for (j = 0; j < maxTapes; j++)

/* inittapestate(): charge the tape buffer overhead against the memory budget */
	USEMEM(state, tapeSpace);

/* init_slab_allocator() */
	USEMEM(state, numSlots * SLAB_SLOT_SIZE);

	/* chain all but the last slot into the free list */
	for (i = 0; i < numSlots - 1; i++)
/* mergeruns() */
	if (state->Level == 1)
	{
		numInputTapes = state->currentRun;
		numTapes = numInputTapes + 1;
	}

		elog(LOG, "worker %d using " INT64_FORMAT " KB of memory for read buffers among %d input tapes",
			 state->worker, state->availMem / 1024, numInputTapes);

	for (tapenum = 0; tapenum < state->tapeRange; tapenum++)

	bool		allOneRun = true;

	for (tapenum = 0; tapenum < state->tapeRange; tapenum++)

	bool		allDummy = true;

	for (tapenum = 0; tapenum < state->tapeRange; tapenum++)

	for (tapenum = 0; tapenum < state->tapeRange; tapenum++)

	if (--state->Level == 0)

	for (tapenum = state->tapeRange; tapenum > 0; tapenum--)

	for (tapenum = 0; tapenum < state->maxTapes; tapenum++)

/* mergeonerun() */
		elog(LOG, "worker %d finished %d-way merge step: %s", state->worker,
			 state->activeTapes, pg_rusage_show(&state->ru_start));

/* beginmerge() */
	for (tapenum = 0; tapenum < state->tapeRange; tapenum++)

	for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
/* mergereadnext(): load the next tuple from a source tape, if any */
	unsigned int tuplen;

	if ((tuplen = getlen(state, srcTape, true)) == 0)
	{
		/* a zero length word means this tape's run is exhausted */
		state->mergeactive[srcTape] = false;
		return false;
	}
	READTUP(state, stup, srcTape, tuplen);
/* dumptuples(): quicksort the in-memory tuples and write them out as a run */
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("cannot have more than %d runs for an external sort",
						INT_MAX)));

		elog(LOG, "worker %d starting quicksort of run %d: %s",
			 state->worker, state->currentRun,
			 pg_rusage_show(&state->ru_start));

		elog(LOG, "worker %d finished quicksort of run %d: %s",
			 state->worker, state->currentRun,
			 pg_rusage_show(&state->ru_start));

	for (i = 0; i < memtupwrite; i++)

		elog(LOG, "worker %d finished writing run %d to tape %d: %s",
			 state->worker, state->currentRun, state->destTape,
			 pg_rusage_show(&state->ru_start));
3408 return "still in progress";
3410 return "top-N heapsort";
3414 return "external sort";
3416 return "external merge";
	for (i = 0; i < tupcount; i++)
/* tuplesort_heap_insert(): sift the new tuple up toward the heap root */
	while (j > 0)
	{
		int			i = (j - 1) >> 1;	/* parent of slot j */

		if (COMPARETUP(state, tuple, &memtuples[i]) >= 0)
			break;
		memtuples[j] = memtuples[i];
		j = i;
	}
	memtuples[j] = *tuple;
/* tuplesort_heap_replace_top(): sift the replacement tuple down from the root */
	for (;;)
	{
		unsigned int j = 2 * i + 1;		/* left child of slot i */

		if (j >= n)
			break;
		if (j + 1 < n &&
			COMPARETUP(state, &memtuples[j], &memtuples[j + 1]) > 0)
			j++;						/* the right child sorts first */
		if (COMPARETUP(state, tuple, &memtuples[j]) <= 0)
			break;
		memtuples[i] = memtuples[j];
		i = j;
	}
	memtuples[i] = *tuple;
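/*
 * Illustrative sketch (not part of tuplesort.c): the same sift-up/sift-down
 * moves on a plain array of ints, so the heap invariant used by the merge
 * heap above can be read in isolation.  Function names are hypothetical.
 */
#include <stddef.h>

/* Insert v into a binary min-heap of n elements stored in heap[0..n-1]. */
static void
minheap_insert(int *heap, size_t n, int v)
{
	size_t		j = n;

	while (j > 0)
	{
		size_t		i = (j - 1) >> 1;	/* parent */

		if (v >= heap[i])
			break;
		heap[j] = heap[i];				/* pull the parent down */
		j = i;
	}
	heap[j] = v;
}

/* Replace the smallest element (the root) with v and restore heap order. */
static void
minheap_replace_top(int *heap, size_t n, int v)
{
	size_t		i = 0;

	for (;;)
	{
		size_t		j = 2 * i + 1;		/* left child */

		if (j >= n)
			break;
		if (j + 1 < n && heap[j + 1] < heap[j])
			j++;						/* pick the smaller child */
		if (v <= heap[j])
			break;
		heap[i] = heap[j];				/* pull the child up */
		i = j;
	}
	heap[i] = v;
}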
/* reversedirection() */
	for (nkey = 0; nkey < state->nKeys; nkey++, sortKey++)

/* getlen(): read a tuple's leading length word from a tape */
	if (LogicalTapeRead(state->tapeset, tapenum,
						&len, sizeof(len)) != sizeof(len))
		elog(ERROR, "unexpected end of tape");
	if (len == 0 && !eofOK)
		elog(ERROR, "unexpected end of data");

/* markrunend(): a zero length word terminates a run */
	unsigned int len = 0;

	LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
/* comparetup_heap() */
	/* re-compare the leading key at full resolution if it was abbreviated */
	datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
	datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);

	/* then compare the remaining sort keys until one differs */
	for (nkey = 1; nkey < state->nKeys; nkey++, sortKey++)
	{
		attno = sortKey->ssup_attno;

		datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
		datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);

		compare = ApplySortComparator(datum1, isnull1,
									  datum2, isnull2,
									  sortKey);
		if (compare != 0)
			return compare;
	}

/* copytup_heap() */
	stup->tuple = (void *) tuple;
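/*
 * Illustrative sketch (not part of tuplesort.c): the general shape of a
 * multi-key comparator.  The leading key is compared first (tuplesort caches
 * it in SortTuple.datum1 so most comparisons never touch the full tuple);
 * only on a tie are the remaining keys fetched and compared.  The row type
 * and key array below are hypothetical.
 */
typedef struct SketchRow
{
	int			keys[4];		/* column values, already extracted */
} SketchRow;

static int
sketch_compare_rows(const SketchRow *a, const SketchRow *b, int nkeys)
{
	int			nkey;

	for (nkey = 0; nkey < nkeys; nkey++)
	{
		if (a->keys[nkey] < b->keys[nkey])
			return -1;
		if (a->keys[nkey] > b->keys[nkey])
			return 1;
		/* equal on this key: fall through to the next one */
	}
	return 0;
}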
/* writetup_heap(): write only the MinimalTuple body, framed by length words */
	unsigned int tuplen = tupbodylen + sizeof(int);

	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) &tuplen, sizeof(tuplen));
	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) tupbody, tupbodylen);
	if (state->randomAccess)	/* trailing length word, for backward scans */
		LogicalTapeWrite(state->tapeset, tapenum,
						 (void *) &tuplen, sizeof(tuplen));

/* readtup_heap() */
	unsigned int tupbodylen = len - sizeof(int);
	unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
	MinimalTuple tuple = (MinimalTuple) readtup_alloc(state, tuplen);
	char	   *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;

	/* reconstruct the length field the writer did not store */
	tuple->t_len = tuplen;
	LogicalTapeReadExact(state->tapeset, tapenum,
						 tupbody, tupbodylen);
	if (state->randomAccess)	/* skip over the trailing length word */
		LogicalTapeReadExact(state->tapeset, tapenum,
							 &tuplen, sizeof(tuplen));
	stup->tuple = (void *) tuple;
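/*
 * Illustrative sketch (not part of tuplesort.c): the space trick used by
 * writetup_heap()/readtup_heap().  Only the bytes past a fixed header prefix
 * are written out; on read, a full record is re-allocated and the header
 * field that was not stored (here total_len) is recomputed.  The record
 * layout and buffer I/O below are hypothetical stand-ins.
 */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

typedef struct SketchRecord
{
	unsigned int total_len;		/* not stored; recomputed on read */
	char		payload[];		/* the part that is actually stored */
} SketchRecord;

#define SKETCH_DATA_OFFSET offsetof(SketchRecord, payload)

/* Serialize: copy only the payload into out[] (caller sizes the buffer). */
static unsigned int
sketch_write(const SketchRecord *rec, char *out)
{
	unsigned int bodylen = rec->total_len - SKETCH_DATA_OFFSET;

	memcpy(out, rec->payload, bodylen);
	return bodylen;
}

/* Deserialize: rebuild a full record from a stored payload of bodylen bytes. */
static SketchRecord *
sketch_read(const char *in, unsigned int bodylen)
{
	SketchRecord *rec = malloc(SKETCH_DATA_OFFSET + bodylen);	/* error handling omitted */

	rec->total_len = SKETCH_DATA_OFFSET + bodylen;
	memcpy(rec->payload, in, bodylen);
	return rec;
}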
/* comparetup_cluster() */
		datum1 = heap_getattr(ltup, leading, tupDesc, &isnull1);
		datum2 = heap_getattr(rtup, leading, tupDesc, &isnull2);

	if (compare != 0 || state->nKeys == 1)
		return compare;

	/* plain-column index: compare the remaining heap attributes directly */
	for (; nkey < state->nKeys; nkey++, sortKey++)

	/* expression index: form both index tuples, then compare their columns */
	FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
				   l_index_values, l_index_isnull);
	FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
				   r_index_values, r_index_isnull);

	for (; nkey < state->nKeys; nkey++, sortKey++)
	{
		compare = ApplySortComparator(l_index_values[nkey],
									  l_index_isnull[nkey],
									  r_index_values[nkey],
									  r_index_isnull[nkey],
									  sortKey);
		if (compare != 0)
			return compare;
	}

/* copytup_cluster() */
	stup->tuple = (void *) tuple;
/* writetup_cluster() */
	LogicalTapeWrite(state->tapeset, tapenum,
					 &tuplen, sizeof(tuplen));
	if (state->randomAccess)	/* trailing length word, for backward scans */
		LogicalTapeWrite(state->tapeset, tapenum,
						 &tuplen, sizeof(tuplen));

/* readtup_cluster() */
	/* rebuild the HeapTupleData header around the body read from tape */
	tuple->t_len = t_len;

	if (state->randomAccess)	/* skip over the trailing length word */
		LogicalTapeReadExact(state->tapeset, tapenum,
							 &tuplen, sizeof(tuplen));
	stup->tuple = (void *) tuple;
/* comparetup_index_btree() */
	bool		equal_hasnull = false;

	keysz = state->nKeys;

	equal_hasnull = true;		/* the leading key is a "true" NULL */

	for (nkey = 2; nkey <= keysz; nkey++, sortKey++)

	equal_hasnull = true;		/* a later key is a "true" NULL */

	/* enforceUnique and all keys equal with no NULLs: report the duplicate */
	Assert(tuple1 != tuple2);

	ereport(ERROR,
			(errcode(ERRCODE_UNIQUE_VIOLATION),
			 errmsg("could not create unique index \"%s\"",
					RelationGetRelationName(state->indexRel)),
			 key_desc ? errdetail("Key %s is duplicated.", key_desc) :
			 errdetail("Duplicate keys exist.")));

	/* equal keys: break the tie with the heap TID, block number first */
	if (blk1 != blk2)
		return (blk1 < blk2) ? -1 : 1;

	if (pos1 != pos2)
		return (pos1 < pos2) ? -1 : 1;

/* comparetup_index_hash() */
	if (bucket1 > bucket2)
		return 1;
	else if (bucket1 < bucket2)
		return -1;

	/* same bucket: fall back to the TID comparison, as in the btree case */
	if (blk1 != blk2)
		return (blk1 < blk2) ? -1 : 1;

	if (pos1 != pos2)
		return (pos1 < pos2) ? -1 : 1;
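/*
 * Illustrative sketch (not part of tuplesort.c): breaking comparison ties
 * with a physical position, as comparetup_index_btree()/_hash() do with the
 * heap TID.  Sorting equal keys by (block, offset) makes the result order
 * deterministic and keeps equal entries in physical order.  The struct and
 * function names are hypothetical.
 */
#include <stdint.h>

typedef struct SketchIndexEntry
{
	int			key;			/* indexed value */
	uint32_t	block;			/* physical position: block number */
	uint16_t	offset;			/* physical position: offset within the block */
} SketchIndexEntry;

static int
sketch_compare_entries(const SketchIndexEntry *a, const SketchIndexEntry *b)
{
	/* primary: the key itself */
	if (a->key != b->key)
		return (a->key < b->key) ? -1 : 1;

	/* tie-break: block number, then offset within the block */
	if (a->block != b->block)
		return (a->block < b->block) ? -1 : 1;
	if (a->offset != b->offset)
		return (a->offset < b->offset) ? -1 : 1;
	return 0;
}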
/* copytup_index() */
	elog(ERROR, "copytup_index() should not be called");

/* writetup_index() */
	unsigned int tuplen;

	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) &tuplen, sizeof(tuplen));
	if (state->randomAccess)	/* trailing length word, for backward scans */
		LogicalTapeWrite(state->tapeset, tapenum,
						 (void *) &tuplen, sizeof(tuplen));

/* readtup_index() */
	unsigned int tuplen = len - sizeof(unsigned int);

	if (state->randomAccess)	/* skip over the trailing length word */
		LogicalTapeReadExact(state->tapeset, tapenum,
							 &tuplen, sizeof(tuplen));
	stup->tuple = (void *) tuple;
/* copytup_datum() */
	elog(ERROR, "copytup_datum() should not be called");

/* writetup_datum() */
	unsigned int tuplen;
	unsigned int writtenlen;

	/* pass-by-value datum: the value itself is written, exactly sizeof(Datum) */
	tuplen = sizeof(Datum);

	/* pass-by-reference datum: write the palloc'd copy the tuple points to */
	waddr = stup->tuple;

	writtenlen = tuplen + sizeof(unsigned int);

	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) &writtenlen, sizeof(writtenlen));
	if (state->randomAccess)	/* trailing length word, for backward scans */
		LogicalTapeWrite(state->tapeset, tapenum,
						 (void *) &writtenlen, sizeof(writtenlen));

/* readtup_datum() */
	unsigned int tuplen = len - sizeof(unsigned int);

	stup->tuple = raddr;

	if (state->randomAccess)	/* skip over the trailing length word */
		LogicalTapeReadExact(state->tapeset, tapenum,
							 &tuplen, sizeof(tuplen));
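/*
 * Illustrative sketch (not part of tuplesort.c): the two cases handled by
 * writetup_datum()/readtup_datum().  A fixed-width, pass-by-value datum is
 * written as its raw bytes; a variable-length, pass-by-reference datum is
 * written as a length word followed by the pointed-to bytes.  The tagged
 * value type and buffer below are hypothetical stand-ins.
 */
#include <stdint.h>
#include <string.h>

typedef struct SketchDatum
{
	int			byval;			/* 1 = value stored inline, 0 = pointer */
	uint64_t	value;			/* inline value when byval */
	const void *ptr;			/* external bytes when !byval */
	uint32_t	ptr_len;		/* length of the external bytes */
} SketchDatum;

/* Append the datum to out[] (caller sizes the buffer); return bytes written. */
static size_t
sketch_write_datum(const SketchDatum *d, unsigned char *out)
{
	if (d->byval)
	{
		/* fixed width: the value itself is the on-tape representation */
		memcpy(out, &d->value, sizeof(d->value));
		return sizeof(d->value);
	}
	else
	{
		/* variable width: length word, then the referenced bytes */
		memcpy(out, &d->ptr_len, sizeof(d->ptr_len));
		memcpy(out + sizeof(d->ptr_len), d->ptr, d->ptr_len);
		return sizeof(d->ptr_len) + d->ptr_len;
	}
}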
/* tuplesort_initialize_shared() */
	shared->nTapes = nWorkers;
	for (i = 0; i < nWorkers; i++)

/* leader_takeover_tapes() */
	int			workersFinished;

	Assert(nParticipants >= 1);

	if (nParticipants != workersFinished)
		elog(ERROR, "cannot take over tapes before all workers finish");

	for (j = 0; j < state->maxTapes; j++)
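/*
 * Illustrative sketch (not part of tuplesort.c): the guard pattern used by
 * leader_takeover_tapes().  The leader reads the workers-finished counter
 * under the same lock the workers use to bump it, and refuses to start the
 * merge until every launched participant has flushed its run.  The shared
 * struct and pthread locking below are hypothetical stand-ins for the
 * spinlock-protected Sharedsort state.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct SketchShared
{
	pthread_mutex_t lock;		/* init with PTHREAD_MUTEX_INITIALIZER */
	int			workersFinished;	/* incremented by each finishing worker */
} SketchShared;

/* Called by a worker once its sorted run is safely written out. */
static void
sketch_worker_done(SketchShared *shared)
{
	pthread_mutex_lock(&shared->lock);
	shared->workersFinished++;
	pthread_mutex_unlock(&shared->lock);
}

/* Called by the leader before it takes over the workers' output. */
static void
sketch_leader_takeover(SketchShared *shared, int nParticipants)
{
	int			workersFinished;

	pthread_mutex_lock(&shared->lock);
	workersFinished = shared->workersFinished;
	pthread_mutex_unlock(&shared->lock);

	if (nParticipants != workersFinished)
	{
		fprintf(stderr, "cannot take over tapes before all workers finish\n");
		abort();
	}
	/* ...safe to build the merge over the workers' output here... */
}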