nodeSetOp.c
1/*-------------------------------------------------------------------------
2 *
3 * nodeSetOp.c
4 * Routines to handle INTERSECT and EXCEPT selection
5 *
6 * The input of a SetOp node consists of two relations (outer and inner)
7 * with identical column sets. In EXCEPT queries the outer relation is
8 * always the left side, while in INTERSECT cases the planner tries to
9 * make the outer relation be the smaller of the two inputs.
10 *
11 * In SETOP_SORTED mode, each input has been sorted according to all the
12 * grouping columns. The SetOp node essentially performs a merge join on
13 * the grouping columns, except that it is only interested in counting how
14 * many tuples from each input match. Then it is a simple matter to emit
15 * the output demanded by the SQL spec for INTERSECT, INTERSECT ALL, EXCEPT,
16 * or EXCEPT ALL.
17 *
18 * In SETOP_HASHED mode, the inputs are delivered in no particular order.
19 * We read the outer relation and build a hash table in memory with one entry
20 * for each group of identical tuples, counting the number of tuples in the
21 * group. Then we read the inner relation and count the number of tuples
22 * matching each outer group. (We can disregard any tuples appearing only
23 * in the inner relation, since they cannot result in any output.) After
24 * seeing all the input, we scan the hashtable and generate the correct
25 * output using those counts.
26 *
27 * This node type is not used for UNION or UNION ALL, since those can be
28 * implemented more cheaply (there's no need to count the number of
29 * matching tuples).
30 *
31 * Note that SetOp does no qual checking nor projection. The delivered
32 * output tuples are just copies of the first-to-arrive tuple in each
33 * input group.
34 *
35 *
36 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
37 * Portions Copyright (c) 1994, Regents of the University of California
38 *
39 *
40 * IDENTIFICATION
41 * src/backend/executor/nodeSetOp.c
42 *
43 *-------------------------------------------------------------------------
44 */
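As a side note to the header comment above, the SETOP_SORTED strategy it describes can be illustrated with a minimal standalone sketch that is not part of this file: two already-sorted integer inputs are merged, duplicates in each group are counted, and the INTERSECT ALL rule (emit the smaller count) is applied. All names below are invented for the illustration.

#include <stdio.h>

/* Count consecutive duplicates of arr[*pos], advance *pos past the group,
 * and report the group's value through *val.  Returns 0 when exhausted. */
static int
load_group(const int *arr, int len, int *pos, int *val)
{
	int count = 0;

	if (*pos >= len)
		return 0;
	*val = arr[*pos];
	while (*pos < len && arr[*pos] == *val)
	{
		count++;
		(*pos)++;
	}
	return count;
}

int
main(void)
{
	/* Both inputs sorted on the single "grouping column" */
	int left[] = {1, 1, 2, 4, 4, 4};
	int right[] = {1, 3, 4, 4};
	int lpos = 0, rpos = 0;
	int lval = 0, rval = 0;
	int nleft = load_group(left, 6, &lpos, &lval);
	int nright = load_group(right, 4, &rpos, &rval);

	/* Merge-join-style loop: only the counts matter, not the tuples */
	while (nleft > 0)
	{
		if (nright == 0 || lval < rval)
		{
			/* left group has no right match: INTERSECT ALL emits nothing */
			nleft = load_group(left, 6, &lpos, &lval);
		}
		else if (lval == rval)
		{
			/* matching groups: INTERSECT ALL emits min(nleft, nright) copies */
			printf("value %d: emit %d copies\n",
				   lval, nleft < nright ? nleft : nright);
			nleft = load_group(left, 6, &lpos, &lval);
			nright = load_group(right, 4, &rpos, &rval);
		}
		else
		{
			/* right group has no left match: skip it */
			nright = load_group(right, 4, &rpos, &rval);
		}
	}
	return 0;	/* prints: value 1: emit 1 copies, then value 4: emit 2 copies */
}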
45
46#include "postgres.h"
47
48#include "access/htup_details.h"
49#include "executor/executor.h"
50#include "executor/nodeSetOp.h"
51#include "miscadmin.h"
52#include "utils/memutils.h"
53
54
55/*
56 * SetOpStatePerGroupData - per-group working state
57 *
58 * In SETOP_SORTED mode, we need only one of these structs, and it's just a
59 * local in setop_retrieve_sorted. In SETOP_HASHED mode, the hash table
60 * contains one of these for each tuple group.
61 */
62typedef struct SetOpStatePerGroupData
63{
64 int64 numLeft; /* number of left-input dups in group */
65 int64 numRight; /* number of right-input dups in group */
66} SetOpStatePerGroupData;
67
68typedef SetOpStatePerGroupData *SetOpStatePerGroup;
69
70
71static TupleTableSlot *setop_retrieve_sorted(SetOpState *setopstate);
72static void setop_load_group(SetOpStatePerInput *input, PlanState *inputPlan,
73 SetOpState *setopstate);
74static int setop_compare_slots(TupleTableSlot *s1, TupleTableSlot *s2,
75 SetOpState *setopstate);
76static void setop_fill_hash_table(SetOpState *setopstate);
77static TupleTableSlot *setop_retrieve_hash_table(SetOpState *setopstate);
78
79
80/*
81 * Initialize the hash table to empty.
82 */
83static void
84build_hash_table(SetOpState *setopstate)
85{
86 SetOp *node = (SetOp *) setopstate->ps.plan;
87 ExprContext *econtext = setopstate->ps.ps_ExprContext;
88 TupleDesc desc = ExecGetResultType(outerPlanState(setopstate));
89
90 Assert(node->strategy == SETOP_HASHED);
91 Assert(node->numGroups > 0);
92
93 /*
94 * If both child plans deliver the same fixed tuple slot type, we can tell
95 * BuildTupleHashTable to expect that slot type as input. Otherwise,
96 * we'll pass NULL denoting that any slot type is possible.
97 */
98 setopstate->hashtable = BuildTupleHashTable(&setopstate->ps,
99 desc,
100 ExecGetCommonChildSlotOps(&setopstate->ps),
101 node->numCols,
102 node->cmpColIdx,
103 setopstate->eqfuncoids,
104 setopstate->hashfunctions,
105 node->cmpCollations,
106 node->numGroups,
107 sizeof(SetOpStatePerGroupData),
108 setopstate->ps.state->es_query_cxt,
109 setopstate->tableContext,
110 econtext->ecxt_per_tuple_memory,
111 false);
112}
113
114/*
115 * We've completed processing a tuple group. Decide how many copies (if any)
116 * of its representative row to emit, and store the count into numOutput.
117 * This logic is straight from the SQL92 specification.
118 */
119static void
120set_output_count(SetOpState *setopstate, SetOpStatePerGroup pergroup)
121{
122 SetOp *plannode = (SetOp *) setopstate->ps.plan;
123
124 switch (plannode->cmd)
125 {
126 case SETOPCMD_INTERSECT:
127 if (pergroup->numLeft > 0 && pergroup->numRight > 0)
128 setopstate->numOutput = 1;
129 else
130 setopstate->numOutput = 0;
131 break;
132 case SETOPCMD_INTERSECT_ALL:
133 setopstate->numOutput =
134 (pergroup->numLeft < pergroup->numRight) ?
135 pergroup->numLeft : pergroup->numRight;
136 break;
137 case SETOPCMD_EXCEPT:
138 if (pergroup->numLeft > 0 && pergroup->numRight == 0)
139 setopstate->numOutput = 1;
140 else
141 setopstate->numOutput = 0;
142 break;
143 case SETOPCMD_EXCEPT_ALL:
144 setopstate->numOutput =
145 (pergroup->numLeft < pergroup->numRight) ?
146 0 : (pergroup->numLeft - pergroup->numRight);
147 break;
148 default:
149 elog(ERROR, "unrecognized set op: %d", (int) plannode->cmd);
150 break;
151 }
152}
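To make the SQL-spec rules in set_output_count concrete, the following minimal standalone sketch (not part of the PostgreSQL source; the enum and function names are invented) applies the same four rules to one hypothetical group with 3 left-input copies and 1 right-input copy.

#include <assert.h>
#include <stdint.h>

/* Toy restatement of the output-count rules for a single tuple group. */
enum toy_setop { TOY_INTERSECT, TOY_INTERSECT_ALL, TOY_EXCEPT, TOY_EXCEPT_ALL };

static int64_t
toy_output_count(enum toy_setop cmd, int64_t numLeft, int64_t numRight)
{
	switch (cmd)
	{
		case TOY_INTERSECT:
			return (numLeft > 0 && numRight > 0) ? 1 : 0;
		case TOY_INTERSECT_ALL:
			return (numLeft < numRight) ? numLeft : numRight;
		case TOY_EXCEPT:
			return (numLeft > 0 && numRight == 0) ? 1 : 0;
		case TOY_EXCEPT_ALL:
			return (numLeft < numRight) ? 0 : numLeft - numRight;
	}
	return 0;
}

int
main(void)
{
	/* A group seen 3 times on the left and once on the right */
	assert(toy_output_count(TOY_INTERSECT, 3, 1) == 1);
	assert(toy_output_count(TOY_INTERSECT_ALL, 3, 1) == 1);
	assert(toy_output_count(TOY_EXCEPT, 3, 1) == 0);
	assert(toy_output_count(TOY_EXCEPT_ALL, 3, 1) == 2);
	return 0;
}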
153
154
155/* ----------------------------------------------------------------
156 * ExecSetOp
157 * ----------------------------------------------------------------
158 */
159static TupleTableSlot * /* return: a tuple or NULL */
160ExecSetOp(PlanState *pstate)
161{
162 SetOpState *node = castNode(SetOpState, pstate);
163 SetOp *plannode = (SetOp *) node->ps.plan;
164 TupleTableSlot *resultTupleSlot = node->ps.ps_ResultTupleSlot;
165
166 CHECK_FOR_INTERRUPTS();
167
168 /*
169 * If the previously-returned tuple needs to be returned more than once,
170 * keep returning it.
171 */
172 if (node->numOutput > 0)
173 {
174 node->numOutput--;
175 return resultTupleSlot;
176 }
177
178 /* Otherwise, we're done if we are out of groups */
179 if (node->setop_done)
180 return NULL;
181
182 /* Fetch the next tuple group according to the correct strategy */
183 if (plannode->strategy == SETOP_HASHED)
184 {
185 if (!node->table_filled)
186 setop_fill_hash_table(node);
187 return setop_retrieve_hash_table(node);
188 }
189 else
190 return setop_retrieve_sorted(node);
191}
192
193/*
194 * ExecSetOp for non-hashed case
195 */
196static TupleTableSlot *
197setop_retrieve_sorted(SetOpState *setopstate)
198{
199 PlanState *outerPlan;
200 PlanState *innerPlan;
201 TupleTableSlot *resultTupleSlot;
202
203 /*
204 * get state info from node
205 */
206 outerPlan = outerPlanState(setopstate);
207 innerPlan = innerPlanState(setopstate);
208 resultTupleSlot = setopstate->ps.ps_ResultTupleSlot;
209
210 /*
211 * If first time through, establish the invariant that setop_load_group
212 * expects: each side's nextTupleSlot is the next output from the child
213 * plan, or empty if there is no more output from it.
214 */
215 if (setopstate->need_init)
216 {
217 setopstate->need_init = false;
218
219 setopstate->leftInput.nextTupleSlot = ExecProcNode(outerPlan);
220
221 /*
222 * If the outer relation is empty, then we will emit nothing, and we
223 * don't need to read the inner relation at all.
224 */
225 if (TupIsNull(setopstate->leftInput.nextTupleSlot))
226 {
227 setopstate->setop_done = true;
228 return NULL;
229 }
230
231 setopstate->rightInput.nextTupleSlot = ExecProcNode(innerPlan);
232
233 /* Set flags that we've not completed either side's group */
234 setopstate->leftInput.needGroup = true;
235 setopstate->rightInput.needGroup = true;
236 }
237
238 /*
239 * We loop retrieving groups until we find one we should return
240 */
241 while (!setopstate->setop_done)
242 {
243 int cmpresult;
244 SetOpStatePerGroupData pergroup;
245
246 /*
247 * Fetch the rest of the current outer group, if we didn't already.
248 */
249 if (setopstate->leftInput.needGroup)
250 setop_load_group(&setopstate->leftInput, outerPlan, setopstate);
251
252 /*
253 * If no more outer groups, we're done, and don't need to look at any
254 * more of the inner relation.
255 */
256 if (setopstate->leftInput.numTuples == 0)
257 {
258 setopstate->setop_done = true;
259 break;
260 }
261
262 /*
263 * Fetch the rest of the current inner group, if we didn't already.
264 */
265 if (setopstate->rightInput.needGroup)
266 setop_load_group(&setopstate->rightInput, innerPlan, setopstate);
267
268 /*
269 * Determine whether we have matching groups on both sides (this is
270 * basically like the core logic of a merge join).
271 */
272 if (setopstate->rightInput.numTuples == 0)
273 cmpresult = -1; /* as though left input is lesser */
274 else
275 cmpresult = setop_compare_slots(setopstate->leftInput.firstTupleSlot,
276 setopstate->rightInput.firstTupleSlot,
277 setopstate);
278
279 if (cmpresult < 0)
280 {
281 /* Left group is first, and has no right matches */
282 pergroup.numLeft = setopstate->leftInput.numTuples;
283 pergroup.numRight = 0;
284 /* We'll need another left group next time */
285 setopstate->leftInput.needGroup = true;
286 }
287 else if (cmpresult == 0)
288 {
289 /* We have matching groups */
290 pergroup.numLeft = setopstate->leftInput.numTuples;
291 pergroup.numRight = setopstate->rightInput.numTuples;
292 /* We'll need to read from both sides next time */
293 setopstate->leftInput.needGroup = true;
294 setopstate->rightInput.needGroup = true;
295 }
296 else
297 {
298 /* Right group has no left matches, so we can ignore it */
299 setopstate->rightInput.needGroup = true;
300 continue;
301 }
302
303 /*
304 * Done scanning these input tuple groups. See if we should emit any
305 * copies of result tuple, and if so return the first copy. (Note
306 * that the result tuple is the same as the left input's firstTuple
307 * slot.)
308 */
309 set_output_count(setopstate, &pergroup);
310
311 if (setopstate->numOutput > 0)
312 {
313 setopstate->numOutput--;
314 return resultTupleSlot;
315 }
316 }
317
318 /* No more groups */
319 ExecClearTuple(resultTupleSlot);
320 return NULL;
321}
322
323/*
324 * Load next group of tuples from one child plan or the other.
325 *
326 * On entry, we've already read the first tuple of the next group
327 * (if there is one) into input->nextTupleSlot. This invariant
328 * is maintained on exit.
329 */
330static void
331setop_load_group(SetOpStatePerInput *input, PlanState *inputPlan,
332 SetOpState *setopstate)
333{
334 input->needGroup = false;
335
336 /* If we've exhausted this child plan, report an empty group */
337 if (TupIsNull(input->nextTupleSlot))
338 {
339 ExecClearTuple(input->firstTupleSlot);
340 input->numTuples = 0;
341 return;
342 }
343
344 /* Make a local copy of the first tuple for comparisons */
345 ExecStoreMinimalTuple(ExecCopySlotMinimalTuple(input->nextTupleSlot),
346 input->firstTupleSlot,
347 true);
348 /* and count it */
349 input->numTuples = 1;
350
351 /* Scan till we find the end-of-group */
352 for (;;)
353 {
354 int cmpresult;
355
356 /* Get next input tuple, if there is one */
357 input->nextTupleSlot = ExecProcNode(inputPlan);
358 if (TupIsNull(input->nextTupleSlot))
359 break;
360
361 /* There is; does it belong to same group as firstTuple? */
362 cmpresult = setop_compare_slots(input->firstTupleSlot,
363 input->nextTupleSlot,
364 setopstate);
365 Assert(cmpresult <= 0); /* else input is mis-sorted */
366 if (cmpresult != 0)
367 break;
368
369 /* Still in same group, so count this tuple */
370 input->numTuples++;
371 }
372}
373
374/*
375 * Compare the tuples in the two given slots.
376 */
377static int
378setop_compare_slots(TupleTableSlot *s1, TupleTableSlot *s2,
379 SetOpState *setopstate)
380{
381 /* We'll often need to fetch all the columns, so just do it */
382 slot_getallattrs(s1);
383 slot_getallattrs(s2);
384 for (int nkey = 0; nkey < setopstate->numCols; nkey++)
385 {
386 SortSupport sortKey = setopstate->sortKeys + nkey;
387 AttrNumber attno = sortKey->ssup_attno;
388 Datum datum1 = s1->tts_values[attno - 1],
389 datum2 = s2->tts_values[attno - 1];
390 bool isNull1 = s1->tts_isnull[attno - 1],
391 isNull2 = s2->tts_isnull[attno - 1];
392 int compare;
393
394 compare = ApplySortComparator(datum1, isNull1,
395 datum2, isNull2,
396 sortKey);
397 if (compare != 0)
398 return compare;
399 }
400 return 0;
401}
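The loop in setop_compare_slots is an ordinary lexicographic multi-key comparison: compare the grouping columns one by one and return at the first difference. A minimal standalone sketch of the same shape, on a plain two-column struct with invented names (not part of this file), looks like this.

#include <assert.h>
#include <string.h>

/* A "tuple" with two grouping columns, as a stand-in for a slot. */
struct toy_row
{
	int a;
	const char *b;
};

/* Compare rows column by column, returning at the first nonzero result. */
static int
toy_compare_rows(const struct toy_row *r1, const struct toy_row *r2)
{
	if (r1->a != r2->a)
		return (r1->a < r2->a) ? -1 : 1;	/* first key decides */
	return strcmp(r1->b, r2->b);			/* tie: fall through to next key */
}

int
main(void)
{
	struct toy_row x = {1, "b"};
	struct toy_row y = {1, "c"};
	struct toy_row z = {2, "a"};

	assert(toy_compare_rows(&x, &y) < 0);	/* equal first key, "b" < "c" */
	assert(toy_compare_rows(&y, &z) < 0);	/* 1 < 2, second key never examined */
	assert(toy_compare_rows(&x, &x) == 0);
	return 0;
}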
402
403/*
404 * ExecSetOp for hashed case: phase 1, read inputs and build hash table
405 */
406static void
407setop_fill_hash_table(SetOpState *setopstate)
408{
409 PlanState *outerPlan;
410 PlanState *innerPlan;
411 ExprContext *econtext = setopstate->ps.ps_ExprContext;
412 bool have_tuples = false;
413
414 /*
415 * get state info from node
416 */
417 outerPlan = outerPlanState(setopstate);
418 innerPlan = innerPlanState(setopstate);
419
420 /*
421 * Process each outer-plan tuple, and then fetch the next one, until we
422 * exhaust the outer plan.
423 */
424 for (;;)
425 {
426 TupleTableSlot *outerslot;
427 TupleHashTable hashtable = setopstate->hashtable;
428 TupleHashEntryData *entry;
429 SetOpStatePerGroup pergroup;
430 bool isnew;
431
432 outerslot = ExecProcNode(outerPlan);
433 if (TupIsNull(outerslot))
434 break;
435 have_tuples = true;
436
437 /* Find or build hashtable entry for this tuple's group */
438 entry = LookupTupleHashEntry(hashtable,
439 outerslot,
440 &isnew, NULL);
441
442 pergroup = TupleHashEntryGetAdditional(hashtable, entry);
443 /* If new tuple group, initialize counts to zero */
444 if (isnew)
445 {
446 pergroup->numLeft = 0;
447 pergroup->numRight = 0;
448 }
449
450 /* Advance the counts */
451 pergroup->numLeft++;
452
453 /* Must reset expression context after each hashtable lookup */
454 ResetExprContext(econtext);
455 }
456
457 /*
458 * If the outer relation is empty, then we will emit nothing, and we don't
459 * need to read the inner relation at all.
460 */
461 if (have_tuples)
462 {
463 /*
464 * Process each inner-plan tuple, and then fetch the next one, until
465 * we exhaust the inner plan.
466 */
467 for (;;)
468 {
469 TupleTableSlot *innerslot;
470 TupleHashTable hashtable = setopstate->hashtable;
471 TupleHashEntryData *entry;
472
473 innerslot = ExecProcNode(innerPlan);
474 if (TupIsNull(innerslot))
475 break;
476
477 /* For tuples not seen previously, do not make hashtable entry */
478 entry = LookupTupleHashEntry(hashtable,
479 innerslot,
480 NULL, NULL);
481
482 /* Advance the counts if entry is already present */
483 if (entry)
484 {
485 SetOpStatePerGroup pergroup = TupleHashEntryGetAdditional(hashtable, entry);
486
487 pergroup->numRight++;
488 }
489
490 /* Must reset expression context after each hashtable lookup */
491 ResetExprContext(econtext);
492 }
493 }
494
495 setopstate->table_filled = true;
496 /* Initialize to walk the hash table */
497 ResetTupleHashIterator(setopstate->hashtable, &setopstate->hashiter);
498}
499
500/*
501 * ExecSetOp for hashed case: phase 2, retrieving groups from hash table
502 */
503static TupleTableSlot *
504setop_retrieve_hash_table(SetOpState *setopstate)
505{
506 TupleHashEntry entry;
507 TupleTableSlot *resultTupleSlot;
508
509 /*
510 * get state info from node
511 */
512 resultTupleSlot = setopstate->ps.ps_ResultTupleSlot;
513
514 /*
515 * We loop retrieving groups until we find one we should return
516 */
517 while (!setopstate->setop_done)
518 {
519 TupleHashTable hashtable = setopstate->hashtable;
520 SetOpStatePerGroup pergroup;
521
522 CHECK_FOR_INTERRUPTS();
523
524 /*
525 * Find the next entry in the hash table
526 */
527 entry = ScanTupleHashTable(hashtable, &setopstate->hashiter);
528 if (entry == NULL)
529 {
530 /* No more entries in hashtable, so done */
531 setopstate->setop_done = true;
532 return NULL;
533 }
534
535 /*
536 * See if we should emit any copies of this tuple, and if so return
537 * the first copy.
538 */
539 pergroup = TupleHashEntryGetAdditional(hashtable, entry);
540 set_output_count(setopstate, pergroup);
541
542 if (setopstate->numOutput > 0)
543 {
544 setopstate->numOutput--;
545 return ExecStoreMinimalTuple(TupleHashEntryGetTuple(entry),
546 resultTupleSlot,
547 false);
548 }
549 }
550
551 /* No more groups */
552 ExecClearTuple(resultTupleSlot);
553 return NULL;
554}
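The two hashed-mode phases above (fill, then retrieve) can also be illustrated with a minimal standalone sketch that is not part of this file: a tiny linear-search table of per-group counters stands in for the tuple hash table, and the EXCEPT ALL rule is applied when walking it. All names are invented for the example.

#include <stdio.h>

/* Stand-in for one hash table entry: the group key plus the two counters. */
struct toy_group
{
	int key;
	long numLeft;
	long numRight;
};

/* Linear search stands in for LookupTupleHashEntry; "insert" controls
 * whether a missing key creates a new entry (outer pass) or not (inner). */
static struct toy_group *
lookup(struct toy_group *tab, int *ntab, int key, int insert)
{
	for (int i = 0; i < *ntab; i++)
		if (tab[i].key == key)
			return &tab[i];
	if (!insert)
		return NULL;
	tab[*ntab].key = key;
	tab[*ntab].numLeft = 0;
	tab[*ntab].numRight = 0;
	return &tab[(*ntab)++];
}

int
main(void)
{
	int outer[] = {4, 1, 4, 4, 1, 2};	/* unordered left input */
	int inner[] = {4, 4, 3, 1};			/* unordered right input */
	struct toy_group table[8];
	int ngroups = 0;

	/* Phase 1: one entry per distinct outer value, counting duplicates */
	for (int i = 0; i < 6; i++)
		lookup(table, &ngroups, outer[i], 1)->numLeft++;

	/* Phase 2: count inner matches; values seen only on the inner side
	 * get no entry, since they can never appear in the output */
	for (int i = 0; i < 4; i++)
	{
		struct toy_group *g = lookup(table, &ngroups, inner[i], 0);

		if (g)
			g->numRight++;
	}

	/* Phase 3: walk the table and emit per the EXCEPT ALL rule */
	for (int i = 0; i < ngroups; i++)
	{
		long n = table[i].numLeft - table[i].numRight;

		if (n > 0)
			printf("value %d: emit %ld copies\n", table[i].key, n);
	}
	return 0;	/* prints one copy each for values 4, 1, and 2 */
}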
555
556/* ----------------------------------------------------------------
557 * ExecInitSetOp
558 *
559 * This initializes the setop node state structures and
560 * the node's subplan.
561 * ----------------------------------------------------------------
562 */
563SetOpState *
564ExecInitSetOp(SetOp *node, EState *estate, int eflags)
565{
566 SetOpState *setopstate;
567
568 /* check for unsupported flags */
569 Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
570
571 /*
572 * create state structure
573 */
574 setopstate = makeNode(SetOpState);
575 setopstate->ps.plan = (Plan *) node;
576 setopstate->ps.state = estate;
577 setopstate->ps.ExecProcNode = ExecSetOp;
578
579 setopstate->setop_done = false;
580 setopstate->numOutput = 0;
581 setopstate->numCols = node->numCols;
582 setopstate->need_init = true;
583
584 /*
585 * create expression context
586 */
587 ExecAssignExprContext(estate, &setopstate->ps);
588
589 /*
590 * If hashing, we also need a longer-lived context to store the hash
591 * table. The table can't just be kept in the per-query context because
592 * we want to be able to throw it away in ExecReScanSetOp.
593 */
594 if (node->strategy == SETOP_HASHED)
595 setopstate->tableContext =
596 AllocSetContextCreate(CurrentMemoryContext,
597 "SetOp hash table",
598 ALLOCSET_DEFAULT_SIZES);
599
600 /*
601 * initialize child nodes
602 *
603 * If we are hashing then the child plans do not need to handle REWIND
604 * efficiently; see ExecReScanSetOp.
605 */
606 if (node->strategy == SETOP_HASHED)
607 eflags &= ~EXEC_FLAG_REWIND;
608 outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate, eflags);
609 innerPlanState(setopstate) = ExecInitNode(innerPlan(node), estate, eflags);
610
611 /*
612 * Initialize locally-allocated slots. In hashed mode, we just need a
613 * result slot. In sorted mode, we need one first-tuple-of-group slot for
614 * each input; we use the result slot for the left input's slot and create
615 * another for the right input. (Note: the nextTupleSlot slots are not
616 * ours, but just point to the last slot returned by the input plan node.)
617 */
618 ExecInitResultTupleSlotTL(&setopstate->ps, &TTSOpsMinimalTuple);
619 if (node->strategy != SETOP_HASHED)
620 {
621 setopstate->leftInput.firstTupleSlot =
622 setopstate->ps.ps_ResultTupleSlot;
623 setopstate->rightInput.firstTupleSlot =
624 ExecInitExtraTupleSlot(estate,
625 setopstate->ps.ps_ResultTupleDesc,
626 &TTSOpsMinimalTuple);
627 }
628
629 /* Setop nodes do no projections. */
630 setopstate->ps.ps_ProjInfo = NULL;
631
632 /*
633 * Precompute fmgr lookup data for inner loop. We need equality and
634 * hashing functions to do it by hashing, while for sorting we need
635 * SortSupport data.
636 */
637 if (node->strategy == SETOP_HASHED)
638 execTuplesHashPrepare(node->numCols,
639 node->cmpOperators,
640 &setopstate->eqfuncoids,
641 &setopstate->hashfunctions);
642 else
643 {
644 int nkeys = node->numCols;
645
646 setopstate->sortKeys = (SortSupport)
647 palloc0(nkeys * sizeof(SortSupportData));
648 for (int i = 0; i < nkeys; i++)
649 {
650 SortSupport sortKey = setopstate->sortKeys + i;
651
652 sortKey->ssup_cxt = CurrentMemoryContext;
653 sortKey->ssup_collation = node->cmpCollations[i];
654 sortKey->ssup_nulls_first = node->cmpNullsFirst[i];
655 sortKey->ssup_attno = node->cmpColIdx[i];
656 /* abbreviated key conversion is not useful here */
657 sortKey->abbreviate = false;
658
659 PrepareSortSupportFromOrderingOp(node->cmpOperators[i], sortKey);
660 }
661 }
662
663 /* Create a hash table if needed */
664 if (node->strategy == SETOP_HASHED)
665 {
666 build_hash_table(setopstate);
667 setopstate->table_filled = false;
668 }
669
670 return setopstate;
671}
672
673/* ----------------------------------------------------------------
674 * ExecEndSetOp
675 *
676 * This shuts down the subplans and frees resources allocated
677 * to this node.
678 * ----------------------------------------------------------------
679 */
680void
681ExecEndSetOp(SetOpState *node)
682{
683 /* free subsidiary stuff including hashtable */
684 if (node->tableContext)
685 MemoryContextDelete(node->tableContext);
686
687 ExecEndNode(outerPlanState(node));
688 ExecEndNode(innerPlanState(node));
689}
690
691
692void
693ExecReScanSetOp(SetOpState *node)
694{
695 PlanState *outerPlan = outerPlanState(node);
696 PlanState *innerPlan = innerPlanState(node);
697
698 ExecClearTuple(node->ps.ps_ResultTupleSlot);
699 node->setop_done = false;
700 node->numOutput = 0;
701
702 if (((SetOp *) node->ps.plan)->strategy == SETOP_HASHED)
703 {
704 /*
705 * In the hashed case, if we haven't yet built the hash table then we
706 * can just return; nothing done yet, so nothing to undo. If subnode's
707 * chgParam is not NULL then it will be re-scanned by ExecProcNode,
708 * else no reason to re-scan it at all.
709 */
710 if (!node->table_filled)
711 return;
712
713 /*
714 * If we do have the hash table and the subplans do not have any
715 * parameter changes, then we can just rescan the existing hash table;
716 * no need to build it again.
717 */
718 if (outerPlan->chgParam == NULL && innerPlan->chgParam == NULL)
719 {
720 ResetTupleHashIterator(node->hashtable, &node->hashiter);
721 return;
722 }
723
724 /* Release any hashtable storage */
725 if (node->tableContext)
726 MemoryContextReset(node->tableContext);
727
728 /* And rebuild an empty hashtable */
729 ResetTupleHashTable(node->hashtable);
730 node->table_filled = false;
731 }
732 else
733 {
734 /* Need to re-read first input from each side */
735 node->need_init = true;
736 }
737
738 /*
739 * if chgParam of subnode is not null then plan will be re-scanned by
740 * first ExecProcNode.
741 */
742 if (outerPlan->chgParam == NULL)
743 ExecReScan(outerPlan);
744 if (innerPlan->chgParam == NULL)
745 ExecReScan(innerPlan);
746}