PostgreSQL Source Code (git master)
nodeSetOp.c
/*-------------------------------------------------------------------------
 *
 * nodeSetOp.c
 *    Routines to handle INTERSECT and EXCEPT selection
 *
 * The input of a SetOp node consists of two relations (outer and inner)
 * with identical column sets.  In EXCEPT queries the outer relation is
 * always the left side, while in INTERSECT cases the planner tries to
 * make the outer relation be the smaller of the two inputs.
 *
 * In SETOP_SORTED mode, each input has been sorted according to all the
 * grouping columns.  The SetOp node essentially performs a merge join on
 * the grouping columns, except that it is only interested in counting how
 * many tuples from each input match.  Then it is a simple matter to emit
 * the output demanded by the SQL spec for INTERSECT, INTERSECT ALL, EXCEPT,
 * or EXCEPT ALL.
 *
 * In SETOP_HASHED mode, the inputs are delivered in no particular order.
 * We read the outer relation and build a hash table in memory with one entry
 * for each group of identical tuples, counting the number of tuples in the
 * group.  Then we read the inner relation and count the number of tuples
 * matching each outer group.  (We can disregard any tuples appearing only
 * in the inner relation, since they cannot result in any output.)  After
 * seeing all the input, we scan the hashtable and generate the correct
 * output using those counts.
 *
 * This node type is not used for UNION or UNION ALL, since those can be
 * implemented more cheaply (there's no need to count the number of
 * matching tuples).
 *
 * Note that SetOp does no qual checking nor projection.  The delivered
 * output tuples are just copies of the first-to-arrive tuple in each
 * input group.
 *
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/executor/nodeSetOp.c
 *
 *-------------------------------------------------------------------------
 */
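
/*
 * Illustration (editor's sketch, not part of the original file): for a
 * query such as
 *
 *		SELECT a FROM t1 INTERSECT ALL SELECT a FROM t2
 *
 * the SetOp node reads one input as the outer relation (for INTERSECT,
 * the planner prefers the smaller one) and the other as the inner, counts
 * duplicates of each distinct row value on both sides, and then emits each
 * value min(numLeft, numRight) times, per set_output_count below.
 */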

#include "postgres.h"

#include "access/htup_details.h"
#include "executor/executor.h"
#include "executor/nodeSetOp.h"
#include "miscadmin.h"
#include "utils/memutils.h"


/*
 * SetOpStatePerGroupData - per-group working state
 *
 * In SETOP_SORTED mode, we need only one of these structs, and it's just a
 * local in setop_retrieve_sorted.  In SETOP_HASHED mode, the hash table
 * contains one of these for each tuple group.
 */
typedef struct SetOpStatePerGroupData
{
    int64       numLeft;        /* number of left-input dups in group */
    int64       numRight;       /* number of right-input dups in group */
}           SetOpStatePerGroupData;

typedef SetOpStatePerGroupData *SetOpStatePerGroup;


static TupleTableSlot *setop_retrieve_sorted(SetOpState *setopstate);
static void setop_load_group(SetOpStatePerInput *input, PlanState *inputPlan,
                             SetOpState *setopstate);
static int  setop_compare_slots(TupleTableSlot *s1, TupleTableSlot *s2,
                                SetOpState *setopstate);
static void setop_fill_hash_table(SetOpState *setopstate);
static TupleTableSlot *setop_retrieve_hash_table(SetOpState *setopstate);


/*
 * Initialize the hash table to empty.
 */
static void
build_hash_table(SetOpState *setopstate)
{
    SetOp      *node = (SetOp *) setopstate->ps.plan;
    ExprContext *econtext = setopstate->ps.ps_ExprContext;
    TupleDesc   desc = ExecGetResultType(outerPlanState(setopstate));

    Assert(node->strategy == SETOP_HASHED);
    Assert(node->numGroups > 0);

    /*
     * If both child plans deliver the same fixed tuple slot type, we can tell
     * BuildTupleHashTable to expect that slot type as input.  Otherwise,
     * we'll pass NULL denoting that any slot type is possible.
     */
    setopstate->hashtable = BuildTupleHashTable(&setopstate->ps,
                                                desc,
                                                ExecGetCommonChildSlotOps(&setopstate->ps),
                                                node->numCols,
                                                node->cmpColIdx,
                                                setopstate->eqfuncoids,
                                                setopstate->hashfunctions,
                                                node->cmpCollations,
                                                node->numGroups,
                                                0,
                                                setopstate->ps.state->es_query_cxt,
                                                setopstate->tableContext,
                                                econtext->ecxt_per_tuple_memory,
                                                false);
}

/*
 * We've completed processing a tuple group.  Decide how many copies (if any)
 * of its representative row to emit, and store the count into numOutput.
 * This logic is straight from the SQL92 specification.
 */
static void
set_output_count(SetOpState *setopstate, SetOpStatePerGroup pergroup)
{
    SetOp      *plannode = (SetOp *) setopstate->ps.plan;

    switch (plannode->cmd)
    {
        case SETOPCMD_INTERSECT:
            if (pergroup->numLeft > 0 && pergroup->numRight > 0)
                setopstate->numOutput = 1;
            else
                setopstate->numOutput = 0;
            break;
        case SETOPCMD_INTERSECT_ALL:
            setopstate->numOutput =
                (pergroup->numLeft < pergroup->numRight) ?
                pergroup->numLeft : pergroup->numRight;
            break;
        case SETOPCMD_EXCEPT:
            if (pergroup->numLeft > 0 && pergroup->numRight == 0)
                setopstate->numOutput = 1;
            else
                setopstate->numOutput = 0;
            break;
        case SETOPCMD_EXCEPT_ALL:
            setopstate->numOutput =
                (pergroup->numLeft < pergroup->numRight) ?
                0 : (pergroup->numLeft - pergroup->numRight);
            break;
        default:
            elog(ERROR, "unrecognized set op: %d", (int) plannode->cmd);
            break;
    }
}
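
/*
 * Worked example (editor's illustration, not part of the original file):
 * for a group with numLeft = 3 and numRight = 1, the rules above give
 * numOutput = 1 for INTERSECT, 1 for INTERSECT ALL (the smaller count),
 * 0 for EXCEPT (the right count is nonzero), and 2 for EXCEPT ALL (3 - 1).
 */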


/* ----------------------------------------------------------------
 *		ExecSetOp
 * ----------------------------------------------------------------
 */
static TupleTableSlot *         /* return: a tuple or NULL */
ExecSetOp(PlanState *pstate)
{
    SetOpState *node = castNode(SetOpState, pstate);
    SetOp      *plannode = (SetOp *) node->ps.plan;
    TupleTableSlot *resultTupleSlot = node->ps.ps_ResultTupleSlot;

    CHECK_FOR_INTERRUPTS();

    /*
     * If the previously-returned tuple needs to be returned more than once,
     * keep returning it.
     */
    if (node->numOutput > 0)
    {
        node->numOutput--;
        return resultTupleSlot;
    }

    /* Otherwise, we're done if we are out of groups */
    if (node->setop_done)
        return NULL;

    /* Fetch the next tuple group according to the correct strategy */
    if (plannode->strategy == SETOP_HASHED)
    {
        if (!node->table_filled)
            setop_fill_hash_table(node);
        return setop_retrieve_hash_table(node);
    }
    else
        return setop_retrieve_sorted(node);
}

/*
 * ExecSetOp for non-hashed case
 */
static TupleTableSlot *
setop_retrieve_sorted(SetOpState *setopstate)
{
    PlanState  *outerPlan;
    PlanState  *innerPlan;
    TupleTableSlot *resultTupleSlot;

    /*
     * get state info from node
     */
    outerPlan = outerPlanState(setopstate);
    innerPlan = innerPlanState(setopstate);
    resultTupleSlot = setopstate->ps.ps_ResultTupleSlot;

    /*
     * If first time through, establish the invariant that setop_load_group
     * expects: each side's nextTupleSlot is the next output from the child
     * plan, or empty if there is no more output from it.
     */
    if (setopstate->need_init)
    {
        setopstate->need_init = false;

        setopstate->leftInput.nextTupleSlot = ExecProcNode(outerPlan);

        /*
         * If the outer relation is empty, then we will emit nothing, and we
         * don't need to read the inner relation at all.
         */
        if (TupIsNull(setopstate->leftInput.nextTupleSlot))
        {
            setopstate->setop_done = true;
            return NULL;
        }

        setopstate->rightInput.nextTupleSlot = ExecProcNode(innerPlan);

        /* Set flags that we've not completed either side's group */
        setopstate->leftInput.needGroup = true;
        setopstate->rightInput.needGroup = true;
    }

    /*
     * We loop retrieving groups until we find one we should return
     */
    while (!setopstate->setop_done)
    {
        int         cmpresult;
        SetOpStatePerGroupData pergroup;

        /*
         * Fetch the rest of the current outer group, if we didn't already.
         */
        if (setopstate->leftInput.needGroup)
            setop_load_group(&setopstate->leftInput, outerPlan, setopstate);

        /*
         * If no more outer groups, we're done, and don't need to look at any
         * more of the inner relation.
         */
        if (setopstate->leftInput.numTuples == 0)
        {
            setopstate->setop_done = true;
            break;
        }

        /*
         * Fetch the rest of the current inner group, if we didn't already.
         */
        if (setopstate->rightInput.needGroup)
            setop_load_group(&setopstate->rightInput, innerPlan, setopstate);

        /*
         * Determine whether we have matching groups on both sides (this is
         * basically like the core logic of a merge join).
         */
        if (setopstate->rightInput.numTuples == 0)
            cmpresult = -1;     /* as though left input is lesser */
        else
            cmpresult = setop_compare_slots(setopstate->leftInput.firstTupleSlot,
                                            setopstate->rightInput.firstTupleSlot,
                                            setopstate);

        if (cmpresult < 0)
        {
            /* Left group is first, and has no right matches */
            pergroup.numLeft = setopstate->leftInput.numTuples;
            pergroup.numRight = 0;
            /* We'll need another left group next time */
            setopstate->leftInput.needGroup = true;
        }
        else if (cmpresult == 0)
        {
            /* We have matching groups */
            pergroup.numLeft = setopstate->leftInput.numTuples;
            pergroup.numRight = setopstate->rightInput.numTuples;
            /* We'll need to read from both sides next time */
            setopstate->leftInput.needGroup = true;
            setopstate->rightInput.needGroup = true;
        }
        else
        {
            /* Right group has no left matches, so we can ignore it */
            setopstate->rightInput.needGroup = true;
            continue;
        }

        /*
         * Done scanning these input tuple groups.  See if we should emit any
         * copies of result tuple, and if so return the first copy.  (Note
         * that the result tuple is the same as the left input's firstTuple
         * slot.)
         */
        set_output_count(setopstate, &pergroup);

        if (setopstate->numOutput > 0)
        {
            setopstate->numOutput--;
            return resultTupleSlot;
        }
    }

    /* No more groups */
    ExecClearTuple(resultTupleSlot);
    return NULL;
}
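
/*
 * Example trace (editor's illustration, not part of the original file):
 * with sorted outer input (A, A, B) and sorted inner input (A, C), the
 * loop above first pairs outer group A (numLeft = 2) with inner group A
 * (numRight = 1); then outer group B compares less than inner group C,
 * so B is processed with numRight = 0; finally the outer side is
 * exhausted and the node is done.  Inner group C is loaded but never
 * counted, since inner-only groups cannot produce output.
 */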

/*
 * Load next group of tuples from one child plan or the other.
 *
 * On entry, we've already read the first tuple of the next group
 * (if there is one) into input->nextTupleSlot.  This invariant
 * is maintained on exit.
 */
static void
setop_load_group(SetOpStatePerInput *input, PlanState *inputPlan,
                 SetOpState *setopstate)
{
    input->needGroup = false;

    /* If we've exhausted this child plan, report an empty group */
    if (TupIsNull(input->nextTupleSlot))
    {
        ExecClearTuple(input->firstTupleSlot);
        input->numTuples = 0;
        return;
    }

    /* Make a local copy of the first tuple for comparisons */
    ExecStoreMinimalTuple(ExecCopySlotMinimalTuple(input->nextTupleSlot),
                          input->firstTupleSlot,
                          true);
    /* and count it */
    input->numTuples = 1;

    /* Scan till we find the end-of-group */
    for (;;)
    {
        int         cmpresult;

        /* Get next input tuple, if there is one */
        input->nextTupleSlot = ExecProcNode(inputPlan);
        if (TupIsNull(input->nextTupleSlot))
            break;

        /* There is; does it belong to same group as firstTuple? */
        cmpresult = setop_compare_slots(input->firstTupleSlot,
                                        input->nextTupleSlot,
                                        setopstate);
        Assert(cmpresult <= 0); /* else input is mis-sorted */
        if (cmpresult != 0)
            break;

        /* Still in same group, so count this tuple */
        input->numTuples++;
    }
}

/*
 * Compare the tuples in the two given slots.
 */
static int
setop_compare_slots(TupleTableSlot *s1, TupleTableSlot *s2,
                    SetOpState *setopstate)
{
    /* We'll often need to fetch all the columns, so just do it */
    slot_getallattrs(s1);
    slot_getallattrs(s2);

    for (int nkey = 0; nkey < setopstate->numCols; nkey++)
    {
        SortSupport sortKey = setopstate->sortKeys + nkey;
        AttrNumber  attno = sortKey->ssup_attno;
        Datum       datum1 = s1->tts_values[attno - 1],
                    datum2 = s2->tts_values[attno - 1];
        bool        isNull1 = s1->tts_isnull[attno - 1],
                    isNull2 = s2->tts_isnull[attno - 1];
        int         compare;

        compare = ApplySortComparator(datum1, isNull1,
                                      datum2, isNull2,
                                      sortKey);
        if (compare != 0)
            return compare;
    }
    return 0;
}

/*
 * ExecSetOp for hashed case: phase 1, read inputs and build hash table
 */
static void
setop_fill_hash_table(SetOpState *setopstate)
{
    PlanState  *outerPlan;
    PlanState  *innerPlan;
    ExprContext *econtext = setopstate->ps.ps_ExprContext;
    bool        have_tuples = false;

    /*
     * get state info from node
     */
    outerPlan = outerPlanState(setopstate);
    innerPlan = innerPlanState(setopstate);

    /*
     * Process each outer-plan tuple, and then fetch the next one, until we
     * exhaust the outer plan.
     */
    for (;;)
    {
        TupleTableSlot *outerslot;
        TupleHashEntryData *entry;
        bool        isnew;

        outerslot = ExecProcNode(outerPlan);
        if (TupIsNull(outerslot))
            break;
        have_tuples = true;

        /* Find or build hashtable entry for this tuple's group */
        entry = LookupTupleHashEntry(setopstate->hashtable,
                                     outerslot,
                                     &isnew, NULL);

        /* If new tuple group, initialize counts to zero */
        if (isnew)
        {
            entry->additional = (SetOpStatePerGroup)
                MemoryContextAllocZero(setopstate->hashtable->tablecxt,
                                       sizeof(SetOpStatePerGroupData));
        }

        /* Advance the counts */
        ((SetOpStatePerGroup) entry->additional)->numLeft++;

        /* Must reset expression context after each hashtable lookup */
        ResetExprContext(econtext);
    }

    /*
     * If the outer relation is empty, then we will emit nothing, and we don't
     * need to read the inner relation at all.
     */
    if (have_tuples)
    {
        /*
         * Process each inner-plan tuple, and then fetch the next one, until
         * we exhaust the inner plan.
         */
        for (;;)
        {
            TupleTableSlot *innerslot;
            TupleHashEntryData *entry;

            innerslot = ExecProcNode(innerPlan);
            if (TupIsNull(innerslot))
                break;

            /* For tuples not seen previously, do not make hashtable entry */
            entry = LookupTupleHashEntry(setopstate->hashtable,
                                         innerslot,
                                         NULL, NULL);

            /* Advance the counts if entry is already present */
            if (entry)
                ((SetOpStatePerGroup) entry->additional)->numRight++;

            /* Must reset expression context after each hashtable lookup */
            ResetExprContext(econtext);
        }
    }

    setopstate->table_filled = true;
    /* Initialize to walk the hash table */
    ResetTupleHashIterator(setopstate->hashtable, &setopstate->hashiter);
}
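
/*
 * Example trace (editor's illustration, not part of the original file):
 * with outer input (A, A, B) and inner input (A, C), phase 1 leaves the
 * hash table holding A with counts (numLeft = 2, numRight = 1) and B with
 * (1, 0).  The inner-only value C gets no entry, since it could never
 * contribute to the output.
 */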

/*
 * ExecSetOp for hashed case: phase 2, retrieving groups from hash table
 */
static TupleTableSlot *
setop_retrieve_hash_table(SetOpState *setopstate)
{
    TupleHashEntryData *entry;
    TupleTableSlot *resultTupleSlot;

    /*
     * get state info from node
     */
    resultTupleSlot = setopstate->ps.ps_ResultTupleSlot;

    /*
     * We loop retrieving groups until we find one we should return
     */
    while (!setopstate->setop_done)
    {
        CHECK_FOR_INTERRUPTS();

        /*
         * Find the next entry in the hash table
         */
        entry = ScanTupleHashTable(setopstate->hashtable, &setopstate->hashiter);
        if (entry == NULL)
        {
            /* No more entries in hashtable, so done */
            setopstate->setop_done = true;
            return NULL;
        }

        /*
         * See if we should emit any copies of this tuple, and if so return
         * the first copy.
         */
        set_output_count(setopstate, (SetOpStatePerGroup) entry->additional);

        if (setopstate->numOutput > 0)
        {
            setopstate->numOutput--;
            return ExecStoreMinimalTuple(entry->firstTuple,
                                         resultTupleSlot,
                                         false);
        }
    }

    /* No more groups */
    ExecClearTuple(resultTupleSlot);
    return NULL;
}
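
/*
 * Continuing the example above (editor's illustration, not part of the
 * original file): scanning that hash table under EXCEPT ALL emits A once
 * (2 - 1) and B once (1 - 0), while INTERSECT emits only A.
 */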

/* ----------------------------------------------------------------
 *		ExecInitSetOp
 *
 *		This initializes the setop node state structures and
 *		the node's subplan.
 * ----------------------------------------------------------------
 */
SetOpState *
ExecInitSetOp(SetOp *node, EState *estate, int eflags)
{
    SetOpState *setopstate;

    /* check for unsupported flags */
    Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

    /*
     * create state structure
     */
    setopstate = makeNode(SetOpState);
    setopstate->ps.plan = (Plan *) node;
    setopstate->ps.state = estate;
    setopstate->ps.ExecProcNode = ExecSetOp;

    setopstate->setop_done = false;
    setopstate->numOutput = 0;
    setopstate->numCols = node->numCols;
    setopstate->need_init = true;

    /*
     * create expression context
     */
    ExecAssignExprContext(estate, &setopstate->ps);

    /*
     * If hashing, we also need a longer-lived context to store the hash
     * table.  The table can't just be kept in the per-query context because
     * we want to be able to throw it away in ExecReScanSetOp.
     */
    if (node->strategy == SETOP_HASHED)
        setopstate->tableContext =
            AllocSetContextCreate(CurrentMemoryContext,
                                  "SetOp hash table",
                                  ALLOCSET_DEFAULT_SIZES);

    /*
     * initialize child nodes
     *
     * If we are hashing then the child plans do not need to handle REWIND
     * efficiently; see ExecReScanSetOp.
     */
    if (node->strategy == SETOP_HASHED)
        eflags &= ~EXEC_FLAG_REWIND;
    outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate, eflags);
    innerPlanState(setopstate) = ExecInitNode(innerPlan(node), estate, eflags);

    /*
     * Initialize locally-allocated slots.  In hashed mode, we just need a
     * result slot.  In sorted mode, we need one first-tuple-of-group slot for
     * each input; we use the result slot for the left input's slot and create
     * another for the right input.  (Note: the nextTupleSlot slots are not
     * ours, but just point to the last slot returned by the input plan node.)
     */
    ExecInitResultTupleSlotTL(&setopstate->ps, &TTSOpsMinimalTuple);
    if (node->strategy != SETOP_HASHED)
    {
        setopstate->leftInput.firstTupleSlot =
            setopstate->ps.ps_ResultTupleSlot;
        setopstate->rightInput.firstTupleSlot =
            ExecInitExtraTupleSlot(estate,
                                   setopstate->ps.ps_ResultTupleDesc,
                                   &TTSOpsMinimalTuple);
    }

    /* Setop nodes do no projections. */
    setopstate->ps.ps_ProjInfo = NULL;

    /*
     * Precompute fmgr lookup data for inner loop.  We need equality and
     * hashing functions to do it by hashing, while for sorting we need
     * SortSupport data.
     */
    if (node->strategy == SETOP_HASHED)
        execTuplesHashPrepare(node->numCols,
                              node->cmpOperators,
                              &setopstate->eqfuncoids,
                              &setopstate->hashfunctions);
    else
    {
        int         nkeys = node->numCols;

        setopstate->sortKeys = (SortSupport)
            palloc0(nkeys * sizeof(SortSupportData));
        for (int i = 0; i < nkeys; i++)
        {
            SortSupport sortKey = setopstate->sortKeys + i;

            sortKey->ssup_cxt = CurrentMemoryContext;
            sortKey->ssup_collation = node->cmpCollations[i];
            sortKey->ssup_nulls_first = node->cmpNullsFirst[i];
            sortKey->ssup_attno = node->cmpColIdx[i];
            /* abbreviated key conversion is not useful here */
            sortKey->abbreviate = false;

            PrepareSortSupportFromOrderingOp(node->cmpOperators[i], sortKey);
        }
    }

    /* Create a hash table if needed */
    if (node->strategy == SETOP_HASHED)
    {
        build_hash_table(setopstate);
        setopstate->table_filled = false;
    }

    return setopstate;
}

/* ----------------------------------------------------------------
 *		ExecEndSetOp
 *
 *		This shuts down the subplans and frees resources allocated
 *		to this node.
 * ----------------------------------------------------------------
 */
void
ExecEndSetOp(SetOpState *node)
{
    /* free subsidiary stuff including hashtable */
    if (node->tableContext)
        MemoryContextDelete(node->tableContext);

    ExecEndNode(outerPlanState(node));
    ExecEndNode(innerPlanState(node));
}


void
ExecReScanSetOp(SetOpState *node)
{
    PlanState  *outerPlan = outerPlanState(node);
    PlanState  *innerPlan = innerPlanState(node);

    ExecClearTuple(node->ps.ps_ResultTupleSlot);
    node->setop_done = false;
    node->numOutput = 0;

    if (((SetOp *) node->ps.plan)->strategy == SETOP_HASHED)
    {
        /*
         * In the hashed case, if we haven't yet built the hash table then we
         * can just return; nothing done yet, so nothing to undo.  If
         * subnode's chgParam is not NULL then it will be re-scanned by
         * ExecProcNode, else no reason to re-scan it at all.
         */
        if (!node->table_filled)
            return;

        /*
         * If we do have the hash table and the subplans do not have any
         * parameter changes, then we can just rescan the existing hash table;
         * no need to build it again.
         */
        if (outerPlan->chgParam == NULL && innerPlan->chgParam == NULL)
        {
            ResetTupleHashIterator(node->hashtable, &node->hashiter);
            return;
        }

        /* Release any hashtable storage */
        if (node->tableContext)
            MemoryContextReset(node->tableContext);

        /* And rebuild an empty hashtable */
        build_hash_table(node);
        node->table_filled = false;
    }
    else
    {
        /* Need to re-read first input from each side */
        node->need_init = true;
    }

    /*
     * if chgParam of subnode is not null then plan will be re-scanned by
     * first ExecProcNode.
     */
    if (outerPlan->chgParam == NULL)
        ExecReScan(outerPlan);
    if (innerPlan->chgParam == NULL)
        ExecReScan(innerPlan);
}