nodeAppend.c
1 /*-------------------------------------------------------------------------
2  *
3  * nodeAppend.c
4  * routines to handle append nodes.
5  *
6  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/executor/nodeAppend.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /* INTERFACE ROUTINES
16  * ExecInitAppend - initialize the append node
17  * ExecAppend - retrieve the next tuple from the node
18  * ExecEndAppend - shut down the append node
19  * ExecReScanAppend - rescan the append node
20  *
21  * NOTES
22  * Each append node contains a list of one or more subplans which
23  * must be iteratively processed (forwards or backwards).
24  * Tuples are retrieved by executing the 'whichplan'th subplan
25  * until the subplan stops returning tuples, at which point that
26  * plan is shut down and the next started up.
27  *
28  * Append nodes don't make use of their left and right
29  * subtrees, rather they maintain a list of subplans so
30  * a typical append node looks like this in the plan tree:
31  *
32  *                ...
33  *               /
34  *      Append -------+------+------+--- nil
35  *      /   \         |      |      |
36  *    nil   nil      ...    ...    ...
37  *                        subplans
38  *
39  * Append nodes are currently used for unions, and to support
40  * inheritance queries, where several relations need to be scanned.
41  * For example, in our standard person/student/employee/student-emp
42  * example, where student and employee inherit from person
43  * and student-emp inherits from student and employee, the
44  * query:
45  *
46  * select name from person
47  *
48  * generates the plan:
49  *
50  *            |
51  *         Append -------+-------+--------+--------+
52  *         /   \         |       |        |        |
53  *       nil   nil      Scan    Scan     Scan     Scan
54  *                       |       |        |        |
55  *                     person employee student student-emp
56  */
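/*
 * For orientation only: a minimal sketch of how the executor drives the
 * interface routines above.  The entry points named here are real, but the
 * control flow is simplified -- in practice callers go through ExecInitNode,
 * ExecProcNode and ExecEndNode rather than invoking these routines directly:
 *
 *		AppendState *as = ExecInitAppend(node, estate, eflags);
 *
 *		for (;;)
 *		{
 *			TupleTableSlot *slot = as->ps.ExecProcNode(&as->ps);  (ExecAppend)
 *			if (TupIsNull(slot))
 *				break;                       (every subplan is exhausted)
 *			... consume the tuple ...
 *		}
 *
 *		ExecEndAppend(as);
 */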
57 
58 #include "postgres.h"
59 
60 #include "executor/execdebug.h"
61 #include "executor/execPartition.h"
62 #include "executor/nodeAppend.h"
63 #include "miscadmin.h"
64 
65 /* Shared state for parallel-aware Append. */
66 struct ParallelAppendState
67 {
68  LWLock pa_lock; /* mutual exclusion to choose next subplan */
69  int pa_next_plan; /* next plan to choose by any worker */
70 
71  /*
72  * pa_finished[i] should be true if no more workers should select subplan
73  * i.  For a non-partial plan, this should be set to true as soon as a
74  * worker selects the plan; for a partial plan, it remains false until
75  * some worker executes the plan to completion.
76  */
77  bool pa_finished[FLEXIBLE_ARRAY_MEMBER];
78 };
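/*
 * Illustrative example (hypothetical plan shape): with four subplans where
 * 0 and 1 are non-partial and 2 and 3 are partial, pa_finished[0] and
 * pa_finished[1] go to true the moment some worker picks them, so each runs
 * in exactly one process; pa_finished[2] and pa_finished[3] stay false while
 * several workers cooperate on them and flip to true only once a worker sees
 * the subplan return its last tuple.
 */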
79 
80 #define INVALID_SUBPLAN_INDEX -1
81 
82 static TupleTableSlot *ExecAppend(PlanState *pstate);
83 static bool choose_next_subplan_locally(AppendState *node);
84 static bool choose_next_subplan_for_leader(AppendState *node);
85 static bool choose_next_subplan_for_worker(AppendState *node);
86 static void mark_invalid_subplans_as_finished(AppendState *node);
87 
88 /* ----------------------------------------------------------------
89  * ExecInitAppend
90  *
91  * Begin all of the subscans of the append node.
92  *
93  * (This is potentially wasteful, since the entire result of the
94  * append node may not be scanned, but this way all of the
95  * structures get allocated in the executor's top level memory
96  * block instead of that of the call to ExecAppend.)
97  * ----------------------------------------------------------------
98  */
99 AppendState *
100 ExecInitAppend(Append *node, EState *estate, int eflags)
101 {
102  AppendState *appendstate = makeNode(AppendState);
103  PlanState **appendplanstates;
104  Bitmapset *validsubplans;
105  int nplans;
106  int firstvalid;
107  int i,
108  j;
109 
110  /* check for unsupported flags */
111  Assert(!(eflags & EXEC_FLAG_MARK));
112 
113  /*
114  * create new AppendState for our append node
115  */
116  appendstate->ps.plan = (Plan *) node;
117  appendstate->ps.state = estate;
118  appendstate->ps.ExecProcNode = ExecAppend;
119 
120  /* Let choose_next_subplan_* function handle setting the first subplan */
121  appendstate->as_whichplan = INVALID_SUBPLAN_INDEX;
122 
123  /* If run-time partition pruning is enabled, then set that up now */
124  if (node->part_prune_info != NULL)
125  {
126  PartitionPruneState *prunestate;
127 
128  /* We may need an expression context to evaluate partition exprs */
129  ExecAssignExprContext(estate, &appendstate->ps);
130 
131  /* Create the working data structure for pruning. */
132  prunestate = ExecCreatePartitionPruneState(&appendstate->ps,
133  node->part_prune_info);
134  appendstate->as_prune_state = prunestate;
135 
136  /* Perform an initial partition prune, if required. */
137  if (prunestate->do_initial_prune)
138  {
139  /* Determine which subplans survive initial pruning */
140  validsubplans = ExecFindInitialMatchingSubPlans(prunestate,
141  list_length(node->appendplans));
142 
143  nplans = bms_num_members(validsubplans);
144  }
145  else
146  {
147  /* We'll need to initialize all subplans */
148  nplans = list_length(node->appendplans);
149  Assert(nplans > 0);
150  validsubplans = bms_add_range(NULL, 0, nplans - 1);
151  }
152 
153  /*
154  * When no run-time pruning is required and there's at least one
155  * subplan, we can fill as_valid_subplans immediately, preventing
156  * later calls to ExecFindMatchingSubPlans.
157  */
158  if (!prunestate->do_exec_prune && nplans > 0)
159  appendstate->as_valid_subplans = bms_add_range(NULL, 0, nplans - 1);
160  }
161  else
162  {
163  nplans = list_length(node->appendplans);
164 
165  /*
166  * When run-time partition pruning is not enabled we can just mark all
167  * subplans as valid; they must also all be initialized.
168  */
169  Assert(nplans > 0);
170  appendstate->as_valid_subplans = validsubplans =
171  bms_add_range(NULL, 0, nplans - 1);
172  appendstate->as_prune_state = NULL;
173  }
174 
175  /*
176  * Initialize result tuple type and slot.
177  */
178  ExecInitResultTupleSlotTL(&appendstate->ps, &TTSOpsVirtual);
179 
180  /* node returns slots from each of its subnodes, therefore not fixed */
181  appendstate->ps.resultopsset = true;
182  appendstate->ps.resultopsfixed = false;
183 
184  appendplanstates = (PlanState **) palloc(nplans *
185  sizeof(PlanState *));
186 
187  /*
188  * call ExecInitNode on each of the valid plans to be executed and save
189  * the results into the appendplanstates array.
190  *
191  * While at it, find out the first valid partial plan.
192  */
193  j = 0;
194  firstvalid = nplans;
195  i = -1;
196  while ((i = bms_next_member(validsubplans, i)) >= 0)
197  {
198  Plan *initNode = (Plan *) list_nth(node->appendplans, i);
199 
200  /*
201  * Record the lowest appendplans index which is a valid partial plan.
202  */
203  if (i >= node->first_partial_plan && j < firstvalid)
204  firstvalid = j;
205 
206  appendplanstates[j++] = ExecInitNode(initNode, estate, eflags);
207  }
208 
209  appendstate->as_first_partial_plan = firstvalid;
210  appendstate->appendplans = appendplanstates;
211  appendstate->as_nplans = nplans;
212 
213  /*
214  * Miscellaneous initialization
215  */
216 
217  appendstate->ps.ps_ProjInfo = NULL;
218 
219  /* For parallel query, this will be overridden later. */
220  appendstate->choose_next_subplan = choose_next_subplan_locally;
221 
222  return appendstate;
223 }
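/*
 * A rough illustration of the two pruning phases handled above (hypothetical
 * query; the exact behaviour depends on the plan): for
 *
 *		SELECT * FROM parted_tab WHERE key = $1
 *
 * run with a generic plan, $1 is already known when the executor starts up,
 * so do_initial_prune lets us discard non-matching partitions right here and
 * only the surviving subplans are ever initialized.  If the comparison value
 * is instead computed during execution (say, a nested-loop parameter),
 * do_exec_prune is set and ExecFindMatchingSubPlans is consulted later, once
 * the value is available.
 */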
224 
225 /* ----------------------------------------------------------------
226  * ExecAppend
227  *
228  * Handles iteration over multiple subplans.
229  * ----------------------------------------------------------------
230  */
231 static TupleTableSlot *
232 ExecAppend(PlanState *pstate)
233 {
234  AppendState *node = castNode(AppendState, pstate);
235 
236  if (node->as_whichplan < 0)
237  {
238  /* Nothing to do if there are no subplans */
239  if (node->as_nplans == 0)
240  return ExecClearTuple(node->ps.ps_ResultTupleSlot);
241 
242  /*
243  * If no subplan has been chosen, we must choose one before
244  * proceeding.
245  */
246  if (node->as_whichplan == INVALID_SUBPLAN_INDEX &&
247  !node->choose_next_subplan(node))
248  return ExecClearTuple(node->ps.ps_ResultTupleSlot);
249  }
250 
251  for (;;)
252  {
253  PlanState *subnode;
254  TupleTableSlot *result;
255 
256  CHECK_FOR_INTERRUPTS();
257 
258  /*
259  * figure out which subplan we are currently processing
260  */
261  Assert(node->as_whichplan >= 0 && node->as_whichplan < node->as_nplans);
262  subnode = node->appendplans[node->as_whichplan];
263 
264  /*
265  * get a tuple from the subplan
266  */
267  result = ExecProcNode(subnode);
268 
269  if (!TupIsNull(result))
270  {
271  /*
272  * If the subplan gave us something then return it as-is. We do
273  * NOT make use of the result slot that was set up in
274  * ExecInitAppend; there's no need for it.
275  */
276  return result;
277  }
278 
279  /* choose new subplan; if none, we're done */
280  if (!node->choose_next_subplan(node))
281  return ExecClearTuple(node->ps.ps_ResultTupleSlot);
282  }
283 }
284 
285 /* ----------------------------------------------------------------
286  * ExecEndAppend
287  *
288  * Shuts down the subscans of the append node.
289  *
290  * Returns nothing of interest.
291  * ----------------------------------------------------------------
292  */
293 void
294 ExecEndAppend(AppendState *node)
295 {
296  PlanState **appendplans;
297  int nplans;
298  int i;
299 
300  /*
301  * get information from the node
302  */
303  appendplans = node->appendplans;
304  nplans = node->as_nplans;
305 
306  /*
307  * shut down each of the subscans
308  */
309  for (i = 0; i < nplans; i++)
310  ExecEndNode(appendplans[i]);
311 }
312 
313 void
314 ExecReScanAppend(AppendState *node)
315 {
316  int i;
317 
318  /*
319  * If any PARAM_EXEC Params used in pruning expressions have changed, then
320  * we'd better unset the valid subplans so that they are reselected for
321  * the new parameter values.
322  */
323  if (node->as_prune_state &&
324  bms_overlap(node->ps.chgParam,
325  node->as_prune_state->execparamids))
326  {
327  bms_free(node->as_valid_subplans);
328  node->as_valid_subplans = NULL;
329  }
330 
331  for (i = 0; i < node->as_nplans; i++)
332  {
333  PlanState *subnode = node->appendplans[i];
334 
335  /*
336  * ExecReScan doesn't know about my subplans, so I have to do
337  * changed-parameter signaling myself.
338  */
339  if (node->ps.chgParam != NULL)
340  UpdateChangedParamSet(subnode, node->ps.chgParam);
341 
342  /*
343  * If chgParam of subnode is not null then plan will be re-scanned by
344  * first ExecProcNode.
345  */
346  if (subnode->chgParam == NULL)
347  ExecReScan(subnode);
348  }
349 
350  /* Let choose_next_subplan_* function handle setting the first subplan */
351  node->as_whichplan = INVALID_SUBPLAN_INDEX;
352 }
353 
354 /* ----------------------------------------------------------------
355  * Parallel Append Support
356  * ----------------------------------------------------------------
357  */
358 
359 /* ----------------------------------------------------------------
360  * ExecAppendEstimate
361  *
362  * Compute the amount of space we'll need in the parallel
363  * query DSM, and inform pcxt->estimator about our needs.
364  * ----------------------------------------------------------------
365  */
366 void
367 ExecAppendEstimate(AppendState *node,
368  ParallelContext *pcxt)
369 {
370  node->pstate_len =
371  add_size(offsetof(ParallelAppendState, pa_finished),
372  sizeof(bool) * node->as_nplans);
373 
374  shm_toc_estimate_chunk(&pcxt->estimator, node->pstate_len);
375  shm_toc_estimate_keys(&pcxt->estimator, 1);
376 }
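/*
 * The chunk estimated above is just the ParallelAppendState header plus one
 * pa_finished flag per subplan; for example, an Append with 8 subplans asks
 * for offsetof(ParallelAppendState, pa_finished) + 8 * sizeof(bool) bytes
 * (before any alignment padding applied by the shm_toc machinery).
 */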
377 
378 
379 /* ----------------------------------------------------------------
380  * ExecAppendInitializeDSM
381  *
382  * Set up shared state for Parallel Append.
383  * ----------------------------------------------------------------
384  */
385 void
386 ExecAppendInitializeDSM(AppendState *node,
387  ParallelContext *pcxt)
388 {
389  ParallelAppendState *pstate;
390 
391  pstate = shm_toc_allocate(pcxt->toc, node->pstate_len);
392  memset(pstate, 0, node->pstate_len);
393  LWLockInitialize(&pstate->pa_lock, LWTRANCHE_PARALLEL_APPEND);
394  shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id, pstate);
395 
396  node->as_pstate = pstate;
397  node->choose_next_subplan = choose_next_subplan_for_leader;
398 }
399 
400 /* ----------------------------------------------------------------
401  * ExecAppendReInitializeDSM
402  *
403  * Reset shared state before beginning a fresh scan.
404  * ----------------------------------------------------------------
405  */
406 void
407 ExecAppendReInitializeDSM(AppendState *node, ParallelContext *pcxt)
408 {
409  ParallelAppendState *pstate = node->as_pstate;
410 
411  pstate->pa_next_plan = 0;
412  memset(pstate->pa_finished, 0, sizeof(bool) * node->as_nplans);
413 }
414 
415 /* ----------------------------------------------------------------
416  * ExecAppendInitializeWorker
417  *
418  * Copy relevant information from TOC into planstate, and initialize
419  * whatever is required to choose and execute the optimal subplan.
420  * ----------------------------------------------------------------
421  */
422 void
423 ExecAppendInitializeWorker(AppendState *node, ParallelWorkerContext *pwcxt)
424 {
425  node->as_pstate = shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
426  node->choose_next_subplan = choose_next_subplan_for_worker;
427 }
428 
429 /* ----------------------------------------------------------------
430  * choose_next_subplan_locally
431  *
432  * Choose next subplan for a non-parallel-aware Append,
433  * returning false if there are no more.
434  * ----------------------------------------------------------------
435  */
436 static bool
437 choose_next_subplan_locally(AppendState *node)
438 {
439  int whichplan = node->as_whichplan;
440  int nextplan;
441 
442  /* We should never be called when there are no subplans */
443  Assert(node->as_nplans > 0);
444 
445  /*
446  * If first call then have the bms member function choose the first valid
447  * subplan by initializing whichplan to -1. If there happen to be no
448  * valid subplans then the bms member function will handle that by
449  * returning a negative number which will allow us to exit returning a
450  * false value.
451  */
452  if (whichplan == INVALID_SUBPLAN_INDEX)
453  {
454  if (node->as_valid_subplans == NULL)
455  node->as_valid_subplans =
456  ExecFindMatchingSubPlans(node->as_prune_state);
457 
458  whichplan = -1;
459  }
460 
461  /* Ensure whichplan is within the expected range */
462  Assert(whichplan >= -1 && whichplan <= node->as_nplans);
463 
464  if (ScanDirectionIsForward(node->ps.state->es_direction))
465  nextplan = bms_next_member(node->as_valid_subplans, whichplan);
466  else
467  nextplan = bms_prev_member(node->as_valid_subplans, whichplan);
468 
469  if (nextplan < 0)
470  return false;
471 
472  node->as_whichplan = nextplan;
473 
474  return true;
475 }
476 
477 /* ----------------------------------------------------------------
478  * choose_next_subplan_for_leader
479  *
480  * Try to pick a plan which doesn't commit us to doing much
481  * work locally, so that as much work as possible is done in
482  * the workers. Cheapest subplans are at the end.
483  * ----------------------------------------------------------------
484  */
485 static bool
486 choose_next_subplan_for_leader(AppendState *node)
487 {
488  ParallelAppendState *pstate = node->as_pstate;
489 
490  /* Backward scan is not supported by parallel-aware plans */
491  Assert(ScanDirectionIsForward(node->ps.state->es_direction));
492 
493  /* We should never be called when there are no subplans */
494  Assert(node->as_nplans > 0);
495 
496  LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE);
497 
498  if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
499  {
500  /* Mark just-completed subplan as finished. */
501  node->as_pstate->pa_finished[node->as_whichplan] = true;
502  }
503  else
504  {
505  /* Start with last subplan. */
506  node->as_whichplan = node->as_nplans - 1;
507 
508  /*
509  * If we've yet to determine the valid subplans then do so now. If
510  * run-time pruning is disabled then the valid subplans will always be
511  * set to all subplans.
512  */
513  if (node->as_valid_subplans == NULL)
514  {
515  node->as_valid_subplans =
516  ExecFindMatchingSubPlans(node->as_prune_state);
517 
518  /*
519  * Mark each invalid plan as finished to allow the loop below to
520  * select the first valid subplan.
521  */
522  mark_invalid_subplans_as_finished(node);
523  }
524  }
525 
526  /* Loop until we find a subplan to execute. */
527  while (pstate->pa_finished[node->as_whichplan])
528  {
529  if (node->as_whichplan == 0)
530  {
531  pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
532  node->as_whichplan = INVALID_SUBPLAN_INDEX;
533  LWLockRelease(&pstate->pa_lock);
534  return false;
535  }
536 
537  /*
538  * We needn't pay attention to as_valid_subplans here as all invalid
539  * plans have been marked as finished.
540  */
541  node->as_whichplan--;
542  }
543 
544  /* If non-partial, immediately mark as finished. */
545  if (node->as_whichplan < node->as_first_partial_plan)
546  node->as_pstate->pa_finished[node->as_whichplan] = true;
547 
548  LWLockRelease(&pstate->pa_lock);
549 
550  return true;
551 }
552 
553 /* ----------------------------------------------------------------
554  * choose_next_subplan_for_worker
555  *
556  * Choose next subplan for a parallel-aware Append, returning
557  * false if there are no more.
558  *
559  * We start from the first plan and advance through the list;
560  * when we get back to the end, we loop back to the first
561  * partial plan. This assigns the non-partial plans first in
562  * order of descending cost and then spreads out the workers
563  * as evenly as possible across the remaining partial plans.
564  * ----------------------------------------------------------------
565  */
566 static bool
567 choose_next_subplan_for_worker(AppendState *node)
568 {
569  ParallelAppendState *pstate = node->as_pstate;
570 
571  /* Backward scan is not supported by parallel-aware plans */
572  Assert(ScanDirectionIsForward(node->ps.state->es_direction));
573 
574  /* We should never be called when there are no subplans */
575  Assert(node->as_nplans > 0);
576 
577  LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE);
578 
579  /* Mark just-completed subplan as finished. */
580  if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
581  node->as_pstate->pa_finished[node->as_whichplan] = true;
582 
583  /*
584  * If we've yet to determine the valid subplans then do so now. If
585  * run-time pruning is disabled then the valid subplans will always be set
586  * to all subplans.
587  */
588  else if (node->as_valid_subplans == NULL)
589  {
590  node->as_valid_subplans =
591  ExecFindMatchingSubPlans(node->as_prune_state);
592  mark_invalid_subplans_as_finished(node);
593  }
594 
595  /* If all the plans are already done, we have nothing to do */
596  if (pstate->pa_next_plan == INVALID_SUBPLAN_INDEX)
597  {
598  LWLockRelease(&pstate->pa_lock);
599  return false;
600  }
601 
602  /* Save the plan from which we are starting the search. */
603  node->as_whichplan = pstate->pa_next_plan;
604 
605  /* Loop until we find a valid subplan to execute. */
606  while (pstate->pa_finished[pstate->pa_next_plan])
607  {
608  int nextplan;
609 
610  nextplan = bms_next_member(node->as_valid_subplans,
611  pstate->pa_next_plan);
612  if (nextplan >= 0)
613  {
614  /* Advance to the next valid plan. */
615  pstate->pa_next_plan = nextplan;
616  }
617  else if (node->as_whichplan > node->as_first_partial_plan)
618  {
619  /*
620  * Try looping back to the first valid partial plan, if there is
621  * one. If there isn't, arrange to bail out below.
622  */
623  nextplan = bms_next_member(node->as_valid_subplans,
624  node->as_first_partial_plan - 1);
625  pstate->pa_next_plan =
626  nextplan < 0 ? node->as_whichplan : nextplan;
627  }
628  else
629  {
630  /*
631  * At last plan, and either there are no partial plans or we've
632  * tried them all. Arrange to bail out.
633  */
634  pstate->pa_next_plan = node->as_whichplan;
635  }
636 
637  if (pstate->pa_next_plan == node->as_whichplan)
638  {
639  /* We've tried everything! */
640  pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
641  LWLockRelease(&pstate->pa_lock);
642  return false;
643  }
644  }
645 
646  /* Pick the plan we found, and advance pa_next_plan one more time. */
647  node->as_whichplan = pstate->pa_next_plan;
648  pstate->pa_next_plan = bms_next_member(node->as_valid_subplans,
649  pstate->pa_next_plan);
650 
651  /*
652  * If there are no more valid plans then try setting the next plan to the
653  * first valid partial plan.
654  */
655  if (pstate->pa_next_plan < 0)
656  {
657  int nextplan = bms_next_member(node->as_valid_subplans,
658  node->as_first_partial_plan - 1);
659 
660  if (nextplan >= 0)
661  pstate->pa_next_plan = nextplan;
662  else
663  {
664  /*
665  * There are no valid partial plans, and we already chose the last
666  * non-partial plan; so flag that there's nothing more for our
667  * fellow workers to do.
668  */
669  pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
670  }
671  }
672 
673  /* If non-partial, immediately mark as finished. */
674  if (node->as_whichplan < node->as_first_partial_plan)
675  node->as_pstate->pa_finished[node->as_whichplan] = true;
676 
677  LWLockRelease(&pstate->pa_lock);
678 
679  return true;
680 }
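/*
 * Worked example of the strategy described above (hypothetical costs): with
 * subplans [NP0, NP1, P2, P3] and as_first_partial_plan == 2, the non-partial
 * plans NP0 and NP1 come first in descending cost order.  The first workers
 * to arrive take NP0 and NP1 and mark them finished immediately, so each runs
 * in exactly one worker; pa_next_plan then advances into the partial plans,
 * and remaining (or early-finishing) workers cycle through P2 and P3,
 * wrapping back to the first valid partial plan, until every plan has been
 * marked finished.
 */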
681 
682 /*
683  * mark_invalid_subplans_as_finished
684  * Marks the ParallelAppendState's pa_finished as true for each invalid
685  * subplan.
686  *
687  * This function should only be called for parallel Append with run-time
688  * pruning enabled.
689  */
690 static void
691 mark_invalid_subplans_as_finished(AppendState *node)
692 {
693  int i;
694 
695  /* Only valid to call this while in parallel Append mode */
696  Assert(node->as_pstate);
697 
698  /* Shouldn't have been called when run-time pruning is not enabled */
699  Assert(node->as_prune_state);
700 
701  /* Nothing to do if all plans are valid */
702  if (bms_num_members(node->as_valid_subplans) == node->as_nplans)
703  return;
704 
705  /* Mark all non-valid plans as finished */
706  for (i = 0; i < node->as_nplans; i++)
707  {
708  if (!bms_is_member(i, node->as_valid_subplans))
709  node->as_pstate->pa_finished[i] = true;
710  }
711 }