PostgreSQL Source Code  git master
nodeAppend.c
/*-------------------------------------------------------------------------
 *
 * nodeAppend.c
 *    routines to handle append nodes.
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/executor/nodeAppend.c
 *
 *-------------------------------------------------------------------------
 */
/* INTERFACE ROUTINES
 *    ExecInitAppend   - initialize the append node
 *    ExecAppend       - retrieve the next tuple from the node
 *    ExecEndAppend    - shut down the append node
 *    ExecReScanAppend - rescan the append node
 *
 *   NOTES
 *    Each append node contains a list of one or more subplans which
 *    must be iteratively processed (forwards or backwards).
 *    Tuples are retrieved by executing the 'whichplan'th subplan
 *    until the subplan stops returning tuples, at which point that
 *    plan is shut down and the next started up.
 *
 *    Append nodes don't make use of their left and right
 *    subtrees, rather they maintain a list of subplans so
 *    a typical append node looks like this in the plan tree:
 *
 *                 ...
 *                 /
 *              Append -------+------+------+--- nil
 *              /   \         |      |      |
 *            nil   nil      ...    ...    ...
 *                              subplans
 *
 *    Append nodes are currently used for unions, and to support
 *    inheritance queries, where several relations need to be scanned.
 *    For example, in our standard person/student/employee/student-emp
 *    example, where student and employee inherit from person
 *    and student-emp inherits from student and employee, the
 *    query:
 *
 *              select name from person
 *
 *    generates the plan:
 *
 *                |
 *              Append -------+-------+--------+--------+
 *              /   \         |       |        |        |
 *            nil   nil      Scan    Scan     Scan     Scan
 *                            |       |        |        |
 *                          person employee student student-emp
 */
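
/*
 * Editor's note (illustrative, not in the original file): an Append is
 * driven through the ordinary demand-pull executor protocol.  A parent
 * node calls something like
 *
 *     slot = ExecProcNode((PlanState *) appendstate);
 *
 * repeatedly, receiving tuples from subplan 0, then subplan 1, and so on,
 * with an empty slot returned once every subplan is exhausted.
 */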

#include "postgres.h"

#include "executor/execdebug.h"
#include "executor/execPartition.h"
#include "executor/nodeAppend.h"
#include "miscadmin.h"

/* Shared state for parallel-aware Append. */
struct ParallelAppendState
{
    LWLock      pa_lock;        /* mutual exclusion to choose next subplan */
    int         pa_next_plan;   /* next plan to choose by any worker */

    /*
     * pa_finished[i] should be true if no more workers should select subplan
     * i.  For a non-partial plan, this should be set to true as soon as a
     * worker selects the plan; for a partial plan, it remains false until
     * some worker executes the plan to completion.
     */
    bool        pa_finished[FLEXIBLE_ARRAY_MEMBER];
};

#define INVALID_SUBPLAN_INDEX       -1
#define NO_MATCHING_SUBPLANS        -2
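
/*
 * Editor's note (illustrative): as_whichplan holds either a real subplan
 * index in [0, as_nplans) or one of the sentinels above.  For example, it
 * starts out as INVALID_SUBPLAN_INDEX ("no subplan chosen yet") and is set
 * to NO_MATCHING_SUBPLANS when initial pruning eliminates every subplan.
 */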

static TupleTableSlot *ExecAppend(PlanState *pstate);
static bool choose_next_subplan_locally(AppendState *node);
static bool choose_next_subplan_for_leader(AppendState *node);
static bool choose_next_subplan_for_worker(AppendState *node);
static void mark_invalid_subplans_as_finished(AppendState *node);

/* ----------------------------------------------------------------
 *    ExecInitAppend
 *
 *    Begin all of the subscans of the append node.
 *
 *    (This is potentially wasteful, since the entire result of the
 *    append node may not be scanned, but this way all of the
 *    structures get allocated in the executor's top level memory
 *    block instead of that of the call to ExecAppend.)
 * ----------------------------------------------------------------
 */
AppendState *
ExecInitAppend(Append *node, EState *estate, int eflags)
{
    AppendState *appendstate = makeNode(AppendState);
    PlanState **appendplanstates;
    Bitmapset  *validsubplans;
    int         nplans;
    int         firstvalid;
    int         i,
                j;

    /* check for unsupported flags */
    Assert(!(eflags & EXEC_FLAG_MARK));

    /*
     * create new AppendState for our append node
     */
    appendstate->ps.plan = (Plan *) node;
    appendstate->ps.state = estate;
    appendstate->ps.ExecProcNode = ExecAppend;

    /* Let choose_next_subplan_* function handle setting the first subplan */
    appendstate->as_whichplan = INVALID_SUBPLAN_INDEX;

    /* If run-time partition pruning is enabled, then set that up now */
    if (node->part_prune_info != NULL)
    {
        PartitionPruneState *prunestate;

        /* We may need an expression context to evaluate partition exprs */
        ExecAssignExprContext(estate, &appendstate->ps);

        /* Create the working data structure for pruning. */
        prunestate = ExecCreatePartitionPruneState(&appendstate->ps,
                                                   node->part_prune_info);
        appendstate->as_prune_state = prunestate;

        /* Perform an initial partition prune, if required. */
        if (prunestate->do_initial_prune)
        {
            /* Determine which subplans survive initial pruning */
            validsubplans = ExecFindInitialMatchingSubPlans(prunestate,
                                                            list_length(node->appendplans));

            /*
             * The case where no subplans survive pruning must be handled
             * specially.  The problem here is that code in explain.c requires
             * an Append to have at least one subplan in order for it to
             * properly determine the Vars in that subplan's targetlist.  We
             * sidestep this issue by just initializing the first subplan and
             * setting as_whichplan to NO_MATCHING_SUBPLANS to indicate that
             * we don't really need to scan any subnodes.
             */
            if (bms_is_empty(validsubplans))
            {
                appendstate->as_whichplan = NO_MATCHING_SUBPLANS;

                /* Mark the first as valid so that it's initialized below */
                validsubplans = bms_make_singleton(0);
            }

            nplans = bms_num_members(validsubplans);
        }
        else
        {
            /* We'll need to initialize all subplans */
            nplans = list_length(node->appendplans);
            Assert(nplans > 0);
            validsubplans = bms_add_range(NULL, 0, nplans - 1);
        }

        /*
         * If no run-time pruning is required, we can fill as_valid_subplans
         * immediately, preventing later calls to ExecFindMatchingSubPlans.
         */
        if (!prunestate->do_exec_prune)
        {
            Assert(nplans > 0);
            appendstate->as_valid_subplans = bms_add_range(NULL, 0, nplans - 1);
        }
    }
    else
    {
        nplans = list_length(node->appendplans);

        /*
         * When run-time partition pruning is not enabled we can just mark all
         * subplans as valid; they must also all be initialized.
         */
        Assert(nplans > 0);
        appendstate->as_valid_subplans = validsubplans =
            bms_add_range(NULL, 0, nplans - 1);
        appendstate->as_prune_state = NULL;
    }

    /*
     * Initialize result tuple type and slot.
     */
    ExecInitResultTupleSlotTL(&appendstate->ps, &TTSOpsVirtual);

    /* node returns slots from each of its subnodes, therefore not fixed */
    appendstate->ps.resultopsset = true;
    appendstate->ps.resultopsfixed = false;

    appendplanstates = (PlanState **) palloc(nplans *
                                             sizeof(PlanState *));

    /*
     * call ExecInitNode on each of the valid plans to be executed and save
     * the results into the appendplanstates array.
     *
     * While at it, find out the first valid partial plan.
     */
    j = 0;
    firstvalid = nplans;
    i = -1;
    while ((i = bms_next_member(validsubplans, i)) >= 0)
    {
        Plan       *initNode = (Plan *) list_nth(node->appendplans, i);

        /*
         * Record the lowest appendplans index which is a valid partial plan.
         */
        if (i >= node->first_partial_plan && j < firstvalid)
            firstvalid = j;

        appendplanstates[j++] = ExecInitNode(initNode, estate, eflags);
    }

    appendstate->as_first_partial_plan = firstvalid;
    appendstate->appendplans = appendplanstates;
    appendstate->as_nplans = nplans;

    /*
     * Miscellaneous initialization
     */

    appendstate->ps.ps_ProjInfo = NULL;

    /* For parallel query, this will be overridden later. */
    appendstate->choose_next_subplan = choose_next_subplan_locally;

    return appendstate;
}
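
/*
 * Editor's sketch (hypothetical, compiled out): the Bitmapset traversal
 * idiom used above.  bms_next_member() returns the smallest member greater
 * than the previous one, or a negative value when none remain, so seeding
 * the loop with -1 visits every member in ascending order.  The example
 * function name and the NODEAPPEND_EXAMPLES guard are not part of the
 * original file.
 */
#ifdef NODEAPPEND_EXAMPLES
static int
example_count_members(const Bitmapset *valid)
{
    int         count = 0;
    int         i = -1;

    while ((i = bms_next_member(valid, i)) >= 0)
        count++;                /* here: one surviving subplan per member */

    return count;
}
#endif                          /* NODEAPPEND_EXAMPLES */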

/* ----------------------------------------------------------------
 *    ExecAppend
 *
 *    Handles iteration over multiple subplans.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecAppend(PlanState *pstate)
{
    AppendState *node = castNode(AppendState, pstate);

    if (node->as_whichplan < 0)
    {
        /*
         * If no subplan has been chosen, we must choose one before
         * proceeding.
         */
        if (node->as_whichplan == INVALID_SUBPLAN_INDEX &&
            !node->choose_next_subplan(node))
            return ExecClearTuple(node->ps.ps_ResultTupleSlot);

        /* Nothing to do if there are no matching subplans */
        else if (node->as_whichplan == NO_MATCHING_SUBPLANS)
            return ExecClearTuple(node->ps.ps_ResultTupleSlot);
    }

    for (;;)
    {
        PlanState  *subnode;
        TupleTableSlot *result;

        CHECK_FOR_INTERRUPTS();

        /*
         * figure out which subplan we are currently processing
         */
        Assert(node->as_whichplan >= 0 && node->as_whichplan < node->as_nplans);
        subnode = node->appendplans[node->as_whichplan];

        /*
         * get a tuple from the subplan
         */
        result = ExecProcNode(subnode);

        if (!TupIsNull(result))
        {
            /*
             * If the subplan gave us something then return it as-is. We do
             * NOT make use of the result slot that was set up in
             * ExecInitAppend; there's no need for it.
             */
            return result;
        }

        /* choose new subplan; if none, we're done */
        if (!node->choose_next_subplan(node))
            return ExecClearTuple(node->ps.ps_ResultTupleSlot);
    }
}
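
/*
 * Editor's sketch (hypothetical, compiled out): ExecAppend is reached
 * through the ExecProcNode() indirection installed by ExecInitAppend, so a
 * consumer simply pulls until it gets an empty slot.  "consume_tuple" is a
 * stand-in for whatever the caller does with each row; neither it nor the
 * NODEAPPEND_EXAMPLES guard exists in the original file.
 */
#ifdef NODEAPPEND_EXAMPLES
static void
example_drain_append(AppendState *appendstate,
                     void (*consume_tuple) (TupleTableSlot *slot))
{
    for (;;)
    {
        TupleTableSlot *slot = ExecProcNode(&appendstate->ps);

        if (TupIsNull(slot))
            break;              /* every subplan is exhausted */
        consume_tuple(slot);
    }
}
#endif                          /* NODEAPPEND_EXAMPLES */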

/* ----------------------------------------------------------------
 *    ExecEndAppend
 *
 *    Shuts down the subscans of the append node.
 *
 *    Returns nothing of interest.
 * ----------------------------------------------------------------
 */
void
ExecEndAppend(AppendState *node)
{
    PlanState **appendplans;
    int         nplans;
    int         i;

    /*
     * get information from the node
     */
    appendplans = node->appendplans;
    nplans = node->as_nplans;

    /*
     * shut down each of the subscans
     */
    for (i = 0; i < nplans; i++)
        ExecEndNode(appendplans[i]);
}

void
ExecReScanAppend(AppendState *node)
{
    int         i;

    /*
     * If any PARAM_EXEC Params used in pruning expressions have changed, then
     * we'd better unset the valid subplans so that they are reselected for
     * the new parameter values.
     */
    if (node->as_prune_state &&
        bms_overlap(node->ps.chgParam,
                    node->as_prune_state->execparamids))
    {
        bms_free(node->as_valid_subplans);
        node->as_valid_subplans = NULL;
    }

    for (i = 0; i < node->as_nplans; i++)
    {
        PlanState  *subnode = node->appendplans[i];

        /*
         * ExecReScan doesn't know about my subplans, so I have to do
         * changed-parameter signaling myself.
         */
        if (node->ps.chgParam != NULL)
            UpdateChangedParamSet(subnode, node->ps.chgParam);

        /*
         * If chgParam of subnode is not null then plan will be re-scanned by
         * first ExecProcNode.
         */
        if (subnode->chgParam == NULL)
            ExecReScan(subnode);
    }

    /* Let choose_next_subplan_* function handle setting the first subplan */
    node->as_whichplan = INVALID_SUBPLAN_INDEX;
}

/* ----------------------------------------------------------------
 *                      Parallel Append Support
 * ----------------------------------------------------------------
 */

/* ----------------------------------------------------------------
 *    ExecAppendEstimate
 *
 *    Compute the amount of space we'll need in the parallel
 *    query DSM, and inform pcxt->estimator about our needs.
 * ----------------------------------------------------------------
 */
void
ExecAppendEstimate(AppendState *node,
                   ParallelContext *pcxt)
{
    node->pstate_len =
        add_size(offsetof(ParallelAppendState, pa_finished),
                 sizeof(bool) * node->as_nplans);

    shm_toc_estimate_chunk(&pcxt->estimator, node->pstate_len);
    shm_toc_estimate_keys(&pcxt->estimator, 1);
}
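
/*
 * Editor's note (illustrative): the offsetof/FLEXIBLE_ARRAY_MEMBER idiom
 * above sizes ParallelAppendState so that pa_finished[] holds one flag per
 * subplan, and add_size() is the overflow-checked counterpart of "+" for
 * Size arithmetic, raising an error rather than silently wrapping.
 */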


/* ----------------------------------------------------------------
 *    ExecAppendInitializeDSM
 *
 *    Set up shared state for Parallel Append.
 * ----------------------------------------------------------------
 */
void
ExecAppendInitializeDSM(AppendState *node,
                        ParallelContext *pcxt)
{
    ParallelAppendState *pstate;

    pstate = shm_toc_allocate(pcxt->toc, node->pstate_len);
    memset(pstate, 0, node->pstate_len);
    LWLockInitialize(&pstate->pa_lock, LWTRANCHE_PARALLEL_APPEND);
    shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id, pstate);

    node->as_pstate = pstate;
    node->choose_next_subplan = choose_next_subplan_for_leader;
}

/* ----------------------------------------------------------------
 *    ExecAppendReInitializeDSM
 *
 *    Reset shared state before beginning a fresh scan.
 * ----------------------------------------------------------------
 */
void
ExecAppendReInitializeDSM(AppendState *node, ParallelContext *pcxt)
{
    ParallelAppendState *pstate = node->as_pstate;

    pstate->pa_next_plan = 0;
    memset(pstate->pa_finished, 0, sizeof(bool) * node->as_nplans);
}

/* ----------------------------------------------------------------
 *    ExecAppendInitializeWorker
 *
 *    Copy relevant information from TOC into planstate, and initialize
 *    whatever is required to choose and execute the optimal subplan.
 * ----------------------------------------------------------------
 */
void
ExecAppendInitializeWorker(AppendState *node, ParallelWorkerContext *pwcxt)
{
    node->as_pstate = shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
    node->choose_next_subplan = choose_next_subplan_for_worker;
}

/* ----------------------------------------------------------------
 *    choose_next_subplan_locally
 *
 *    Choose next subplan for a non-parallel-aware Append,
 *    returning false if there are no more.
 * ----------------------------------------------------------------
 */
static bool
choose_next_subplan_locally(AppendState *node)
{
    int         whichplan = node->as_whichplan;
    int         nextplan;

    /* We should never be called when there are no subplans */
    Assert(whichplan != NO_MATCHING_SUBPLANS);

    /*
     * If first call then have the bms member function choose the first valid
     * subplan by initializing whichplan to -1.  If there happen to be no
     * valid subplans then the bms member function will handle that by
     * returning a negative number which will allow us to exit returning a
     * false value.
     */
    if (whichplan == INVALID_SUBPLAN_INDEX)
    {
        if (node->as_valid_subplans == NULL)
            node->as_valid_subplans =
                ExecFindMatchingSubPlans(node->as_prune_state);

        whichplan = -1;
    }

    /* Ensure whichplan is within the expected range */
    Assert(whichplan >= -1 && whichplan <= node->as_nplans);

    if (ScanDirectionIsForward(node->ps.state->es_direction))
        nextplan = bms_next_member(node->as_valid_subplans, whichplan);
    else
        nextplan = bms_prev_member(node->as_valid_subplans, whichplan);

    if (nextplan < 0)
        return false;

    node->as_whichplan = nextplan;

    return true;
}
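
/*
 * Editor's worked example (hypothetical values): with as_valid_subplans =
 * {0, 2, 3} and a forward scan, successive calls set as_whichplan to 0,
 * then 2, then 3; the next call finds no further member and returns false.
 * A backward scan visits 3, 2, 0 via bms_prev_member() instead.
 */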

/* ----------------------------------------------------------------
 *    choose_next_subplan_for_leader
 *
 *    Try to pick a plan which doesn't commit us to doing much
 *    work locally, so that as much work as possible is done in
 *    the workers.  Cheapest subplans are at the end.
 * ----------------------------------------------------------------
 */
static bool
choose_next_subplan_for_leader(AppendState *node)
{
    ParallelAppendState *pstate = node->as_pstate;

    /* Backward scan is not supported by parallel-aware plans */
    Assert(ScanDirectionIsForward(node->ps.state->es_direction));

    /* We should never be called when there are no subplans */
    Assert(node->as_whichplan != NO_MATCHING_SUBPLANS);

    LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE);

    if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
    {
        /* Mark just-completed subplan as finished. */
        node->as_pstate->pa_finished[node->as_whichplan] = true;
    }
    else
    {
        /* Start with last subplan. */
        node->as_whichplan = node->as_nplans - 1;

        /*
         * If we've yet to determine the valid subplans then do so now.  If
         * run-time pruning is disabled then the valid subplans will always be
         * set to all subplans.
         */
        if (node->as_valid_subplans == NULL)
        {
            node->as_valid_subplans =
                ExecFindMatchingSubPlans(node->as_prune_state);

            /*
             * Mark each invalid plan as finished to allow the loop below to
             * select the first valid subplan.
             */
            mark_invalid_subplans_as_finished(node);
        }
    }

    /* Loop until we find a subplan to execute. */
    while (pstate->pa_finished[node->as_whichplan])
    {
        if (node->as_whichplan == 0)
        {
            pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
            node->as_whichplan = INVALID_SUBPLAN_INDEX;
            LWLockRelease(&pstate->pa_lock);
            return false;
        }

        /*
         * We needn't pay attention to as_valid_subplans here as all invalid
         * plans have been marked as finished.
         */
        node->as_whichplan--;
    }

    /* If non-partial, immediately mark as finished. */
    if (node->as_whichplan < node->as_first_partial_plan)
        node->as_pstate->pa_finished[node->as_whichplan] = true;

    LWLockRelease(&pstate->pa_lock);

    return true;
}
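
/*
 * Editor's worked example (hypothetical values): with as_nplans = 4 and
 * nothing finished yet, the leader starts at subplan 3 (cheapest subplans
 * are at the end) and only walks downward past entries already marked
 * finished, so it tends to take cheap work and leave the expensive
 * subplans at the front of the list to the workers.
 */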

/* ----------------------------------------------------------------
 *    choose_next_subplan_for_worker
 *
 *    Choose next subplan for a parallel-aware Append, returning
 *    false if there are no more.
 *
 *    We start from the first plan and advance through the list;
 *    when we get back to the end, we loop back to the first
 *    partial plan.  This assigns the non-partial plans first in
 *    order of descending cost and then spreads out the workers
 *    as evenly as possible across the remaining partial plans.
 * ----------------------------------------------------------------
 */
static bool
choose_next_subplan_for_worker(AppendState *node)
{
    ParallelAppendState *pstate = node->as_pstate;

    /* Backward scan is not supported by parallel-aware plans */
    Assert(ScanDirectionIsForward(node->ps.state->es_direction));

    /* We should never be called when there are no subplans */
    Assert(node->as_whichplan != NO_MATCHING_SUBPLANS);

    LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE);

    /* Mark just-completed subplan as finished. */
    if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
        node->as_pstate->pa_finished[node->as_whichplan] = true;

    /*
     * If we've yet to determine the valid subplans then do so now.  If
     * run-time pruning is disabled then the valid subplans will always be set
     * to all subplans.
     */
    else if (node->as_valid_subplans == NULL)
    {
        node->as_valid_subplans =
            ExecFindMatchingSubPlans(node->as_prune_state);
        mark_invalid_subplans_as_finished(node);
    }

    /* If all the plans are already done, we have nothing to do */
    if (pstate->pa_next_plan == INVALID_SUBPLAN_INDEX)
    {
        LWLockRelease(&pstate->pa_lock);
        return false;
    }

    /* Save the plan from which we are starting the search. */
    node->as_whichplan = pstate->pa_next_plan;

    /* Loop until we find a valid subplan to execute. */
    while (pstate->pa_finished[pstate->pa_next_plan])
    {
        int         nextplan;

        nextplan = bms_next_member(node->as_valid_subplans,
                                   pstate->pa_next_plan);
        if (nextplan >= 0)
        {
            /* Advance to the next valid plan. */
            pstate->pa_next_plan = nextplan;
        }
        else if (node->as_whichplan > node->as_first_partial_plan)
        {
            /*
             * Try looping back to the first valid partial plan, if there is
             * one.  If there isn't, arrange to bail out below.
             */
            nextplan = bms_next_member(node->as_valid_subplans,
                                       node->as_first_partial_plan - 1);
            pstate->pa_next_plan =
                nextplan < 0 ? node->as_whichplan : nextplan;
        }
        else
        {
            /*
             * At last plan, and either there are no partial plans or we've
             * tried them all.  Arrange to bail out.
             */
            pstate->pa_next_plan = node->as_whichplan;
        }

        if (pstate->pa_next_plan == node->as_whichplan)
        {
            /* We've tried everything! */
            pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
            LWLockRelease(&pstate->pa_lock);
            return false;
        }
    }

    /* Pick the plan we found, and advance pa_next_plan one more time. */
    node->as_whichplan = pstate->pa_next_plan;
    pstate->pa_next_plan = bms_next_member(node->as_valid_subplans,
                                           pstate->pa_next_plan);

    /*
     * If there are no more valid plans then try setting the next plan to the
     * first valid partial plan.
     */
    if (pstate->pa_next_plan < 0)
    {
        int         nextplan = bms_next_member(node->as_valid_subplans,
                                               node->as_first_partial_plan - 1);

        if (nextplan >= 0)
            pstate->pa_next_plan = nextplan;
        else
        {
            /*
             * There are no valid partial plans, and we already chose the last
             * non-partial plan; so flag that there's nothing more for our
             * fellow workers to do.
             */
            pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
        }
    }

    /* If non-partial, immediately mark as finished. */
    if (node->as_whichplan < node->as_first_partial_plan)
        node->as_pstate->pa_finished[node->as_whichplan] = true;

    LWLockRelease(&pstate->pa_lock);

    return true;
}
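
/*
 * Editor's worked example (hypothetical values): with four subplans of
 * which the first two are non-partial (as_first_partial_plan = 2), the
 * first workers to arrive take subplans 0 and 1 and mark them finished
 * immediately; later arrivals advance to partial subplans 2 and 3, and
 * once pa_next_plan runs off the end it wraps back to subplan 2, spreading
 * the remaining workers across the partial plans.
 */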

/*
 * mark_invalid_subplans_as_finished
 *    Marks the ParallelAppendState's pa_finished as true for each invalid
 *    subplan.
 *
 * This function should only be called for parallel Append with run-time
 * pruning enabled.
 */
static void
mark_invalid_subplans_as_finished(AppendState *node)
{
    int         i;

    /* Only valid to call this while in parallel Append mode */
    Assert(node->as_pstate);

    /* Shouldn't have been called when run-time pruning is not enabled */
    Assert(node->as_prune_state);

    /* Nothing to do if all plans are valid */
    if (bms_num_members(node->as_valid_subplans) == node->as_nplans)
        return;

    /* Mark all non-valid plans as finished */
    for (i = 0; i < node->as_nplans; i++)
    {
        if (!bms_is_member(i, node->as_valid_subplans))
            node->as_pstate->pa_finished[i] = true;
    }
}