PostgreSQL Source Code (git master)
nodeAppend.c
/*-------------------------------------------------------------------------
 *
 * nodeAppend.c
 *    routines to handle append nodes.
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/executor/nodeAppend.c
 *
 *-------------------------------------------------------------------------
 */
/* INTERFACE ROUTINES
 *      ExecInitAppend   - initialize the append node
 *      ExecAppend       - retrieve the next tuple from the node
 *      ExecEndAppend    - shut down the append node
 *      ExecReScanAppend - rescan the append node
 *
 *   NOTES
 *      Each append node contains a list of one or more subplans which
 *      must be iteratively processed (forwards or backwards).
 *      Tuples are retrieved by executing the 'whichplan'th subplan
 *      until the subplan stops returning tuples, at which point that
 *      plan is shut down and the next started up.
 *
 *      Append nodes don't make use of their left and right
 *      subtrees, rather they maintain a list of subplans so
 *      a typical append node looks like this in the plan tree:
 *
 *                 ...
 *                 /
 *              Append -------+------+------+--- nil
 *              /   \         |      |      |
 *            nil   nil      ...    ...    ...
 *                               subplans
 *
 *      Append nodes are currently used for unions, and to support
 *      inheritance queries, where several relations need to be scanned.
 *      For example, in our standard person/student/employee/student-emp
 *      example, where student and employee inherit from person
 *      and student-emp inherits from student and employee, the
 *      query:
 *
 *              select name from person
 *
 *      generates the plan:
 *
 *                |
 *              Append -------+-------+--------+--------+
 *              /   \         |       |        |        |
 *            nil   nil      Scan    Scan     Scan     Scan
 *                            |       |        |        |
 *                          person employee student student-emp
 */
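
/*
 * As a more modern illustration: a query over a partitioned table, say
 *
 *      SELECT * FROM parted_tab WHERE key = 42;
 *
 * (parted_tab standing in for any partitioned table) likewise yields an
 * Append with one subplan per surviving partition; the run-time pruning
 * support below can discard further subplans at executor startup or at
 * rescan time.
 */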

#include "postgres.h"

#include "executor/execdebug.h"
#include "executor/execPartition.h"
#include "executor/nodeAppend.h"
#include "miscadmin.h"

/* Shared state for parallel-aware Append. */
struct ParallelAppendState
{
    LWLock      pa_lock;        /* mutual exclusion to choose next subplan */
    int         pa_next_plan;   /* next plan to choose by any worker */

    /*
     * pa_finished[i] should be true if no more workers should select subplan
     * i.  For a non-partial plan, this should be set to true as soon as a
     * worker selects the plan; for a partial plan, it remains false until
     * some worker executes the plan to completion.
     */
    bool        pa_finished[FLEXIBLE_ARRAY_MEMBER];
};

#define INVALID_SUBPLAN_INDEX       -1
#define NO_MATCHING_SUBPLANS        -2

static TupleTableSlot *ExecAppend(PlanState *pstate);
static bool choose_next_subplan_locally(AppendState *node);
static bool choose_next_subplan_for_leader(AppendState *node);
static bool choose_next_subplan_for_worker(AppendState *node);
static void mark_invalid_subplans_as_finished(AppendState *node);

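/*
 * as_whichplan normally holds the index of the subplan currently being
 * executed, but it can also hold one of the sentinel values above:
 * INVALID_SUBPLAN_INDEX means no subplan has been chosen yet (the
 * choose_next_subplan callback will pick one), while NO_MATCHING_SUBPLANS
 * means run-time pruning proved that no subplan needs to be scanned at all.
 */
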
/* ----------------------------------------------------------------
 *      ExecInitAppend
 *
 *      Begin all of the subscans of the append node.
 *
 *     (This is potentially wasteful, since the entire result of the
 *      append node may not be scanned, but this way all of the
 *      structures get allocated in the executor's top level memory
 *      block instead of that of the call to ExecAppend.)
 * ----------------------------------------------------------------
 */
AppendState *
ExecInitAppend(Append *node, EState *estate, int eflags)
{
    AppendState *appendstate = makeNode(AppendState);
    PlanState **appendplanstates;
    Bitmapset  *validsubplans;
    int         nplans;
    int         firstvalid;
    int         i,
                j;
    ListCell   *lc;

    /* check for unsupported flags */
    Assert(!(eflags & EXEC_FLAG_MARK));

    /*
     * Lock the non-leaf tables in the partition tree controlled by this node.
     * It's a no-op for non-partitioned parent tables.
     */
    ExecLockNonLeafAppendTables(node->partitioned_rels, estate);

    /*
     * create new AppendState for our append node
     */
    appendstate->ps.plan = (Plan *) node;
    appendstate->ps.state = estate;
    appendstate->ps.ExecProcNode = ExecAppend;

    /* Let choose_next_subplan_* function handle setting the first subplan */
    appendstate->as_whichplan = INVALID_SUBPLAN_INDEX;

    /* If run-time partition pruning is enabled, then set that up now */
    if (node->part_prune_infos != NIL)
    {
        PartitionPruneState *prunestate;

        /* We may need an expression context to evaluate partition exprs */
        ExecAssignExprContext(estate, &appendstate->ps);

        /* Create the working data structure for pruning. */
        prunestate = ExecCreatePartitionPruneState(&appendstate->ps,
                                                   node->part_prune_infos);
        appendstate->as_prune_state = prunestate;

        /* Perform an initial partition prune, if required. */
        if (prunestate->do_initial_prune)
        {
            /* Determine which subplans survive initial pruning */
            validsubplans = ExecFindInitialMatchingSubPlans(prunestate,
                                                            list_length(node->appendplans));

            /*
             * The case where no subplans survive pruning must be handled
             * specially.  The problem here is that code in explain.c requires
             * an Append to have at least one subplan in order for it to
             * properly determine the Vars in that subplan's targetlist.  We
             * sidestep this issue by just initializing the first subplan and
             * setting as_whichplan to NO_MATCHING_SUBPLANS to indicate that
             * we don't really need to scan any subnodes.
             */
            if (bms_is_empty(validsubplans))
            {
                appendstate->as_whichplan = NO_MATCHING_SUBPLANS;

                /* Mark the first as valid so that it's initialized below */
                validsubplans = bms_make_singleton(0);
            }

            nplans = bms_num_members(validsubplans);
        }
        else
        {
            /* We'll need to initialize all subplans */
            nplans = list_length(node->appendplans);
            validsubplans = bms_add_range(NULL, 0, nplans - 1);
        }

        /*
         * If no runtime pruning is required, we can fill as_valid_subplans
         * immediately, preventing later calls to ExecFindMatchingSubPlans.
         */
        if (!prunestate->do_exec_prune)
            appendstate->as_valid_subplans = bms_add_range(NULL, 0, nplans - 1);
    }
    else
    {
        nplans = list_length(node->appendplans);

        /*
         * When run-time partition pruning is not enabled we can just mark all
         * subplans as valid; they must also all be initialized.
         */
        appendstate->as_valid_subplans = validsubplans =
            bms_add_range(NULL, 0, nplans - 1);
        appendstate->as_prune_state = NULL;
    }

    /*
     * Initialize result tuple type and slot.
     */
    ExecInitResultTupleSlotTL(estate, &appendstate->ps);

    appendplanstates = (PlanState **) palloc(nplans *
                                             sizeof(PlanState *));

    /*
     * call ExecInitNode on each of the valid plans to be executed and save
     * the results into the appendplanstates array.
     *
     * While at it, find out the first valid partial plan.
     */
    j = i = 0;
    firstvalid = nplans;
    foreach(lc, node->appendplans)
    {
        if (bms_is_member(i, validsubplans))
        {
            Plan       *initNode = (Plan *) lfirst(lc);

            /*
             * Record the lowest appendplans index which is a valid partial
             * plan.
             */
            if (i >= node->first_partial_plan && j < firstvalid)
                firstvalid = j;

            appendplanstates[j++] = ExecInitNode(initNode, estate, eflags);
        }
        i++;
    }
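
    /*
     * To illustrate the remapping above: if there were four appendplans and
     * pruning kept only {0, 2, 3}, then appendplanstates has three entries,
     * holding appendplans members 0, 2 and 3 at positions 0, 1 and 2; both
     * firstvalid and as_whichplan are expressed in terms of this compacted
     * array.
     */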

    appendstate->as_first_partial_plan = firstvalid;
    appendstate->appendplans = appendplanstates;
    appendstate->as_nplans = nplans;

    /*
     * Miscellaneous initialization
     */

    appendstate->ps.ps_ProjInfo = NULL;

    /* For parallel query, this will be overridden later. */
    appendstate->choose_next_subplan = choose_next_subplan_locally;

    return appendstate;
}

/* ----------------------------------------------------------------
 *      ExecAppend
 *
 *      Handles iteration over multiple subplans.
 * ----------------------------------------------------------------
 */
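/*
 * ExecAppend is installed as this node's ExecProcNode callback.  The choice
 * of the next subplan to run is delegated to node->choose_next_subplan:
 * choose_next_subplan_locally for a non-parallel-aware Append, or one of the
 * leader/worker variants installed by ExecAppendInitializeDSM and
 * ExecAppendInitializeWorker for a parallel-aware one.
 */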
static TupleTableSlot *
ExecAppend(PlanState *pstate)
{
    AppendState *node = castNode(AppendState, pstate);

    if (node->as_whichplan < 0)
    {
        /*
         * If no subplan has been chosen, we must choose one before
         * proceeding.
         */
        if (node->as_whichplan == INVALID_SUBPLAN_INDEX &&
            !node->choose_next_subplan(node))
            return ExecClearTuple(node->ps.ps_ResultTupleSlot);

        /* Nothing to do if there are no matching subplans */
        else if (node->as_whichplan == NO_MATCHING_SUBPLANS)
            return ExecClearTuple(node->ps.ps_ResultTupleSlot);
    }

    for (;;)
    {
        PlanState  *subnode;
        TupleTableSlot *result;

        CHECK_FOR_INTERRUPTS();

        /*
         * figure out which subplan we are currently processing
         */
        Assert(node->as_whichplan >= 0 && node->as_whichplan < node->as_nplans);
        subnode = node->appendplans[node->as_whichplan];

        /*
         * get a tuple from the subplan
         */
        result = ExecProcNode(subnode);

        if (!TupIsNull(result))
        {
            /*
             * If the subplan gave us something then return it as-is.  We do
             * NOT make use of the result slot that was set up in
             * ExecInitAppend; there's no need for it.
             */
            return result;
        }

        /* choose new subplan; if none, we're done */
        if (!node->choose_next_subplan(node))
            return ExecClearTuple(node->ps.ps_ResultTupleSlot);
    }
}

/* ----------------------------------------------------------------
 *      ExecEndAppend
 *
 *      Shuts down the subscans of the append node.
 *
 *      Returns nothing of interest.
 * ----------------------------------------------------------------
 */
void
ExecEndAppend(AppendState *node)
{
    PlanState **appendplans;
    int         nplans;
    int         i;

    /*
     * get information from the node
     */
    appendplans = node->appendplans;
    nplans = node->as_nplans;

    /*
     * shut down each of the subscans
     */
    for (i = 0; i < nplans; i++)
        ExecEndNode(appendplans[i]);

    /*
     * release any resources associated with run-time pruning
     */
    if (node->as_prune_state)
        ExecDestroyPartitionPruneState(node->as_prune_state);
}

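/* ----------------------------------------------------------------
 *      ExecReScanAppend
 *
 *      Rescans all subplans and resets as_whichplan so that the next
 *      ExecAppend call picks a subplan afresh; if any pruning
 *      parameters changed, the set of valid subplans is also
 *      recomputed at that point.
 * ----------------------------------------------------------------
 */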
void
ExecReScanAppend(AppendState *node)
{
    int         i;

    /*
     * If any PARAM_EXEC Params used in pruning expressions have changed, then
     * we'd better unset the valid subplans so that they are reselected for
     * the new parameter values.
     */
    if (node->as_prune_state &&
        bms_overlap(node->ps.chgParam,
                    node->as_prune_state->execparamids))
    {
        bms_free(node->as_valid_subplans);
        node->as_valid_subplans = NULL;
    }

    for (i = 0; i < node->as_nplans; i++)
    {
        PlanState  *subnode = node->appendplans[i];

        /*
         * ExecReScan doesn't know about my subplans, so I have to do
         * changed-parameter signaling myself.
         */
        if (node->ps.chgParam != NULL)
            UpdateChangedParamSet(subnode, node->ps.chgParam);

        /*
         * If chgParam of subnode is not null then plan will be re-scanned by
         * first ExecProcNode.
         */
        if (subnode->chgParam == NULL)
            ExecReScan(subnode);
    }

    /* Let choose_next_subplan_* function handle setting the first subplan */
    node->as_whichplan = INVALID_SUBPLAN_INDEX;
}

/* ----------------------------------------------------------------
 *                      Parallel Append Support
 * ----------------------------------------------------------------
 */

/* ----------------------------------------------------------------
 *      ExecAppendEstimate
 *
 *      Compute the amount of space we'll need in the parallel
 *      query DSM, and inform pcxt->estimator about our needs.
 * ----------------------------------------------------------------
 */
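/*
 * The shared state is a ParallelAppendState whose pa_finished[] is a
 * flexible array member, so its size is offsetof(ParallelAppendState,
 * pa_finished) plus one bool per subplan.
 */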
void
ExecAppendEstimate(AppendState *node,
                   ParallelContext *pcxt)
{
    node->pstate_len =
        add_size(offsetof(ParallelAppendState, pa_finished),
                 sizeof(bool) * node->as_nplans);

    shm_toc_estimate_chunk(&pcxt->estimator, node->pstate_len);
    shm_toc_estimate_keys(&pcxt->estimator, 1);
}


/* ----------------------------------------------------------------
 *      ExecAppendInitializeDSM
 *
 *      Set up shared state for Parallel Append.
 * ----------------------------------------------------------------
 */
void
ExecAppendInitializeDSM(AppendState *node,
                        ParallelContext *pcxt)
{
    ParallelAppendState *pstate;

    pstate = shm_toc_allocate(pcxt->toc, node->pstate_len);
    memset(pstate, 0, node->pstate_len);
    LWLockInitialize(&pstate->pa_lock, LWTRANCHE_PARALLEL_APPEND);
    shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id, pstate);

    node->as_pstate = pstate;
    node->choose_next_subplan = choose_next_subplan_for_leader;
}

/* ----------------------------------------------------------------
 *      ExecAppendReInitializeDSM
 *
 *      Reset shared state before beginning a fresh scan.
 * ----------------------------------------------------------------
 */
void
ExecAppendReInitializeDSM(AppendState *node, ParallelContext *pcxt)
{
    ParallelAppendState *pstate = node->as_pstate;

    pstate->pa_next_plan = 0;
    memset(pstate->pa_finished, 0, sizeof(bool) * node->as_nplans);
}

/* ----------------------------------------------------------------
 *      ExecAppendInitializeWorker
 *
 *      Copy relevant information from TOC into planstate, and initialize
 *      whatever is required to choose and execute the optimal subplan.
 * ----------------------------------------------------------------
 */
void
ExecAppendInitializeWorker(AppendState *node, ParallelWorkerContext *pwcxt)
{
    node->as_pstate = shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
    node->choose_next_subplan = choose_next_subplan_for_worker;
}

/* ----------------------------------------------------------------
 *      choose_next_subplan_locally
 *
 *      Choose next subplan for a non-parallel-aware Append,
 *      returning false if there are no more.
 * ----------------------------------------------------------------
 */
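/*
 * Unlike the parallel variants below, this one honors the scan direction:
 * it steps through as_valid_subplans with bms_next_member for forward scans
 * and bms_prev_member for backward scans.
 */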
static bool
choose_next_subplan_locally(AppendState *node)
{
    int         whichplan = node->as_whichplan;
    int         nextplan;

    /* We should never be called when there are no subplans */
    Assert(whichplan != NO_MATCHING_SUBPLANS);

    /*
     * If first call then have the bms member function choose the first valid
     * subplan by initializing whichplan to -1.  If there happen to be no
     * valid subplans then the bms member function will handle that by
     * returning a negative number which will allow us to exit returning a
     * false value.
     */
    if (whichplan == INVALID_SUBPLAN_INDEX)
    {
        if (node->as_valid_subplans == NULL)
            node->as_valid_subplans =
                ExecFindMatchingSubPlans(node->as_prune_state);

        whichplan = -1;
    }

    /* Ensure whichplan is within the expected range */
    Assert(whichplan >= -1 && whichplan <= node->as_nplans);

    if (ScanDirectionIsForward(node->ps.state->es_direction))
        nextplan = bms_next_member(node->as_valid_subplans, whichplan);
    else
        nextplan = bms_prev_member(node->as_valid_subplans, whichplan);

    if (nextplan < 0)
        return false;

    node->as_whichplan = nextplan;

    return true;
}

/* ----------------------------------------------------------------
 *      choose_next_subplan_for_leader
 *
 *      Try to pick a plan which doesn't commit us to doing much
 *      work locally, so that as much work as possible is done in
 *      the workers.  Cheapest subplans are at the end.
 * ----------------------------------------------------------------
 */
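/*
 * The planner places the non-partial subplans first, in descending order of
 * cost, so the cheapest plans sit at the end of the array; the leader
 * therefore searches backward from the last subplan.
 */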
static bool
choose_next_subplan_for_leader(AppendState *node)
{
    ParallelAppendState *pstate = node->as_pstate;

    /* Backward scan is not supported by parallel-aware plans */
    Assert(ScanDirectionIsForward(node->ps.state->es_direction));

    /* We should never be called when there are no subplans */
    Assert(node->as_whichplan != NO_MATCHING_SUBPLANS);

    LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE);

    if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
    {
        /* Mark just-completed subplan as finished. */
        node->as_pstate->pa_finished[node->as_whichplan] = true;
    }
    else
    {
        /* Start with last subplan. */
        node->as_whichplan = node->as_nplans - 1;

        /*
         * If we've yet to determine the valid subplans then do so now.  If
         * run-time pruning is disabled then the valid subplans will always be
         * set to all subplans.
         */
        if (node->as_valid_subplans == NULL)
        {
            node->as_valid_subplans =
                ExecFindMatchingSubPlans(node->as_prune_state);

            /*
             * Mark each invalid plan as finished to allow the loop below to
             * select the first valid subplan.
             */
            mark_invalid_subplans_as_finished(node);
        }
    }

    /* Loop until we find a subplan to execute. */
    while (pstate->pa_finished[node->as_whichplan])
    {
        if (node->as_whichplan == 0)
        {
            pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
            node->as_whichplan = INVALID_SUBPLAN_INDEX;
            LWLockRelease(&pstate->pa_lock);
            return false;
        }

        /*
         * We needn't pay attention to as_valid_subplans here as all invalid
         * plans have been marked as finished.
         */
        node->as_whichplan--;
    }

    /* If non-partial, immediately mark as finished. */
    if (node->as_whichplan < node->as_first_partial_plan)
        node->as_pstate->pa_finished[node->as_whichplan] = true;

    LWLockRelease(&pstate->pa_lock);

    return true;
}

/* ----------------------------------------------------------------
 *      choose_next_subplan_for_worker
 *
 *      Choose next subplan for a parallel-aware Append, returning
 *      false if there are no more.
 *
 *      We start from the first plan and advance through the list;
 *      when we get back to the end, we loop back to the first
 *      partial plan.  This assigns the non-partial plans first in
 *      order of descending cost and then spreads out the workers
 *      as evenly as possible across the remaining partial plans.
 * ----------------------------------------------------------------
 */
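/*
 * For example, with subplans 0 and 1 non-partial and subplans 2 and 3
 * partial (as_first_partial_plan == 2), successive claims hand out plan 0,
 * then 1, then 2, then 3, after which pa_next_plan wraps back to 2; the
 * non-partial plans are skipped from then on because claiming one marks it
 * finished immediately.
 */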
static bool
choose_next_subplan_for_worker(AppendState *node)
{
    ParallelAppendState *pstate = node->as_pstate;

    /* Backward scan is not supported by parallel-aware plans */
    Assert(ScanDirectionIsForward(node->ps.state->es_direction));

    /* We should never be called when there are no subplans */
    Assert(node->as_whichplan != NO_MATCHING_SUBPLANS);

    LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE);

    /* Mark just-completed subplan as finished. */
    if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
        node->as_pstate->pa_finished[node->as_whichplan] = true;

    /*
     * If we've yet to determine the valid subplans then do so now.  If
     * run-time pruning is disabled then the valid subplans will always be set
     * to all subplans.
     */
    else if (node->as_valid_subplans == NULL)
    {
        node->as_valid_subplans =
            ExecFindMatchingSubPlans(node->as_prune_state);
        mark_invalid_subplans_as_finished(node);
    }

    /* If all the plans are already done, we have nothing to do */
    if (pstate->pa_next_plan == INVALID_SUBPLAN_INDEX)
    {
        LWLockRelease(&pstate->pa_lock);
        return false;
    }

    /* Save the plan from which we are starting the search. */
    node->as_whichplan = pstate->pa_next_plan;

    /* Loop until we find a valid subplan to execute. */
    while (pstate->pa_finished[pstate->pa_next_plan])
    {
        int         nextplan;

        nextplan = bms_next_member(node->as_valid_subplans,
                                   pstate->pa_next_plan);
        if (nextplan >= 0)
        {
            /* Advance to the next valid plan. */
            pstate->pa_next_plan = nextplan;
        }
        else if (node->as_whichplan > node->as_first_partial_plan)
        {
            /*
             * Try looping back to the first valid partial plan, if there is
             * one.  If there isn't, arrange to bail out below.
             */
            nextplan = bms_next_member(node->as_valid_subplans,
                                       node->as_first_partial_plan - 1);
            pstate->pa_next_plan =
                nextplan < 0 ? node->as_whichplan : nextplan;
        }
        else
        {
            /*
             * At last plan, and either there are no partial plans or we've
             * tried them all.  Arrange to bail out.
             */
            pstate->pa_next_plan = node->as_whichplan;
        }

        if (pstate->pa_next_plan == node->as_whichplan)
        {
            /* We've tried everything! */
            pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
            LWLockRelease(&pstate->pa_lock);
            return false;
        }
    }

    /* Pick the plan we found, and advance pa_next_plan one more time. */
    node->as_whichplan = pstate->pa_next_plan;
    pstate->pa_next_plan = bms_next_member(node->as_valid_subplans,
                                           pstate->pa_next_plan);

    /*
     * If there are no more valid plans then try setting the next plan to the
     * first valid partial plan.
     */
    if (pstate->pa_next_plan < 0)
    {
        int         nextplan = bms_next_member(node->as_valid_subplans,
                                               node->as_first_partial_plan - 1);

        if (nextplan >= 0)
            pstate->pa_next_plan = nextplan;
        else
        {
            /*
             * There are no valid partial plans, and we already chose the last
             * non-partial plan; so flag that there's nothing more for our
             * fellow workers to do.
             */
            pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
        }
    }

    /* If non-partial, immediately mark as finished. */
    if (node->as_whichplan < node->as_first_partial_plan)
        node->as_pstate->pa_finished[node->as_whichplan] = true;

    LWLockRelease(&pstate->pa_lock);

    return true;
}

/*
 * mark_invalid_subplans_as_finished
 *      Marks the ParallelAppendState's pa_finished as true for each invalid
 *      subplan.
 *
 * This function should only be called for parallel Append with run-time
 * pruning enabled.
 */
static void
mark_invalid_subplans_as_finished(AppendState *node)
{
    int         i;

    /* Only valid to call this while in parallel Append mode */
    Assert(node->as_pstate);

    /* Shouldn't have been called when run-time pruning is not enabled */
    Assert(node->as_prune_state);

    /* Nothing to do if all plans are valid */
    if (bms_num_members(node->as_valid_subplans) == node->as_nplans)
        return;

    /* Mark all non-valid plans as finished */
    for (i = 0; i < node->as_nplans; i++)
    {
        if (!bms_is_member(i, node->as_valid_subplans))
            node->as_pstate->pa_finished[i] = true;
    }
}