PostgreSQL Source Code  git master
nodeAppend.c File Reference
#include "postgres.h"
#include "executor/execdebug.h"
#include "executor/execPartition.h"
#include "executor/nodeAppend.h"
#include "miscadmin.h"
Include dependency graph for nodeAppend.c:

Go to the source code of this file.

Data Structures

struct  ParallelAppendState
 

Macros

#define INVALID_SUBPLAN_INDEX   -1
 

Functions

static TupleTableSlot * ExecAppend (PlanState *pstate)
 
static bool choose_next_subplan_locally (AppendState *node)
 
static bool choose_next_subplan_for_leader (AppendState *node)
 
static bool choose_next_subplan_for_worker (AppendState *node)
 
static void mark_invalid_subplans_as_finished (AppendState *node)
 
AppendState * ExecInitAppend (Append *node, EState *estate, int eflags)
 
void ExecEndAppend (AppendState *node)
 
void ExecReScanAppend (AppendState *node)
 
void ExecAppendEstimate (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendInitializeDSM (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendReInitializeDSM (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendInitializeWorker (AppendState *node, ParallelWorkerContext *pwcxt)
 

Macro Definition Documentation

◆ INVALID_SUBPLAN_INDEX

Function Documentation

◆ choose_next_subplan_for_leader()

/* ----------------------------------------------------------------
 *		choose_next_subplan_for_leader
 *
 *		Try to pick a plan which doesn't commit us to doing much
 *		work locally, so that as much work as possible is done in
 *		the workers.  Cheapest subplans are at the end of the list,
 *		so we work back from there.  Holds the parallel-append lock
 *		(pa_lock) for the duration of the selection.
 *
 *		Returns true on success, false when every subplan is done.
 *		Referenced by ExecAppendInitializeDSM().
 * ----------------------------------------------------------------
 */
static bool
choose_next_subplan_for_leader(AppendState *node)
{
	ParallelAppendState *pstate = node->as_pstate;

	/* Backward scan is not supported by parallel-aware plans */
	Assert(ScanDirectionIsForward(node->ps.state->es_direction));

	/* We should never be called when there are no subplans */
	Assert(node->as_nplans > 0);

	LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE);

	if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
	{
		/* Mark just-completed subplan as finished. */
		node->as_pstate->pa_finished[node->as_whichplan] = true;
	}
	else
	{
		/* Start with last subplan. */
		node->as_whichplan = node->as_nplans - 1;

		/*
		 * If we've yet to determine the valid subplans then do so now.  If
		 * run-time pruning is disabled then the valid subplans will always be
		 * set to all subplans.
		 */
		if (node->as_valid_subplans == NULL)
		{
			node->as_valid_subplans =
				ExecFindMatchingSubPlans(node->as_prune_state);

			/*
			 * Mark each invalid plan as finished to allow the loop below to
			 * select the first valid subplan.
			 */
			mark_invalid_subplans_as_finished(node);
		}
	}

	/* Loop until we find a subplan to execute. */
	while (pstate->pa_finished[node->as_whichplan])
	{
		if (node->as_whichplan == 0)
		{
			/* No plans remain; tell workers, and report failure. */
			pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
			node->as_whichplan = INVALID_SUBPLAN_INDEX;
			LWLockRelease(&pstate->pa_lock);
			return false;
		}

		/*
		 * We needn't pay attention to as_valid_subplans here as all invalid
		 * plans have been marked as finished.
		 */
		node->as_whichplan--;
	}

	/* If non-partial, immediately mark as finished. */
	if (node->as_whichplan < node->as_first_partial_plan)
		node->as_pstate->pa_finished[node->as_whichplan] = true;

	LWLockRelease(&pstate->pa_lock);

	return true;
}
#define ScanDirectionIsForward(direction)
Definition: sdir.h:55
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1229
EState * state
Definition: execnodes.h:947
ScanDirection es_direction
Definition: execnodes.h:507
PlanState ps
Definition: execnodes.h:1221
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1812
ParallelAppendState * as_pstate
Definition: execnodes.h:1227
int as_first_partial_plan
Definition: execnodes.h:1225
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:80
Bitmapset * as_valid_subplans
Definition: execnodes.h:1230
bool pa_finished[FLEXIBLE_ARRAY_MEMBER]
Definition: nodeAppend.c:77
static void mark_invalid_subplans_as_finished(AppendState *node)
Definition: nodeAppend.c:691
#define Assert(condition)
Definition: c.h:745
int as_whichplan
Definition: execnodes.h:1224
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1208
Bitmapset * ExecFindMatchingSubPlans(PartitionPruneState *prunestate)

◆ choose_next_subplan_for_worker()

/* ----------------------------------------------------------------
 *		choose_next_subplan_for_worker
 *
 *		Choose next subplan for a parallel-aware worker.  We start
 *		from the first plan ever chosen by a worker, and iterate
 *		over all the subplans in the same order; each worker then
 *		claims the next still-unfinished plan, looping back to the
 *		first partial plan once the non-partial ones are exhausted.
 *		Holds pa_lock for the duration of the selection.
 *
 *		Returns true on success, false when every subplan is done.
 *		Referenced by ExecAppendInitializeWorker().
 * ----------------------------------------------------------------
 */
static bool
choose_next_subplan_for_worker(AppendState *node)
{
	ParallelAppendState *pstate = node->as_pstate;

	/* Backward scan is not supported by parallel-aware plans */
	Assert(ScanDirectionIsForward(node->ps.state->es_direction));

	/* We should never be called when there are no subplans */
	Assert(node->as_nplans > 0);

	LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE);

	/* Mark just-completed subplan as finished. */
	if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
		node->as_pstate->pa_finished[node->as_whichplan] = true;

	/*
	 * If we've yet to determine the valid subplans then do so now.  If
	 * run-time pruning is disabled then the valid subplans will always be set
	 * to all subplans.
	 */
	else if (node->as_valid_subplans == NULL)
	{
		node->as_valid_subplans =
			ExecFindMatchingSubPlans(node->as_prune_state);
		mark_invalid_subplans_as_finished(node);
	}

	/* If all the plans are already done, we have nothing to do */
	if (pstate->pa_next_plan == INVALID_SUBPLAN_INDEX)
	{
		LWLockRelease(&pstate->pa_lock);
		return false;
	}

	/* Save the plan from which we are starting the search. */
	node->as_whichplan = pstate->pa_next_plan;

	/* Loop until we find a valid subplan to execute. */
	while (pstate->pa_finished[pstate->pa_next_plan])
	{
		int			nextplan;

		nextplan = bms_next_member(node->as_valid_subplans,
								   pstate->pa_next_plan);
		if (nextplan >= 0)
		{
			/* Advance to the next valid plan. */
			pstate->pa_next_plan = nextplan;
		}
		else if (node->as_whichplan > node->as_first_partial_plan)
		{
			/*
			 * Try looping back to the first valid partial plan, if there is
			 * one.  If there isn't, arrange to bail out below.
			 */
			nextplan = bms_next_member(node->as_valid_subplans,
									   node->as_first_partial_plan - 1);
			pstate->pa_next_plan =
				nextplan < 0 ? node->as_whichplan : nextplan;
		}
		else
		{
			/*
			 * At last plan, and either there are no partial plans or we've
			 * tried them all.  Arrange to bail out.
			 */
			pstate->pa_next_plan = node->as_whichplan;
		}

		if (pstate->pa_next_plan == node->as_whichplan)
		{
			/* We've tried everything! */
			pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
			LWLockRelease(&pstate->pa_lock);
			return false;
		}
	}

	/* Pick the plan we found, and advance pa_next_plan one more time. */
	node->as_whichplan = pstate->pa_next_plan;
	pstate->pa_next_plan = bms_next_member(node->as_valid_subplans,
										   pstate->pa_next_plan);

	/*
	 * If there are no more valid plans then try setting the next plan to the
	 * first valid partial plan.
	 */
	if (pstate->pa_next_plan < 0)
	{
		int			nextplan = bms_next_member(node->as_valid_subplans,
											   node->as_first_partial_plan - 1);

		if (nextplan >= 0)
			pstate->pa_next_plan = nextplan;
		else
		{
			/*
			 * There are no valid partial plans, and we already chose the last
			 * non-partial plan; so flag that there's nothing more for our
			 * fellow workers to do.
			 */
			pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
		}
	}

	/* If non-partial, immediately mark as finished. */
	if (node->as_whichplan < node->as_first_partial_plan)
		node->as_pstate->pa_finished[node->as_whichplan] = true;

	LWLockRelease(&pstate->pa_lock);

	return true;
}
#define ScanDirectionIsForward(direction)
Definition: sdir.h:55
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1229
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1043
EState * state
Definition: execnodes.h:947
ScanDirection es_direction
Definition: execnodes.h:507
PlanState ps
Definition: execnodes.h:1221
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1812
ParallelAppendState * as_pstate
Definition: execnodes.h:1227
int as_first_partial_plan
Definition: execnodes.h:1225
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:80
Bitmapset * as_valid_subplans
Definition: execnodes.h:1230
bool pa_finished[FLEXIBLE_ARRAY_MEMBER]
Definition: nodeAppend.c:77
static void mark_invalid_subplans_as_finished(AppendState *node)
Definition: nodeAppend.c:691
#define Assert(condition)
Definition: c.h:745
int as_whichplan
Definition: execnodes.h:1224
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1208
Bitmapset * ExecFindMatchingSubPlans(PartitionPruneState *prunestate)

◆ choose_next_subplan_locally()

/* ----------------------------------------------------------------
 *		choose_next_subplan_locally
 *
 *		Choose next sibling subplan when not parallel-aware.  Walks
 *		as_valid_subplans forward or backward depending on the scan
 *		direction.  Returns true and sets as_whichplan on success;
 *		returns false when no valid subplan remains.
 *
 *		Referenced by ExecInitAppend() (the non-parallel default).
 * ----------------------------------------------------------------
 */
static bool
choose_next_subplan_locally(AppendState *node)
{
	int			whichplan = node->as_whichplan;
	int			nextplan;

	/* We should never be called when there are no subplans */
	Assert(node->as_nplans > 0);

	/*
	 * If first call then have the bms member function choose the first valid
	 * subplan by initializing whichplan to -1.  If there happen to be no
	 * valid subplans then the bms member function will handle that by
	 * returning a negative number which will allow us to exit returning a
	 * false value.
	 */
	if (whichplan == INVALID_SUBPLAN_INDEX)
	{
		/* Determine valid subplans now, if run-time pruning deferred it. */
		if (node->as_valid_subplans == NULL)
			node->as_valid_subplans =
				ExecFindMatchingSubPlans(node->as_prune_state);

		whichplan = -1;
	}

	/* Ensure whichplan is within the expected range */
	Assert(whichplan >= -1 && whichplan <= node->as_nplans);

	if (ScanDirectionIsForward(node->ps.state->es_direction))
		nextplan = bms_next_member(node->as_valid_subplans, whichplan);
	else
		nextplan = bms_prev_member(node->as_valid_subplans, whichplan);

	if (nextplan < 0)
		return false;

	node->as_whichplan = nextplan;

	return true;
}
#define ScanDirectionIsForward(direction)
Definition: sdir.h:55
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1229
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1043
EState * state
Definition: execnodes.h:947
ScanDirection es_direction
Definition: execnodes.h:507
PlanState ps
Definition: execnodes.h:1221
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:80
Bitmapset * as_valid_subplans
Definition: execnodes.h:1230
int bms_prev_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1102
#define Assert(condition)
Definition: c.h:745
int as_whichplan
Definition: execnodes.h:1224
Bitmapset * ExecFindMatchingSubPlans(PartitionPruneState *prunestate)

◆ ExecAppend()

/* ----------------------------------------------------------------
 *		ExecAppend
 *
 *		Handles iteration over multiple subplans: pulls tuples from
 *		the current subplan until it is exhausted, then asks the
 *		installed choose_next_subplan callback for the next one.
 *		Returns an empty result slot when all subplans are done.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecAppend(PlanState *pstate)
{
	AppendState *node = castNode(AppendState, pstate);

	if (node->as_whichplan < 0)
	{
		/* Nothing to do if there are no subplans */
		if (node->as_nplans == 0)
			return ExecClearTuple(node->ps.ps_ResultTupleSlot);

		/*
		 * If no subplan has been chosen, we must choose one before
		 * proceeding.
		 */
		if (node->as_whichplan == INVALID_SUBPLAN_INDEX &&
			!node->choose_next_subplan(node))
			return ExecClearTuple(node->ps.ps_ResultTupleSlot);
	}

	for (;;)
	{
		PlanState  *subnode;
		TupleTableSlot *result;

		/* This loop can run a long time; allow query cancel. */
		CHECK_FOR_INTERRUPTS();

		/*
		 * figure out which subplan we are currently processing
		 */
		Assert(node->as_whichplan >= 0 && node->as_whichplan < node->as_nplans);
		subnode = node->appendplans[node->as_whichplan];

		/*
		 * get a tuple from the subplan
		 */
		result = ExecProcNode(subnode);

		if (!TupIsNull(result))
		{
			/*
			 * If the subplan gave us something then return it as-is.  We do
			 * NOT make use of the result slot that was set up in
			 * ExecInitAppend; there's no need for it.
			 */
			return result;
		}

		/* choose new subplan; if none, we're done */
		if (!node->choose_next_subplan(node))
			return ExecClearTuple(node->ps.ps_ResultTupleSlot);
	}
}
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:425
#define castNode(_type_, nodeptr)
Definition: nodes.h:598
PlanState ps
Definition: execnodes.h:1221
TupleTableSlot * ps_ResultTupleSlot
Definition: execnodes.h:983
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1231
#define TupIsNull(slot)
Definition: tuptable.h:292
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:80
static TupleTableSlot * ExecProcNode(PlanState *node)
Definition: executor.h:240
#define Assert(condition)
Definition: c.h:745
int as_whichplan
Definition: execnodes.h:1224
PlanState ** appendplans
Definition: execnodes.h:1222
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99

◆ ExecAppendEstimate()

/* ----------------------------------------------------------------
 *		ExecAppendEstimate
 *
 *		Compute the amount of space we'll need in the parallel
 *		query DSM, and inform pcxt->estimator about our needs.
 *		pstate_len covers ParallelAppendState plus one bool
 *		(pa_finished) per subplan.
 * ----------------------------------------------------------------
 */
void
ExecAppendEstimate(AppendState *node,
				   ParallelContext *pcxt)
{
	node->pstate_len =
		add_size(offsetof(ParallelAppendState, pa_finished),
				 sizeof(bool) * node->as_nplans);

	shm_toc_estimate_chunk(&pcxt->estimator, node->pstate_len);
	shm_toc_estimate_keys(&pcxt->estimator, 1);
}
Size pstate_len
Definition: execnodes.h:1228
shm_toc_estimator estimator
Definition: parallel.h:42
#define shm_toc_estimate_chunk(e, sz)
Definition: shm_toc.h:51
Size add_size(Size s1, Size s2)
Definition: shmem.c:498
#define shm_toc_estimate_keys(e, cnt)
Definition: shm_toc.h:53
#define offsetof(type, field)
Definition: c.h:668

◆ ExecAppendInitializeDSM()

/* ----------------------------------------------------------------
 *		ExecAppendInitializeDSM
 *
 *		Set up shared state for Parallel Append: allocate and zero
 *		the ParallelAppendState in the DSM, initialize its LWLock,
 *		and register it in the shm_toc under our plan node id.  The
 *		leader uses the leader-specific subplan chooser.
 * ----------------------------------------------------------------
 */
void
ExecAppendInitializeDSM(AppendState *node,
						ParallelContext *pcxt)
{
	ParallelAppendState *pstate;

	pstate = shm_toc_allocate(pcxt->toc, node->pstate_len);
	memset(pstate, 0, node->pstate_len);
	LWLockInitialize(&pstate->pa_lock, LWTRANCHE_PARALLEL_APPEND);
	shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id, pstate);

	node->as_pstate = pstate;
	node->choose_next_subplan = choose_next_subplan_for_leader;
}
Size pstate_len
Definition: execnodes.h:1228
int plan_node_id
Definition: plannodes.h:141
PlanState ps
Definition: execnodes.h:1221
ParallelAppendState * as_pstate
Definition: execnodes.h:1227
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1231
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:745
Plan * plan
Definition: execnodes.h:945
static bool choose_next_subplan_for_leader(AppendState *node)
Definition: nodeAppend.c:486
void * shm_toc_allocate(shm_toc *toc, Size nbytes)
Definition: shm_toc.c:88
void shm_toc_insert(shm_toc *toc, uint64 key, void *address)
Definition: shm_toc.c:171
shm_toc * toc
Definition: parallel.h:45

◆ ExecAppendInitializeWorker()

/* ----------------------------------------------------------------
 *		ExecAppendInitializeWorker
 *
 *		Copy relevant information from the DSM into a parallel
 *		worker: look up the shared ParallelAppendState by plan node
 *		id and install the worker-specific subplan chooser.
 * ----------------------------------------------------------------
 */
void
ExecAppendInitializeWorker(AppendState *node, ParallelWorkerContext *pwcxt)
{
	node->as_pstate = shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
	node->choose_next_subplan = choose_next_subplan_for_worker;
}
int plan_node_id
Definition: plannodes.h:141
PlanState ps
Definition: execnodes.h:1221
ParallelAppendState * as_pstate
Definition: execnodes.h:1227
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1231
static bool choose_next_subplan_for_worker(AppendState *node)
Definition: nodeAppend.c:567
Plan * plan
Definition: execnodes.h:945
void * shm_toc_lookup(shm_toc *toc, uint64 key, bool noError)
Definition: shm_toc.c:232

◆ ExecAppendReInitializeDSM()

/* ----------------------------------------------------------------
 *		ExecAppendReInitializeDSM
 *
 *		Reset shared state before beginning a new scan: rewind the
 *		shared next-plan cursor and clear all pa_finished flags.
 * ----------------------------------------------------------------
 */
void
ExecAppendReInitializeDSM(AppendState *node, ParallelContext *pcxt)
{
	ParallelAppendState *pstate = node->as_pstate;

	pstate->pa_next_plan = 0;
	memset(pstate->pa_finished, 0, sizeof(bool) * node->as_nplans);
}
ParallelAppendState * as_pstate
Definition: execnodes.h:1227
bool pa_finished[FLEXIBLE_ARRAY_MEMBER]
Definition: nodeAppend.c:77

◆ ExecEndAppend()

/* ----------------------------------------------------------------
 *		ExecEndAppend
 *
 *		Shuts down the subscans of the append node.
 *
 *		Returns nothing of interest.
 * ----------------------------------------------------------------
 */
void
ExecEndAppend(AppendState *node)
{
	PlanState **appendplans;
	int			nplans;
	int			i;

	/*
	 * get information from the node
	 */
	appendplans = node->appendplans;
	nplans = node->as_nplans;

	/*
	 * shut down each of the subscans
	 */
	for (i = 0; i < nplans; i++)
		ExecEndNode(appendplans[i]);
}
void ExecEndNode(PlanState *node)
Definition: execProcnode.c:543
PlanState ** appendplans
Definition: execnodes.h:1222
int i

◆ ExecInitAppend()

/* ----------------------------------------------------------------
 *		ExecInitAppend
 *
 *		Begin all of the subscans of the append node.
 *
 *		Sets up run-time partition pruning (if the plan carries a
 *		PartitionPruneInfo), initializes only the subplans that
 *		survive initial pruning, and installs the serial subplan
 *		chooser; parallel execution overrides the chooser later.
 * ----------------------------------------------------------------
 */
AppendState *
ExecInitAppend(Append *node, EState *estate, int eflags)
{
	AppendState *appendstate = makeNode(AppendState);
	PlanState **appendplanstates;
	Bitmapset  *validsubplans;
	int			nplans;
	int			firstvalid;
	int			i,
				j;

	/* check for unsupported flags */
	Assert(!(eflags & EXEC_FLAG_MARK));

	/*
	 * create new AppendState for our append node
	 */
	appendstate->ps.plan = (Plan *) node;
	appendstate->ps.state = estate;
	appendstate->ps.ExecProcNode = ExecAppend;

	/* Let choose_next_subplan_* function handle setting the first subplan */
	appendstate->as_whichplan = INVALID_SUBPLAN_INDEX;

	/* If run-time partition pruning is enabled, then set that up now */
	if (node->part_prune_info != NULL)
	{
		PartitionPruneState *prunestate;

		/* We may need an expression context to evaluate partition exprs */
		ExecAssignExprContext(estate, &appendstate->ps);

		/* Create the working data structure for pruning. */
		prunestate = ExecCreatePartitionPruneState(&appendstate->ps,
												   node->part_prune_info);
		appendstate->as_prune_state = prunestate;

		/* Perform an initial partition prune, if required. */
		if (prunestate->do_initial_prune)
		{
			/* Determine which subplans survive initial pruning */
			validsubplans = ExecFindInitialMatchingSubPlans(prunestate,
															list_length(node->appendplans));

			nplans = bms_num_members(validsubplans);
		}
		else
		{
			/* We'll need to initialize all subplans */
			nplans = list_length(node->appendplans);
			Assert(nplans > 0);
			validsubplans = bms_add_range(NULL, 0, nplans - 1);
		}

		/*
		 * When no run-time pruning is required and there's at least one
		 * subplan, we can fill as_valid_subplans immediately, preventing
		 * later calls to ExecFindMatchingSubPlans.
		 */
		if (!prunestate->do_exec_prune && nplans > 0)
			appendstate->as_valid_subplans = bms_add_range(NULL, 0, nplans - 1);
	}
	else
	{
		nplans = list_length(node->appendplans);

		/*
		 * When run-time partition pruning is not enabled we can just mark all
		 * subplans as valid; they must also all be initialized.
		 */
		Assert(nplans > 0);
		appendstate->as_valid_subplans = validsubplans =
			bms_add_range(NULL, 0, nplans - 1);
		appendstate->as_prune_state = NULL;
	}

	/*
	 * Initialize result tuple type and slot.
	 */
	ExecInitResultTupleSlotTL(&appendstate->ps, &TTSOpsVirtual);

	/* node returns slots from each of its subnodes, therefore not fixed */
	appendstate->ps.resultopsset = true;
	appendstate->ps.resultopsfixed = false;

	appendplanstates = (PlanState **) palloc(nplans *
											 sizeof(PlanState *));

	/*
	 * call ExecInitNode on each of the valid plans to be executed and save
	 * the results into the appendplanstates array.
	 *
	 * While at it, find out the first valid partial plan.
	 */
	j = 0;
	firstvalid = nplans;
	i = -1;
	while ((i = bms_next_member(validsubplans, i)) >= 0)
	{
		Plan	   *initNode = (Plan *) list_nth(node->appendplans, i);

		/*
		 * Record the lowest appendplans index which is a valid partial plan.
		 */
		if (i >= node->first_partial_plan && j < firstvalid)
			firstvalid = j;

		appendplanstates[j++] = ExecInitNode(initNode, estate, eflags);
	}

	appendstate->as_first_partial_plan = firstvalid;
	appendstate->appendplans = appendplanstates;
	appendstate->as_nplans = nplans;

	/*
	 * Miscellaneous initialization
	 */
	appendstate->ps.ps_ProjInfo = NULL;

	/* For parallel query, this will be overridden later. */
	appendstate->choose_next_subplan = choose_next_subplan_locally;

	return appendstate;
}
static TupleTableSlot * ExecAppend(PlanState *pstate)
Definition: nodeAppend.c:232
ProjectionInfo * ps_ProjInfo
Definition: execnodes.h:985
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1229
static bool choose_next_subplan_locally(AppendState *node)
Definition: nodeAppend.c:437
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1043
const TupleTableSlotOps TTSOpsVirtual
Definition: execTuples.c:83
EState * state
Definition: execnodes.h:947
PlanState ps
Definition: execnodes.h:1221
List * appendplans
Definition: plannodes.h:255
Bitmapset * bms_add_range(Bitmapset *a, int lower, int upper)
Definition: bitmapset.c:834
static void * list_nth(const List *list, int n)
Definition: pg_list.h:277
int first_partial_plan
Definition: plannodes.h:261
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1231
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:646
Bitmapset * ExecFindInitialMatchingSubPlans(PartitionPruneState *prunestate, int nsubplans)
int as_first_partial_plan
Definition: execnodes.h:1225
bool resultopsset
Definition: execnodes.h:1028
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:80
ExecProcNodeMtd ExecProcNode
Definition: execnodes.h:951
Bitmapset * as_valid_subplans
Definition: execnodes.h:1230
Plan * plan
Definition: execnodes.h:945
#define makeNode(_type_)
Definition: nodes.h:577
struct PartitionPruneInfo * part_prune_info
Definition: plannodes.h:264
#define Assert(condition)
Definition: c.h:745
#define EXEC_FLAG_MARK
Definition: executor.h:59
int as_whichplan
Definition: execnodes.h:1224
void ExecAssignExprContext(EState *estate, PlanState *planstate)
Definition: execUtils.c:485
static int list_length(const List *l)
Definition: pg_list.h:169
void ExecInitResultTupleSlotTL(PlanState *planstate, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1769
bool resultopsfixed
Definition: execnodes.h:1024
void * palloc(Size size)
Definition: mcxt.c:949
PlanState ** appendplans
Definition: execnodes.h:1222
int i
PartitionPruneState * ExecCreatePartitionPruneState(PlanState *planstate, PartitionPruneInfo *partitionpruneinfo)
PlanState * ExecInitNode(Plan *node, EState *estate, int eflags)
Definition: execProcnode.c:139

◆ ExecReScanAppend()

/* ----------------------------------------------------------------
 *		ExecReScanAppend
 *
 *		Rescans the append node: invalidates the cached valid-subplan
 *		set if pruning parameters changed, propagates changed params
 *		to each subplan, and resets as_whichplan so that the next
 *		ExecAppend call picks a fresh first subplan.
 * ----------------------------------------------------------------
 */
void
ExecReScanAppend(AppendState *node)
{
	int			i;

	/*
	 * If any PARAM_EXEC Params used in pruning expressions have changed, then
	 * we'd better unset the valid subplans so that they are reselected for
	 * the new parameter values.
	 */
	if (node->as_prune_state &&
		bms_overlap(node->ps.chgParam,
					node->as_prune_state->execparamids))
	{
		bms_free(node->as_valid_subplans);
		node->as_valid_subplans = NULL;
	}

	for (i = 0; i < node->as_nplans; i++)
	{
		PlanState  *subnode = node->appendplans[i];

		/*
		 * ExecReScan doesn't know about my subplans, so I have to do
		 * changed-parameter signaling myself.
		 */
		if (node->ps.chgParam != NULL)
			UpdateChangedParamSet(subnode, node->ps.chgParam);

		/*
		 * If chgParam of subnode is not null then plan will be re-scanned by
		 * first ExecProcNode.
		 */
		if (subnode->chgParam == NULL)
			ExecReScan(subnode);
	}

	/* Let choose_next_subplan_* function handle setting the first subplan */
	node->as_whichplan = INVALID_SUBPLAN_INDEX;
}
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1229
void ExecReScan(PlanState *node)
Definition: execAmi.c:76
PlanState ps
Definition: execnodes.h:1221
Bitmapset * execparamids
Bitmapset * chgParam
Definition: execnodes.h:977
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:80
Bitmapset * as_valid_subplans
Definition: execnodes.h:1230
void UpdateChangedParamSet(PlanState *node, Bitmapset *newchg)
Definition: execUtils.c:843
void bms_free(Bitmapset *a)
Definition: bitmapset.c:208
int as_whichplan
Definition: execnodes.h:1224
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:494
PlanState ** appendplans
Definition: execnodes.h:1222
int i

◆ mark_invalid_subplans_as_finished()

/* ----------------------------------------------------------------
 *		mark_invalid_subplans_as_finished
 *
 *		Marks the ParallelAppendState's pa_finished as true for each
 *		invalid subplan, so the parallel choosers skip them.  Only
 *		valid to call in parallel Append mode with run-time pruning
 *		enabled.
 *
 *		Referenced by choose_next_subplan_for_leader() and
 *		choose_next_subplan_for_worker().
 * ----------------------------------------------------------------
 */
static void
mark_invalid_subplans_as_finished(AppendState *node)
{
	int			i;

	/* Only valid to call this while in parallel Append mode */
	Assert(node->as_pstate);

	/* Shouldn't have been called when run-time pruning is not enabled */
	Assert(node->as_prune_state);

	/* Nothing to do if all plans are valid */
	if (bms_num_members(node->as_valid_subplans) == node->as_nplans)
		return;

	/* Mark all non-valid plans as finished */
	for (i = 0; i < node->as_nplans; i++)
	{
		if (!bms_is_member(i, node->as_valid_subplans))
			node->as_pstate->pa_finished[i] = true;
	}
}
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1229
ParallelAppendState * as_pstate
Definition: execnodes.h:1227
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:646
Bitmapset * as_valid_subplans
Definition: execnodes.h:1230
bool pa_finished[FLEXIBLE_ARRAY_MEMBER]
Definition: nodeAppend.c:77
#define Assert(condition)
Definition: c.h:745
int i
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:427