PostgreSQL Source Code  git master
nodeAppend.c File Reference
#include "postgres.h"
#include "executor/execdebug.h"
#include "executor/execPartition.h"
#include "executor/nodeAppend.h"
#include "miscadmin.h"
Include dependency graph for nodeAppend.c:

Go to the source code of this file.

Data Structures

struct  ParallelAppendState
 

Macros

#define INVALID_SUBPLAN_INDEX   -1
 
#define NO_MATCHING_SUBPLANS   -2
 

Functions

static TupleTableSlot * ExecAppend (PlanState *pstate)
 
static bool choose_next_subplan_locally (AppendState *node)
 
static bool choose_next_subplan_for_leader (AppendState *node)
 
static bool choose_next_subplan_for_worker (AppendState *node)
 
static void mark_invalid_subplans_as_finished (AppendState *node)
 
AppendState * ExecInitAppend (Append *node, EState *estate, int eflags)
 
void ExecEndAppend (AppendState *node)
 
void ExecReScanAppend (AppendState *node)
 
void ExecAppendEstimate (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendInitializeDSM (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendReInitializeDSM (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendInitializeWorker (AppendState *node, ParallelWorkerContext *pwcxt)
 

Macro Definition Documentation

◆ INVALID_SUBPLAN_INDEX

#define INVALID_SUBPLAN_INDEX   -1
◆ NO_MATCHING_SUBPLANS

#define NO_MATCHING_SUBPLANS   -2

Function Documentation

◆ choose_next_subplan_for_leader()

static bool choose_next_subplan_for_leader ( AppendState *node )
static

Definition at line 506 of file nodeAppend.c.

References AppendState::as_first_partial_plan, AppendState::as_nplans, AppendState::as_prune_state, AppendState::as_pstate, AppendState::as_valid_subplans, AppendState::as_whichplan, Assert, EState::es_direction, ExecFindMatchingSubPlans(), INVALID_SUBPLAN_INDEX, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), mark_invalid_subplans_as_finished(), NO_MATCHING_SUBPLANS, ParallelAppendState::pa_finished, ParallelAppendState::pa_lock, ParallelAppendState::pa_next_plan, AppendState::ps, ScanDirectionIsForward, and PlanState::state.

Referenced by ExecAppendInitializeDSM().

507 {
508  ParallelAppendState *pstate = node->as_pstate;
509 
510  /* Backward scan is not supported by parallel-aware plans */
512 
513  /* We should never be called when there are no subplans */
515 
517 
518  if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
519  {
520  /* Mark just-completed subplan as finished. */
521  node->as_pstate->pa_finished[node->as_whichplan] = true;
522  }
523  else
524  {
525  /* Start with last subplan. */
526  node->as_whichplan = node->as_nplans - 1;
527 
528  /*
529  * If we've yet to determine the valid subplans then do so now. If
530  * run-time pruning is disabled then the valid subplans will always be
531  * set to all subplans.
532  */
533  if (node->as_valid_subplans == NULL)
534  {
535  node->as_valid_subplans =
537 
538  /*
539  * Mark each invalid plan as finished to allow the loop below to
540  * select the first valid subplan.
541  */
543  }
544  }
545 
546  /* Loop until we find a subplan to execute. */
547  while (pstate->pa_finished[node->as_whichplan])
548  {
549  if (node->as_whichplan == 0)
550  {
553  LWLockRelease(&pstate->pa_lock);
554  return false;
555  }
556 
557  /*
558  * We needn't pay attention to as_valid_subplans here as all invalid
559  * plans have been marked as finished.
560  */
561  node->as_whichplan--;
562  }
563 
564  /* If non-partial, immediately mark as finished. */
565  if (node->as_whichplan < node->as_first_partial_plan)
566  node->as_pstate->pa_finished[node->as_whichplan] = true;
567 
568  LWLockRelease(&pstate->pa_lock);
569 
570  return true;
571 }
#define NO_MATCHING_SUBPLANS
Definition: nodeAppend.c:81
#define ScanDirectionIsForward(direction)
Definition: sdir.h:55
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1225
EState * state
Definition: execnodes.h:942
ScanDirection es_direction
Definition: execnodes.h:502
PlanState ps
Definition: execnodes.h:1217
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1726
ParallelAppendState * as_pstate
Definition: execnodes.h:1223
int as_first_partial_plan
Definition: execnodes.h:1221
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:80
Bitmapset * as_valid_subplans
Definition: execnodes.h:1226
bool pa_finished[FLEXIBLE_ARRAY_MEMBER]
Definition: nodeAppend.c:77
static void mark_invalid_subplans_as_finished(AppendState *node)
Definition: nodeAppend.c:711
#define Assert(condition)
Definition: c.h:732
int as_whichplan
Definition: execnodes.h:1220
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1122
Bitmapset * ExecFindMatchingSubPlans(PartitionPruneState *prunestate)

◆ choose_next_subplan_for_worker()

static bool choose_next_subplan_for_worker ( AppendState *node )
static

Definition at line 587 of file nodeAppend.c.

References AppendState::as_first_partial_plan, AppendState::as_prune_state, AppendState::as_pstate, AppendState::as_valid_subplans, AppendState::as_whichplan, Assert, bms_next_member(), EState::es_direction, ExecFindMatchingSubPlans(), INVALID_SUBPLAN_INDEX, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), mark_invalid_subplans_as_finished(), NO_MATCHING_SUBPLANS, ParallelAppendState::pa_finished, ParallelAppendState::pa_lock, ParallelAppendState::pa_next_plan, AppendState::ps, ScanDirectionIsForward, and PlanState::state.

Referenced by ExecAppendInitializeWorker().

588 {
589  ParallelAppendState *pstate = node->as_pstate;
590 
591  /* Backward scan is not supported by parallel-aware plans */
593 
594  /* We should never be called when there are no subplans */
596 
598 
599  /* Mark just-completed subplan as finished. */
600  if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
601  node->as_pstate->pa_finished[node->as_whichplan] = true;
602 
603  /*
604  * If we've yet to determine the valid subplans then do so now. If
605  * run-time pruning is disabled then the valid subplans will always be set
606  * to all subplans.
607  */
608  else if (node->as_valid_subplans == NULL)
609  {
610  node->as_valid_subplans =
613  }
614 
615  /* If all the plans are already done, we have nothing to do */
616  if (pstate->pa_next_plan == INVALID_SUBPLAN_INDEX)
617  {
618  LWLockRelease(&pstate->pa_lock);
619  return false;
620  }
621 
622  /* Save the plan from which we are starting the search. */
623  node->as_whichplan = pstate->pa_next_plan;
624 
625  /* Loop until we find a valid subplan to execute. */
626  while (pstate->pa_finished[pstate->pa_next_plan])
627  {
628  int nextplan;
629 
630  nextplan = bms_next_member(node->as_valid_subplans,
631  pstate->pa_next_plan);
632  if (nextplan >= 0)
633  {
634  /* Advance to the next valid plan. */
635  pstate->pa_next_plan = nextplan;
636  }
637  else if (node->as_whichplan > node->as_first_partial_plan)
638  {
639  /*
640  * Try looping back to the first valid partial plan, if there is
641  * one. If there isn't, arrange to bail out below.
642  */
643  nextplan = bms_next_member(node->as_valid_subplans,
644  node->as_first_partial_plan - 1);
645  pstate->pa_next_plan =
646  nextplan < 0 ? node->as_whichplan : nextplan;
647  }
648  else
649  {
650  /*
651  * At last plan, and either there are no partial plans or we've
652  * tried them all. Arrange to bail out.
653  */
654  pstate->pa_next_plan = node->as_whichplan;
655  }
656 
657  if (pstate->pa_next_plan == node->as_whichplan)
658  {
659  /* We've tried everything! */
661  LWLockRelease(&pstate->pa_lock);
662  return false;
663  }
664  }
665 
666  /* Pick the plan we found, and advance pa_next_plan one more time. */
667  node->as_whichplan = pstate->pa_next_plan;
669  pstate->pa_next_plan);
670 
671  /*
672  * If there are no more valid plans then try setting the next plan to the
673  * first valid partial plan.
674  */
675  if (pstate->pa_next_plan < 0)
676  {
677  int nextplan = bms_next_member(node->as_valid_subplans,
678  node->as_first_partial_plan - 1);
679 
680  if (nextplan >= 0)
681  pstate->pa_next_plan = nextplan;
682  else
683  {
684  /*
685  * There are no valid partial plans, and we already chose the last
686  * non-partial plan; so flag that there's nothing more for our
687  * fellow workers to do.
688  */
690  }
691  }
692 
693  /* If non-partial, immediately mark as finished. */
694  if (node->as_whichplan < node->as_first_partial_plan)
695  node->as_pstate->pa_finished[node->as_whichplan] = true;
696 
697  LWLockRelease(&pstate->pa_lock);
698 
699  return true;
700 }
#define NO_MATCHING_SUBPLANS
Definition: nodeAppend.c:81
#define ScanDirectionIsForward(direction)
Definition: sdir.h:55
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1225
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1043
EState * state
Definition: execnodes.h:942
ScanDirection es_direction
Definition: execnodes.h:502
PlanState ps
Definition: execnodes.h:1217
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1726
ParallelAppendState * as_pstate
Definition: execnodes.h:1223
int as_first_partial_plan
Definition: execnodes.h:1221
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:80
Bitmapset * as_valid_subplans
Definition: execnodes.h:1226
bool pa_finished[FLEXIBLE_ARRAY_MEMBER]
Definition: nodeAppend.c:77
static void mark_invalid_subplans_as_finished(AppendState *node)
Definition: nodeAppend.c:711
#define Assert(condition)
Definition: c.h:732
int as_whichplan
Definition: execnodes.h:1220
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1122
Bitmapset * ExecFindMatchingSubPlans(PartitionPruneState *prunestate)

◆ choose_next_subplan_locally()

static bool choose_next_subplan_locally ( AppendState *node )
static

Definition at line 457 of file nodeAppend.c.

References AppendState::as_prune_state, AppendState::as_valid_subplans, AppendState::as_whichplan, Assert, bms_next_member(), bms_prev_member(), EState::es_direction, ExecFindMatchingSubPlans(), INVALID_SUBPLAN_INDEX, NO_MATCHING_SUBPLANS, AppendState::ps, ScanDirectionIsForward, and PlanState::state.

Referenced by ExecInitAppend().

458 {
459  int whichplan = node->as_whichplan;
460  int nextplan;
461 
462  /* We should never be called when there are no subplans */
463  Assert(whichplan != NO_MATCHING_SUBPLANS);
464 
465  /*
466  * If first call then have the bms member function choose the first valid
467  * subplan by initializing whichplan to -1. If there happen to be no
468  * valid subplans then the bms member function will handle that by
469  * returning a negative number which will allow us to exit returning a
470  * false value.
471  */
472  if (whichplan == INVALID_SUBPLAN_INDEX)
473  {
474  if (node->as_valid_subplans == NULL)
475  node->as_valid_subplans =
477 
478  whichplan = -1;
479  }
480 
481  /* Ensure whichplan is within the expected range */
482  Assert(whichplan >= -1 && whichplan <= node->as_nplans);
483 
485  nextplan = bms_next_member(node->as_valid_subplans, whichplan);
486  else
487  nextplan = bms_prev_member(node->as_valid_subplans, whichplan);
488 
489  if (nextplan < 0)
490  return false;
491 
492  node->as_whichplan = nextplan;
493 
494  return true;
495 }
#define NO_MATCHING_SUBPLANS
Definition: nodeAppend.c:81
#define ScanDirectionIsForward(direction)
Definition: sdir.h:55
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1225
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1043
EState * state
Definition: execnodes.h:942
ScanDirection es_direction
Definition: execnodes.h:502
PlanState ps
Definition: execnodes.h:1217
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:80
Bitmapset * as_valid_subplans
Definition: execnodes.h:1226
int bms_prev_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1102
#define Assert(condition)
Definition: c.h:732
int as_whichplan
Definition: execnodes.h:1220
Bitmapset * ExecFindMatchingSubPlans(PartitionPruneState *prunestate)

◆ ExecAppend()

static TupleTableSlot * ExecAppend ( PlanState pstate)
static

Definition at line 252 of file nodeAppend.c.

References AppendState::appendplans, AppendState::as_nplans, AppendState::as_whichplan, Assert, castNode, CHECK_FOR_INTERRUPTS, AppendState::choose_next_subplan, ExecClearTuple(), ExecProcNode(), INVALID_SUBPLAN_INDEX, NO_MATCHING_SUBPLANS, AppendState::ps, PlanState::ps_ResultTupleSlot, and TupIsNull.

Referenced by ExecInitAppend().

253 {
254  AppendState *node = castNode(AppendState, pstate);
255 
256  if (node->as_whichplan < 0)
257  {
258  /*
259  * If no subplan has been chosen, we must choose one before
260  * proceeding.
261  */
262  if (node->as_whichplan == INVALID_SUBPLAN_INDEX &&
263  !node->choose_next_subplan(node))
264  return ExecClearTuple(node->ps.ps_ResultTupleSlot);
265 
266  /* Nothing to do if there are no matching subplans */
267  else if (node->as_whichplan == NO_MATCHING_SUBPLANS)
268  return ExecClearTuple(node->ps.ps_ResultTupleSlot);
269  }
270 
271  for (;;)
272  {
273  PlanState *subnode;
274  TupleTableSlot *result;
275 
277 
278  /*
279  * figure out which subplan we are currently processing
280  */
281  Assert(node->as_whichplan >= 0 && node->as_whichplan < node->as_nplans);
282  subnode = node->appendplans[node->as_whichplan];
283 
284  /*
285  * get a tuple from the subplan
286  */
287  result = ExecProcNode(subnode);
288 
289  if (!TupIsNull(result))
290  {
291  /*
292  * If the subplan gave us something then return it as-is. We do
293  * NOT make use of the result slot that was set up in
294  * ExecInitAppend; there's no need for it.
295  */
296  return result;
297  }
298 
299  /* choose new subplan; if none, we're done */
300  if (!node->choose_next_subplan(node))
301  return ExecClearTuple(node->ps.ps_ResultTupleSlot);
302  }
303 }
#define NO_MATCHING_SUBPLANS
Definition: nodeAppend.c:81
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:426
#define castNode(_type_, nodeptr)
Definition: nodes.h:594
PlanState ps
Definition: execnodes.h:1217
TupleTableSlot * ps_ResultTupleSlot
Definition: execnodes.h:978
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1227
#define TupIsNull(slot)
Definition: tuptable.h:293
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:80
static TupleTableSlot * ExecProcNode(PlanState *node)
Definition: executor.h:235
#define Assert(condition)
Definition: c.h:732
int as_whichplan
Definition: execnodes.h:1220
PlanState ** appendplans
Definition: execnodes.h:1218
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99

◆ ExecAppendEstimate()

void ExecAppendEstimate ( AppendState node,
ParallelContext pcxt 
)

Definition at line 387 of file nodeAppend.c.

References add_size(), AppendState::as_nplans, ParallelContext::estimator, offsetof, ParallelAppendState::pa_finished, AppendState::pstate_len, shm_toc_estimate_chunk, and shm_toc_estimate_keys.

Referenced by ExecParallelEstimate().

389 {
390  node->pstate_len =
391  add_size(offsetof(ParallelAppendState, pa_finished),
392  sizeof(bool) * node->as_nplans);
393 
395  shm_toc_estimate_keys(&pcxt->estimator, 1);
396 }
Size pstate_len
Definition: execnodes.h:1224
shm_toc_estimator estimator
Definition: parallel.h:41
#define shm_toc_estimate_chunk(e, sz)
Definition: shm_toc.h:51
Size add_size(Size s1, Size s2)
Definition: shmem.c:475
#define shm_toc_estimate_keys(e, cnt)
Definition: shm_toc.h:53
#define offsetof(type, field)
Definition: c.h:655

◆ ExecAppendInitializeDSM()

void ExecAppendInitializeDSM ( AppendState node,
ParallelContext pcxt 
)

Definition at line 406 of file nodeAppend.c.

References AppendState::as_pstate, AppendState::choose_next_subplan, choose_next_subplan_for_leader(), LWLockInitialize(), LWTRANCHE_PARALLEL_APPEND, ParallelAppendState::pa_lock, PlanState::plan, Plan::plan_node_id, AppendState::ps, AppendState::pstate_len, shm_toc_allocate(), shm_toc_insert(), and ParallelContext::toc.

Referenced by ExecParallelInitializeDSM().

408 {
409  ParallelAppendState *pstate;
410 
411  pstate = shm_toc_allocate(pcxt->toc, node->pstate_len);
412  memset(pstate, 0, node->pstate_len);
414  shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id, pstate);
415 
416  node->as_pstate = pstate;
418 }
Size pstate_len
Definition: execnodes.h:1224
int plan_node_id
Definition: plannodes.h:139
PlanState ps
Definition: execnodes.h:1217
ParallelAppendState * as_pstate
Definition: execnodes.h:1223
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1227
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:678
Plan * plan
Definition: execnodes.h:940
static bool choose_next_subplan_for_leader(AppendState *node)
Definition: nodeAppend.c:506
void * shm_toc_allocate(shm_toc *toc, Size nbytes)
Definition: shm_toc.c:88
void shm_toc_insert(shm_toc *toc, uint64 key, void *address)
Definition: shm_toc.c:171
shm_toc * toc
Definition: parallel.h:44

◆ ExecAppendInitializeWorker()

void ExecAppendInitializeWorker ( AppendState node,
ParallelWorkerContext pwcxt 
)

Definition at line 443 of file nodeAppend.c.

References AppendState::as_pstate, AppendState::choose_next_subplan, choose_next_subplan_for_worker(), PlanState::plan, Plan::plan_node_id, AppendState::ps, shm_toc_lookup(), and ParallelWorkerContext::toc.

Referenced by ExecParallelInitializeWorker().

444 {
445  node->as_pstate = shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
447 }
int plan_node_id
Definition: plannodes.h:139
PlanState ps
Definition: execnodes.h:1217
ParallelAppendState * as_pstate
Definition: execnodes.h:1223
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1227
static bool choose_next_subplan_for_worker(AppendState *node)
Definition: nodeAppend.c:587
Plan * plan
Definition: execnodes.h:940
void * shm_toc_lookup(shm_toc *toc, uint64 key, bool noError)
Definition: shm_toc.c:232

◆ ExecAppendReInitializeDSM()

void ExecAppendReInitializeDSM ( AppendState node,
ParallelContext pcxt 
)

Definition at line 427 of file nodeAppend.c.

References AppendState::as_nplans, AppendState::as_pstate, ParallelAppendState::pa_finished, and ParallelAppendState::pa_next_plan.

Referenced by ExecParallelReInitializeDSM().

428 {
429  ParallelAppendState *pstate = node->as_pstate;
430 
431  pstate->pa_next_plan = 0;
432  memset(pstate->pa_finished, 0, sizeof(bool) * node->as_nplans);
433 }
ParallelAppendState * as_pstate
Definition: execnodes.h:1223
bool pa_finished[FLEXIBLE_ARRAY_MEMBER]
Definition: nodeAppend.c:77

◆ ExecEndAppend()

void ExecEndAppend ( AppendState node)

Definition at line 314 of file nodeAppend.c.

References AppendState::appendplans, AppendState::as_nplans, ExecEndNode(), and i.

Referenced by ExecEndNode().

315 {
316  PlanState **appendplans;
317  int nplans;
318  int i;
319 
320  /*
321  * get information from the node
322  */
323  appendplans = node->appendplans;
324  nplans = node->as_nplans;
325 
326  /*
327  * shut down each of the subscans
328  */
329  for (i = 0; i < nplans; i++)
330  ExecEndNode(appendplans[i]);
331 }
void ExecEndNode(PlanState *node)
Definition: execProcnode.c:538
PlanState ** appendplans
Definition: execnodes.h:1218
int i

◆ ExecInitAppend()

AppendState* ExecInitAppend ( Append node,
EState estate,
int  eflags 
)

Definition at line 101 of file nodeAppend.c.

References Append::appendplans, AppendState::appendplans, AppendState::as_first_partial_plan, AppendState::as_nplans, AppendState::as_prune_state, AppendState::as_valid_subplans, AppendState::as_whichplan, Assert, bms_add_range(), bms_is_empty(), bms_make_singleton(), bms_next_member(), bms_num_members(), AppendState::choose_next_subplan, choose_next_subplan_locally(), PartitionPruneState::do_exec_prune, PartitionPruneState::do_initial_prune, EXEC_FLAG_MARK, ExecAppend(), ExecAssignExprContext(), ExecCreatePartitionPruneState(), ExecFindInitialMatchingSubPlans(), ExecInitNode(), ExecInitResultTupleSlotTL(), PlanState::ExecProcNode, Append::first_partial_plan, i, INVALID_SUBPLAN_INDEX, list_length(), list_nth(), makeNode, NO_MATCHING_SUBPLANS, palloc(), Append::part_prune_info, PlanState::plan, AppendState::ps, PlanState::ps_ProjInfo, PlanState::resultopsfixed, PlanState::resultopsset, PlanState::state, and TTSOpsVirtual.

Referenced by ExecInitNode().

102 {
103  AppendState *appendstate = makeNode(AppendState);
104  PlanState **appendplanstates;
105  Bitmapset *validsubplans;
106  int nplans;
107  int firstvalid;
108  int i,
109  j;
110 
111  /* check for unsupported flags */
112  Assert(!(eflags & EXEC_FLAG_MARK));
113 
114  /*
115  * create new AppendState for our append node
116  */
117  appendstate->ps.plan = (Plan *) node;
118  appendstate->ps.state = estate;
119  appendstate->ps.ExecProcNode = ExecAppend;
120 
121  /* Let choose_next_subplan_* function handle setting the first subplan */
122  appendstate->as_whichplan = INVALID_SUBPLAN_INDEX;
123 
124  /* If run-time partition pruning is enabled, then set that up now */
125  if (node->part_prune_info != NULL)
126  {
127  PartitionPruneState *prunestate;
128 
129  /* We may need an expression context to evaluate partition exprs */
130  ExecAssignExprContext(estate, &appendstate->ps);
131 
132  /* Create the working data structure for pruning. */
133  prunestate = ExecCreatePartitionPruneState(&appendstate->ps,
134  node->part_prune_info);
135  appendstate->as_prune_state = prunestate;
136 
137  /* Perform an initial partition prune, if required. */
138  if (prunestate->do_initial_prune)
139  {
140  /* Determine which subplans survive initial pruning */
141  validsubplans = ExecFindInitialMatchingSubPlans(prunestate,
142  list_length(node->appendplans));
143 
144  /*
145  * The case where no subplans survive pruning must be handled
146  * specially. The problem here is that code in explain.c requires
147  * an Append to have at least one subplan in order for it to
148  * properly determine the Vars in that subplan's targetlist. We
149  * sidestep this issue by just initializing the first subplan and
150  * setting as_whichplan to NO_MATCHING_SUBPLANS to indicate that
151  * we don't really need to scan any subnodes.
152  */
153  if (bms_is_empty(validsubplans))
154  {
155  appendstate->as_whichplan = NO_MATCHING_SUBPLANS;
156 
157  /* Mark the first as valid so that it's initialized below */
158  validsubplans = bms_make_singleton(0);
159  }
160 
161  nplans = bms_num_members(validsubplans);
162  }
163  else
164  {
165  /* We'll need to initialize all subplans */
166  nplans = list_length(node->appendplans);
167  Assert(nplans > 0);
168  validsubplans = bms_add_range(NULL, 0, nplans - 1);
169  }
170 
171  /*
172  * If no runtime pruning is required, we can fill as_valid_subplans
173  * immediately, preventing later calls to ExecFindMatchingSubPlans.
174  */
175  if (!prunestate->do_exec_prune)
176  {
177  Assert(nplans > 0);
178  appendstate->as_valid_subplans = bms_add_range(NULL, 0, nplans - 1);
179  }
180  }
181  else
182  {
183  nplans = list_length(node->appendplans);
184 
185  /*
186  * When run-time partition pruning is not enabled we can just mark all
187  * subplans as valid; they must also all be initialized.
188  */
189  Assert(nplans > 0);
190  appendstate->as_valid_subplans = validsubplans =
191  bms_add_range(NULL, 0, nplans - 1);
192  appendstate->as_prune_state = NULL;
193  }
194 
195  /*
196  * Initialize result tuple type and slot.
197  */
198  ExecInitResultTupleSlotTL(&appendstate->ps, &TTSOpsVirtual);
199 
200  /* node returns slots from each of its subnodes, therefore not fixed */
201  appendstate->ps.resultopsset = true;
202  appendstate->ps.resultopsfixed = false;
203 
204  appendplanstates = (PlanState **) palloc(nplans *
205  sizeof(PlanState *));
206 
207  /*
208  * call ExecInitNode on each of the valid plans to be executed and save
209  * the results into the appendplanstates array.
210  *
211  * While at it, find out the first valid partial plan.
212  */
213  j = 0;
214  firstvalid = nplans;
215  i = -1;
216  while ((i = bms_next_member(validsubplans, i)) >= 0)
217  {
218  Plan *initNode = (Plan *) list_nth(node->appendplans, i);
219 
220  /*
221  * Record the lowest appendplans index which is a valid partial plan.
222  */
223  if (i >= node->first_partial_plan && j < firstvalid)
224  firstvalid = j;
225 
226  appendplanstates[j++] = ExecInitNode(initNode, estate, eflags);
227  }
228 
229  appendstate->as_first_partial_plan = firstvalid;
230  appendstate->appendplans = appendplanstates;
231  appendstate->as_nplans = nplans;
232 
233  /*
234  * Miscellaneous initialization
235  */
236 
237  appendstate->ps.ps_ProjInfo = NULL;
238 
239  /* For parallel query, this will be overridden later. */
241 
242  return appendstate;
243 }
static TupleTableSlot * ExecAppend(PlanState *pstate)
Definition: nodeAppend.c:252
ProjectionInfo * ps_ProjInfo
Definition: execnodes.h:980
#define NO_MATCHING_SUBPLANS
Definition: nodeAppend.c:81
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1225
static bool choose_next_subplan_locally(AppendState *node)
Definition: nodeAppend.c:457
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1043
const TupleTableSlotOps TTSOpsVirtual
Definition: execTuples.c:84
EState * state
Definition: execnodes.h:942
PlanState ps
Definition: execnodes.h:1217
List * appendplans
Definition: plannodes.h:252
Bitmapset * bms_add_range(Bitmapset *a, int lower, int upper)
Definition: bitmapset.c:834
static void * list_nth(const List *list, int n)
Definition: pg_list.h:277
int first_partial_plan
Definition: plannodes.h:258
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1227
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:646
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:186
Bitmapset * ExecFindInitialMatchingSubPlans(PartitionPruneState *prunestate, int nsubplans)
int as_first_partial_plan
Definition: execnodes.h:1221
bool resultopsset
Definition: execnodes.h:1023
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:80
bool bms_is_empty(const Bitmapset *a)
Definition: bitmapset.c:701
ExecProcNodeMtd ExecProcNode
Definition: execnodes.h:946
Bitmapset * as_valid_subplans
Definition: execnodes.h:1226
Plan * plan
Definition: execnodes.h:940
#define makeNode(_type_)
Definition: nodes.h:573
struct PartitionPruneInfo * part_prune_info
Definition: plannodes.h:261
#define Assert(condition)
Definition: c.h:732
#define EXEC_FLAG_MARK
Definition: executor.h:59
int as_whichplan
Definition: execnodes.h:1220
void ExecAssignExprContext(EState *estate, PlanState *planstate)
Definition: execUtils.c:444
static int list_length(const List *l)
Definition: pg_list.h:169
void ExecInitResultTupleSlotTL(PlanState *planstate, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1764
bool resultopsfixed
Definition: execnodes.h:1019
void * palloc(Size size)
Definition: mcxt.c:949
PlanState ** appendplans
Definition: execnodes.h:1218
int i
PartitionPruneState * ExecCreatePartitionPruneState(PlanState *planstate, PartitionPruneInfo *partitionpruneinfo)
PlanState * ExecInitNode(Plan *node, EState *estate, int eflags)
Definition: execProcnode.c:139

◆ ExecReScanAppend()

void ExecReScanAppend ( AppendState node)

Definition at line 334 of file nodeAppend.c.

References AppendState::appendplans, AppendState::as_nplans, AppendState::as_prune_state, AppendState::as_valid_subplans, AppendState::as_whichplan, bms_free(), bms_overlap(), PlanState::chgParam, PartitionPruneState::execparamids, ExecReScan(), i, INVALID_SUBPLAN_INDEX, AppendState::ps, and UpdateChangedParamSet().

Referenced by ExecReScan().

335 {
336  int i;
337 
338  /*
339  * If any PARAM_EXEC Params used in pruning expressions have changed, then
340  * we'd better unset the valid subplans so that they are reselected for
341  * the new parameter values.
342  */
343  if (node->as_prune_state &&
344  bms_overlap(node->ps.chgParam,
346  {
348  node->as_valid_subplans = NULL;
349  }
350 
351  for (i = 0; i < node->as_nplans; i++)
352  {
353  PlanState *subnode = node->appendplans[i];
354 
355  /*
356  * ExecReScan doesn't know about my subplans, so I have to do
357  * changed-parameter signaling myself.
358  */
359  if (node->ps.chgParam != NULL)
360  UpdateChangedParamSet(subnode, node->ps.chgParam);
361 
362  /*
363  * If chgParam of subnode is not null then plan will be re-scanned by
364  * first ExecProcNode.
365  */
366  if (subnode->chgParam == NULL)
367  ExecReScan(subnode);
368  }
369 
370  /* Let choose_next_subplan_* function handle setting the first subplan */
372 }
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1225
void ExecReScan(PlanState *node)
Definition: execAmi.c:77
PlanState ps
Definition: execnodes.h:1217
Bitmapset * execparamids
Bitmapset * chgParam
Definition: execnodes.h:972
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:80
Bitmapset * as_valid_subplans
Definition: execnodes.h:1226
void UpdateChangedParamSet(PlanState *node, Bitmapset *newchg)
Definition: execUtils.c:802
void bms_free(Bitmapset *a)
Definition: bitmapset.c:208
int as_whichplan
Definition: execnodes.h:1220
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:494
PlanState ** appendplans
Definition: execnodes.h:1218
int i

◆ mark_invalid_subplans_as_finished()

static void mark_invalid_subplans_as_finished ( AppendState node)
static

Definition at line 711 of file nodeAppend.c.

References AppendState::as_nplans, AppendState::as_prune_state, AppendState::as_pstate, AppendState::as_valid_subplans, Assert, bms_is_member(), bms_num_members(), i, and ParallelAppendState::pa_finished.

Referenced by choose_next_subplan_for_leader(), and choose_next_subplan_for_worker().

712 {
713  int i;
714 
715  /* Only valid to call this while in parallel Append mode */
716  Assert(node->as_pstate);
717 
718  /* Shouldn't have been called when run-time pruning is not enabled */
719  Assert(node->as_prune_state);
720 
721  /* Nothing to do if all plans are valid */
722  if (bms_num_members(node->as_valid_subplans) == node->as_nplans)
723  return;
724 
725  /* Mark all non-valid plans as finished */
726  for (i = 0; i < node->as_nplans; i++)
727  {
728  if (!bms_is_member(i, node->as_valid_subplans))
729  node->as_pstate->pa_finished[i] = true;
730  }
731 }
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1225
ParallelAppendState * as_pstate
Definition: execnodes.h:1223
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:646
Bitmapset * as_valid_subplans
Definition: execnodes.h:1226
bool pa_finished[FLEXIBLE_ARRAY_MEMBER]
Definition: nodeAppend.c:77
#define Assert(condition)
Definition: c.h:732
int i
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:427