PostgreSQL Source Code  git master
nodeAppend.c File Reference
#include "postgres.h"
#include "executor/execdebug.h"
#include "executor/nodeAppend.h"
#include "miscadmin.h"

Data Structures

struct  ParallelAppendState
 

Macros

#define INVALID_SUBPLAN_INDEX   -1
 

Functions

static TupleTableSlot * ExecAppend (PlanState *pstate)
 
static bool choose_next_subplan_locally (AppendState *node)
 
static bool choose_next_subplan_for_leader (AppendState *node)
 
static bool choose_next_subplan_for_worker (AppendState *node)
 
AppendState * ExecInitAppend (Append *node, EState *estate, int eflags)
 
void ExecEndAppend (AppendState *node)
 
void ExecReScanAppend (AppendState *node)
 
void ExecAppendEstimate (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendInitializeDSM (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendReInitializeDSM (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendInitializeWorker (AppendState *node, ParallelWorkerContext *pwcxt)
 

Macro Definition Documentation

◆ INVALID_SUBPLAN_INDEX

#define INVALID_SUBPLAN_INDEX   -1
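
This is the sentinel stored in AppendState::as_whichplan (and in ParallelAppendState::pa_next_plan) when no subplan is currently selected. ExecAppend() reads it as "a subplan must be chosen before pulling tuples", and choose_next_subplan_for_worker() writes it into pa_next_plan to tell other participants that every subplan has finished.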

Function Documentation

◆ choose_next_subplan_for_leader()

static bool choose_next_subplan_for_leader ( AppendState * node )
static

Definition at line 392 of file nodeAppend.c.

References AppendState::as_nplans, AppendState::as_pstate, AppendState::as_whichplan, Assert, EState::es_direction, Append::first_partial_plan, INVALID_SUBPLAN_INDEX, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), ParallelAppendState::pa_finished, ParallelAppendState::pa_lock, ParallelAppendState::pa_next_plan, PlanState::plan, AppendState::ps, ScanDirectionIsForward, and PlanState::state.

Referenced by ExecAppendInitializeDSM().

393 {
394  ParallelAppendState *pstate = node->as_pstate;
395  Append *append = (Append *) node->ps.plan;
396 
397  /* Backward scan is not supported by parallel-aware plans */
398  Assert(ScanDirectionIsForward(node->ps.state->es_direction));
399
400  LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE);
401
402  if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
403  {
404  /* Mark just-completed subplan as finished. */
405  node->as_pstate->pa_finished[node->as_whichplan] = true;
406  }
407  else
408  {
409  /* Start with last subplan. */
410  node->as_whichplan = node->as_nplans - 1;
411  }
412 
413  /* Loop until we find a subplan to execute. */
414  while (pstate->pa_finished[node->as_whichplan])
415  {
416  if (node->as_whichplan == 0)
417  {
418  pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
419  node->as_whichplan = INVALID_SUBPLAN_INDEX;
420  LWLockRelease(&pstate->pa_lock);
421  return false;
422  }
423  node->as_whichplan--;
424  }
425 
426  /* If non-partial, immediately mark as finished. */
427  if (node->as_whichplan < append->first_partial_plan)
428  node->as_pstate->pa_finished[node->as_whichplan] = true;
429 
430  LWLockRelease(&pstate->pa_lock);
431 
432  return true;
433 }
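
In isolation, the leader's policy is a backward walk: start at the last subplan, skip finished ones, and claim any non-partial subplan exclusively by marking it finished the moment it is chosen. Below is a minimal standalone sketch of that descent; the types and bounds are simplified stand-ins for the executor state, and the LWLock serialization is omitted:

#include <stdbool.h>
#include <stdio.h>

#define INVALID_SUBPLAN_INDEX -1

/* Simplified stand-in for the shared ParallelAppendState (no locking). */
typedef struct
{
    bool finished[8];
} ToyShared;

/*
 * Walk backward from the last subplan, skipping finished ones, and
 * return the chosen index, or INVALID_SUBPLAN_INDEX once all are done.
 * Non-partial subplans (index < first_partial_plan) are claimed
 * exclusively by marking them finished immediately, as in the original.
 */
static int
leader_pick(ToyShared *shared, int nplans, int first_partial_plan)
{
    int which = nplans - 1;

    while (shared->finished[which])
    {
        if (which == 0)
            return INVALID_SUBPLAN_INDEX;
        which--;
    }
    if (which < first_partial_plan)
        shared->finished[which] = true;
    return which;
}

int
main(void)
{
    ToyShared shared = {{false}};
    int which;

    /* Four subplans, the last two partial: picks 3, 2, 1, 0, then stops. */
    while ((which = leader_pick(&shared, 4, 2)) != INVALID_SUBPLAN_INDEX)
    {
        printf("leader runs subplan %d\n", which);
        shared.finished[which] = true;  /* pretend the subplan completed */
    }
    return 0;
}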

◆ choose_next_subplan_for_worker()

static bool choose_next_subplan_for_worker ( AppendState * node )
static

Definition at line 449 of file nodeAppend.c.

References AppendState::as_nplans, AppendState::as_pstate, AppendState::as_whichplan, Assert, EState::es_direction, Append::first_partial_plan, INVALID_SUBPLAN_INDEX, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), ParallelAppendState::pa_finished, ParallelAppendState::pa_lock, ParallelAppendState::pa_next_plan, PlanState::plan, AppendState::ps, ScanDirectionIsForward, and PlanState::state.

Referenced by ExecAppendInitializeWorker().

450 {
451  ParallelAppendState *pstate = node->as_pstate;
452  Append *append = (Append *) node->ps.plan;
453 
454  /* Backward scan is not supported by parallel-aware plans */
455  Assert(ScanDirectionIsForward(node->ps.state->es_direction));
456
457  LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE);
458
459  /* Mark just-completed subplan as finished. */
460  if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
461  node->as_pstate->pa_finished[node->as_whichplan] = true;
462 
463  /* If all the plans are already done, we have nothing to do */
464  if (pstate->pa_next_plan == INVALID_SUBPLAN_INDEX)
465  {
466  LWLockRelease(&pstate->pa_lock);
467  return false;
468  }
469 
470  /* Save the plan from which we are starting the search. */
471  node->as_whichplan = pstate->pa_next_plan;
472 
473  /* Loop until we find a subplan to execute. */
474  while (pstate->pa_finished[pstate->pa_next_plan])
475  {
476  if (pstate->pa_next_plan < node->as_nplans - 1)
477  {
478  /* Advance to next plan. */
479  pstate->pa_next_plan++;
480  }
481  else if (node->as_whichplan > append->first_partial_plan)
482  {
483  /* Loop back to first partial plan. */
484  pstate->pa_next_plan = append->first_partial_plan;
485  }
486  else
487  {
488  /*
489  * At last plan, and either there are no partial plans or we've
490  * tried them all. Arrange to bail out.
491  */
492  pstate->pa_next_plan = node->as_whichplan;
493  }
494 
495  if (pstate->pa_next_plan == node->as_whichplan)
496  {
497  /* We've tried everything! */
498  pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
499  LWLockRelease(&pstate->pa_lock);
500  return false;
501  }
502  }
503 
504  /* Pick the plan we found, and advance pa_next_plan one more time. */
505  node->as_whichplan = pstate->pa_next_plan++;
506  if (pstate->pa_next_plan >= node->as_nplans)
507  {
508  if (append->first_partial_plan < node->as_nplans)
509  pstate->pa_next_plan = append->first_partial_plan;
510  else
511  {
512  /*
513  * We have only non-partial plans, and we already chose the last
514  * one; so arrange for the other workers to immediately bail out.
515  */
516  pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
517  }
518  }
519 
520  /* If non-partial, immediately mark as finished. */
521  if (node->as_whichplan < append->first_partial_plan)
522  node->as_pstate->pa_finished[node->as_whichplan] = true;
523 
524  LWLockRelease(&pstate->pa_lock);
525 
526  return true;
527 }
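
The worker-side search is a forward walk over a shared cursor: advance pa_next_plan, wrap back to first_partial_plan when it runs off the end (earlier, non-partial plans are never revisited), and bail out once a full circle finds everything finished. A single-threaded sketch of just that advance/wrap arithmetic, with plain parameters standing in for the shared state and the lock omitted:

#include <stdbool.h>
#include <stdio.h>

#define INVALID_SUBPLAN_INDEX -1

/*
 * Advance a shared cursor over the subplans and report which one this
 * worker should run next, or INVALID_SUBPLAN_INDEX when everything is
 * finished.  next_plan stands in for pa_next_plan and finished[] for
 * pa_finished[]; the LWLock that serializes the real version is omitted.
 */
static int
worker_pick(bool *finished, int *next_plan, int nplans, int first_partial_plan)
{
    int start;
    int chosen;

    if (*next_plan == INVALID_SUBPLAN_INDEX)
        return INVALID_SUBPLAN_INDEX;   /* all plans already done */

    start = *next_plan;                 /* where this search began */
    while (finished[*next_plan])
    {
        if (*next_plan < nplans - 1)
            (*next_plan)++;                     /* advance to next plan */
        else if (start > first_partial_plan)
            *next_plan = first_partial_plan;    /* loop back to partials */
        else
            *next_plan = start;                 /* arrange to bail out */

        if (*next_plan == start)
        {
            /* We've tried everything! */
            *next_plan = INVALID_SUBPLAN_INDEX;
            return INVALID_SUBPLAN_INDEX;
        }
    }

    /* Claim the plan we found, then push the cursor one step further. */
    chosen = (*next_plan)++;
    if (*next_plan >= nplans)
        *next_plan = (first_partial_plan < nplans)
            ? first_partial_plan : INVALID_SUBPLAN_INDEX;
    if (chosen < first_partial_plan)
        finished[chosen] = true;        /* non-partial: claimed exclusively */
    return chosen;
}

int
main(void)
{
    bool finished[4] = {false};
    int  next_plan = 0;

    /* Four subplans, indexes 2 and 3 partial: picks go 0 1 2 3 2 3 ... */
    for (int i = 0; i < 6; i++)
        printf("worker runs subplan %d\n",
               worker_pick(finished, &next_plan, 4, 2));
    return 0;
}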

◆ choose_next_subplan_locally()

static bool choose_next_subplan_locally ( AppendState * node )
static

Definition at line 360 of file nodeAppend.c.

References AppendState::as_nplans, AppendState::as_whichplan, Assert, EState::es_direction, AppendState::ps, ScanDirectionIsForward, and PlanState::state.

Referenced by ExecInitAppend().

361 {
362  int whichplan = node->as_whichplan;
363 
364  /* We should never see INVALID_SUBPLAN_INDEX in this case. */
365  Assert(whichplan >= 0 && whichplan <= node->as_nplans);
366 
367  if (ScanDirectionIsForward(node->ps.state->es_direction))
368  {
369  if (whichplan >= node->as_nplans - 1)
370  return false;
371  node->as_whichplan++;
372  }
373  else
374  {
375  if (whichplan <= 0)
376  return false;
377  node->as_whichplan--;
378  }
379 
380  return true;
381 }
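
Stripped of the executor plumbing, the non-parallel chooser is just a bounds-checked step in the scan direction. A compilable sketch (the ScanDir enum here is a hypothetical stand-in for PostgreSQL's ScanDirection):

#include <stdbool.h>
#include <stdio.h>

typedef enum { BackwardScan, ForwardScan } ScanDir;

/*
 * Step whichplan one subplan in the scan direction; return false when
 * the walk falls off either end of [0, nplans).  This is the whole job
 * of choose_next_subplan_locally, minus the executor state.
 */
static bool
step_subplan(int *whichplan, int nplans, ScanDir dir)
{
    if (dir == ForwardScan)
    {
        if (*whichplan >= nplans - 1)
            return false;
        (*whichplan)++;
    }
    else
    {
        if (*whichplan <= 0)
            return false;
        (*whichplan)--;
    }
    return true;
}

int
main(void)
{
    int which = 0;

    do
        printf("subplan %d\n", which);      /* prints 0, 1, 2 */
    while (step_subplan(&which, 3, ForwardScan));
    return 0;
}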

◆ ExecAppend()

static TupleTableSlot * ExecAppend ( PlanState * pstate )
static

Definition at line 178 of file nodeAppend.c.

References AppendState::appendplans, AppendState::as_nplans, AppendState::as_whichplan, Assert, castNode, CHECK_FOR_INTERRUPTS, AppendState::choose_next_subplan, ExecClearTuple(), ExecProcNode(), INVALID_SUBPLAN_INDEX, AppendState::ps, PlanState::ps_ResultTupleSlot, and TupIsNull.

Referenced by ExecInitAppend().

179 {
180  AppendState *node = castNode(AppendState, pstate);
181 
182  /* If no subplan has been chosen, we must choose one before proceeding. */
183  if (node->as_whichplan == INVALID_SUBPLAN_INDEX &&
184  !node->choose_next_subplan(node))
185  return ExecClearTuple(node->ps.ps_ResultTupleSlot);
186 
187  for (;;)
188  {
189  PlanState *subnode;
190  TupleTableSlot *result;
191 
192  CHECK_FOR_INTERRUPTS();
193
194  /*
195  * figure out which subplan we are currently processing
196  */
197  Assert(node->as_whichplan >= 0 && node->as_whichplan < node->as_nplans);
198  subnode = node->appendplans[node->as_whichplan];
199 
200  /*
201  * get a tuple from the subplan
202  */
203  result = ExecProcNode(subnode);
204 
205  if (!TupIsNull(result))
206  {
207  /*
208  * If the subplan gave us something then return it as-is. We do
209  * NOT make use of the result slot that was set up in
210  * ExecInitAppend; there's no need for it.
211  */
212  return result;
213  }
214 
215  /* choose new subplan; if none, we're done */
216  if (!node->choose_next_subplan(node))
217  return ExecClearTuple(node->ps.ps_ResultTupleSlot);
218  }
219 }
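
The loop never returns an empty slot from one subplan while others remain: a null result just triggers the choose_next_subplan hook, and only when the hook reports no subplan left does ExecAppend return a cleared slot. A toy standalone model of that pull protocol (the ToyAppend type is invented for illustration; integers stand in for tuples):

#include <stdbool.h>
#include <stdio.h>

#define INVALID_SUBPLAN_INDEX -1

/*
 * A toy Append node: an array of integer "subplans", each yielding a
 * fixed number of tuples, plus a pluggable chooser with the same shape
 * as AppendState::choose_next_subplan.
 */
typedef struct ToyAppend ToyAppend;
struct ToyAppend
{
    int   whichplan;
    int   nplans;
    int   remaining[4];                 /* tuples left per subplan */
    bool (*choose_next_subplan)(ToyAppend *);
};

/* The non-parallel policy: just step forward through the subplans. */
static bool
choose_sequential(ToyAppend *node)
{
    if (node->whichplan >= node->nplans - 1)
        return false;
    node->whichplan = (node->whichplan == INVALID_SUBPLAN_INDEX)
        ? 0 : node->whichplan + 1;
    return true;
}

/* Pull tuples like ExecAppend: drain the current subplan, then switch. */
static int
toy_exec_append(ToyAppend *node)
{
    if (node->whichplan == INVALID_SUBPLAN_INDEX &&
        !node->choose_next_subplan(node))
        return -1;                      /* no subplans at all */

    for (;;)
    {
        if (node->remaining[node->whichplan] > 0)
        {
            node->remaining[node->whichplan]--;
            return node->whichplan;     /* "tuple": the subplan it came from */
        }
        /* choose new subplan; if none, we're done */
        if (!node->choose_next_subplan(node))
            return -1;
    }
}

int
main(void)
{
    ToyAppend node = {
        .whichplan = INVALID_SUBPLAN_INDEX,
        .nplans = 3,
        .remaining = {2, 0, 1},
        .choose_next_subplan = choose_sequential,
    };
    int t;

    while ((t = toy_exec_append(&node)) != -1)
        printf("tuple from subplan %d\n", t);   /* 0, 0, 2 */
    return 0;
}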

◆ ExecAppendEstimate()

void ExecAppendEstimate ( AppendState * node,
ParallelContext * pcxt 
)

Definition at line 290 of file nodeAppend.c.

References add_size(), AppendState::as_nplans, ParallelContext::estimator, offsetof, ParallelAppendState::pa_finished, AppendState::pstate_len, shm_toc_estimate_chunk, and shm_toc_estimate_keys.

Referenced by ExecParallelEstimate().

292 {
293  node->pstate_len =
294  add_size(offsetof(ParallelAppendState, pa_finished),
295  sizeof(bool) * node->as_nplans);
296 
297  shm_toc_estimate_chunk(&pcxt->estimator, node->pstate_len);
298  shm_toc_estimate_keys(&pcxt->estimator, 1);
299 }
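
The estimate is the standard C idiom for sizing a struct that ends in a flexible array member: offsetof() yields the fixed header, plus one array element per subplan (add_size() is simply PostgreSQL's overflow-checked addition). A standalone illustration of the same computation:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Same layout trick as ParallelAppendState: fixed header + bool array. */
typedef struct
{
    int  next_plan;
    bool finished[];            /* C99 flexible array member */
} SharedChunk;

int
main(void)
{
    int    nplans = 8;
    size_t len;

    /*
     * offsetof(..., finished) is the size of everything before the
     * flexible array, so the total is header plus one bool per subplan,
     * exactly the computation in ExecAppendEstimate (minus add_size's
     * overflow check).
     */
    len = offsetof(SharedChunk, finished) + sizeof(bool) * nplans;
    printf("need %zu bytes for %d subplans\n", len, nplans);
    return 0;
}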

◆ ExecAppendInitializeDSM()

void ExecAppendInitializeDSM ( AppendState * node,
ParallelContext * pcxt 
)

Definition at line 309 of file nodeAppend.c.

References AppendState::as_pstate, AppendState::choose_next_subplan, choose_next_subplan_for_leader(), LWLockInitialize(), LWTRANCHE_PARALLEL_APPEND, ParallelAppendState::pa_lock, PlanState::plan, Plan::plan_node_id, AppendState::ps, AppendState::pstate_len, shm_toc_allocate(), shm_toc_insert(), and ParallelContext::toc.

Referenced by ExecParallelInitializeDSM().

311 {
312  ParallelAppendState *pstate;
313 
314  pstate = shm_toc_allocate(pcxt->toc, node->pstate_len);
315  memset(pstate, 0, node->pstate_len);
316  LWLockInitialize(&pstate->pa_lock, LWTRANCHE_PARALLEL_APPEND);
317  shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id, pstate);
318 
319  node->as_pstate = pstate;
320  node->choose_next_subplan = choose_next_subplan_for_leader;
321 }

◆ ExecAppendInitializeWorker()

void ExecAppendInitializeWorker ( AppendState * node,
ParallelWorkerContext * pwcxt 
)

Definition at line 346 of file nodeAppend.c.

References AppendState::as_pstate, AppendState::choose_next_subplan, choose_next_subplan_for_worker(), PlanState::plan, Plan::plan_node_id, AppendState::ps, shm_toc_lookup(), and ParallelWorkerContext::toc.

Referenced by ExecParallelInitializeWorker().

347 {
348  node->as_pstate = shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
349  node->choose_next_subplan = choose_next_subplan_for_worker;
350 }
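
Together with ExecAppendInitializeDSM() above, this is a simple rendezvous: the leader allocates the ParallelAppendState and publishes it in the shared-memory table of contents under the Append node's plan_node_id, and each worker looks it up under the same key. A toy in-process mock of that insert/lookup handshake (the ToyToc type is invented for illustration; the real shm_toc lives in dynamic shared memory):

#include <stdio.h>

#define MAX_ENTRIES 16

typedef struct { int key; void *addr; } TocEntry;
typedef struct { int nused; TocEntry entries[MAX_ENTRIES]; } ToyToc;

/* Leader side: publish an address under a key. */
static void
toy_toc_insert(ToyToc *toc, int key, void *addr)
{
    toc->entries[toc->nused].key = key;
    toc->entries[toc->nused].addr = addr;
    toc->nused++;
}

/* Worker side: find the address published under a key. */
static void *
toy_toc_lookup(ToyToc *toc, int key)
{
    for (int i = 0; i < toc->nused; i++)
        if (toc->entries[i].key == key)
            return toc->entries[i].addr;
    return NULL;
}

int
main(void)
{
    ToyToc toc = {0};
    int    shared_state = 42;       /* stands in for ParallelAppendState */
    int    plan_node_id = 7;

    /* Leader (as in ExecAppendInitializeDSM): publish under the node's ID. */
    toy_toc_insert(&toc, plan_node_id, &shared_state);

    /* Worker (as in ExecAppendInitializeWorker): look it up by the same ID. */
    printf("worker sees %d\n", *(int *) toy_toc_lookup(&toc, plan_node_id));
    return 0;
}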

◆ ExecAppendReInitializeDSM()

void ExecAppendReInitializeDSM ( AppendState * node,
ParallelContext * pcxt 
)

Definition at line 330 of file nodeAppend.c.

References AppendState::as_nplans, AppendState::as_pstate, ParallelAppendState::pa_finished, and ParallelAppendState::pa_next_plan.

Referenced by ExecParallelReInitializeDSM().

331 {
332  ParallelAppendState *pstate = node->as_pstate;
333 
334  pstate->pa_next_plan = 0;
335  memset(pstate->pa_finished, 0, sizeof(bool) * node->as_nplans);
336 }

◆ ExecEndAppend()

void ExecEndAppend ( AppendState * node )

Definition at line 230 of file nodeAppend.c.

References AppendState::appendplans, AppendState::as_nplans, ExecEndNode(), and i.

Referenced by ExecEndNode().

231 {
232  PlanState **appendplans;
233  int nplans;
234  int i;
235 
236  /*
237  * get information from the node
238  */
239  appendplans = node->appendplans;
240  nplans = node->as_nplans;
241 
242  /*
243  * shut down each of the subscans
244  */
245  for (i = 0; i < nplans; i++)
246  ExecEndNode(appendplans[i]);
247 }

◆ ExecInitAppend()

AppendState * ExecInitAppend ( Append * node,
EState * estate,
int  eflags 
)

Definition at line 98 of file nodeAppend.c.

References Append::appendplans, AppendState::appendplans, AppendState::as_nplans, AppendState::as_whichplan, Assert, AppendState::choose_next_subplan, choose_next_subplan_locally(), EXEC_FLAG_MARK, ExecAppend(), ExecInitNode(), ExecInitResultTupleSlotTL(), ExecLockNonLeafAppendTables(), PlanState::ExecProcNode, i, INVALID_SUBPLAN_INDEX, lfirst, list_length(), makeNode, palloc0(), Plan::parallel_aware, Append::partitioned_rels, PlanState::plan, AppendState::ps, PlanState::ps_ProjInfo, and PlanState::state.

Referenced by ExecInitNode().

99 {
100  AppendState *appendstate = makeNode(AppendState);
101  PlanState **appendplanstates;
102  int nplans;
103  int i;
104  ListCell *lc;
105 
106  /* check for unsupported flags */
107  Assert(!(eflags & EXEC_FLAG_MARK));
108 
109  /*
110  * Lock the non-leaf tables in the partition tree controlled by this node.
111  * It's a no-op for non-partitioned parent tables.
112  */
113  ExecLockNonLeafAppendTables(node->partitioned_rels, estate);
114
115  /*
116  * Set up empty vector of subplan states
117  */
118  nplans = list_length(node->appendplans);
119 
120  appendplanstates = (PlanState **) palloc0(nplans * sizeof(PlanState *));
121 
122  /*
123  * create new AppendState for our append node
124  */
125  appendstate->ps.plan = (Plan *) node;
126  appendstate->ps.state = estate;
127  appendstate->ps.ExecProcNode = ExecAppend;
128  appendstate->appendplans = appendplanstates;
129  appendstate->as_nplans = nplans;
130 
131  /*
132  * Initialize result tuple type and slot.
133  */
134  ExecInitResultTupleSlotTL(estate, &appendstate->ps);
135 
136  /*
137  * call ExecInitNode on each of the plans to be executed and save the
138  * results into the array "appendplans".
139  */
140  i = 0;
141  foreach(lc, node->appendplans)
142  {
143  Plan *initNode = (Plan *) lfirst(lc);
144 
145  appendplanstates[i] = ExecInitNode(initNode, estate, eflags);
146  i++;
147  }
148 
149  /*
150  * Miscellaneous initialization
151  *
152  * Append plans don't have expression contexts because they never call
153  * ExecQual or ExecProject.
154  */
155  appendstate->ps.ps_ProjInfo = NULL;
156 
157  /*
158  * Parallel-aware append plans must choose the first subplan to execute by
159  * looking at shared memory, but non-parallel-aware append plans can
160  * always start with the first subplan.
161  */
162  appendstate->as_whichplan =
163  appendstate->ps.plan->parallel_aware ? INVALID_SUBPLAN_INDEX : 0;
164 
165  /* If parallel-aware, this will be overridden later. */
166  appendstate->choose_next_subplan = choose_next_subplan_locally;
167
168  return appendstate;
169 }
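
Note that ExecInitAppend() always installs choose_next_subplan_locally(); the parallel variants are swapped in later, by ExecAppendInitializeDSM() in the leader and ExecAppendInitializeWorker() in each worker. That is also why a parallel-aware node starts with as_whichplan = INVALID_SUBPLAN_INDEX: the first call to ExecAppend() must consult shared memory before any subplan runs, whereas a non-parallel-aware node can simply begin with subplan 0.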

◆ ExecReScanAppend()

void ExecReScanAppend ( AppendState * node )

Definition at line 250 of file nodeAppend.c.

References AppendState::appendplans, AppendState::as_nplans, AppendState::as_whichplan, PlanState::chgParam, ExecReScan(), i, INVALID_SUBPLAN_INDEX, Plan::parallel_aware, PlanState::plan, AppendState::ps, and UpdateChangedParamSet().

Referenced by ExecReScan().

251 {
252  int i;
253 
254  for (i = 0; i < node->as_nplans; i++)
255  {
256  PlanState *subnode = node->appendplans[i];
257 
258  /*
259  * ExecReScan doesn't know about my subplans, so I have to do
260  * changed-parameter signaling myself.
261  */
262  if (node->ps.chgParam != NULL)
263  UpdateChangedParamSet(subnode, node->ps.chgParam);
264 
265  /*
266  * If chgParam of subnode is not null then plan will be re-scanned by
267  * first ExecProcNode.
268  */
269  if (subnode->chgParam == NULL)
270  ExecReScan(subnode);
271  }
272 
273  node->as_whichplan =
274  node->ps.plan->parallel_aware ? INVALID_SUBPLAN_INDEX : 0;
275 }