PostgreSQL Source Code  git master
nodeAppend.c File Reference
#include "postgres.h"
#include "executor/execdebug.h"
#include "executor/nodeAppend.h"
#include "miscadmin.h"
Include dependency graph for nodeAppend.c:

Go to the source code of this file.

Data Structures

struct  ParallelAppendState
 

Macros

#define INVALID_SUBPLAN_INDEX   -1
 

Functions

static TupleTableSlot * ExecAppend (PlanState *pstate)
 
static bool choose_next_subplan_locally (AppendState *node)
 
static bool choose_next_subplan_for_leader (AppendState *node)
 
static bool choose_next_subplan_for_worker (AppendState *node)
 
AppendState * ExecInitAppend (Append *node, EState *estate, int eflags)
 
void ExecEndAppend (AppendState *node)
 
void ExecReScanAppend (AppendState *node)
 
void ExecAppendEstimate (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendInitializeDSM (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendReInitializeDSM (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendInitializeWorker (AppendState *node, ParallelWorkerContext *pwcxt)
 

Macro Definition Documentation

◆ INVALID_SUBPLAN_INDEX

#define INVALID_SUBPLAN_INDEX   -1

Function Documentation

◆ choose_next_subplan_for_leader()

static bool choose_next_subplan_for_leader ( AppendState node)
static

Definition at line 398 of file nodeAppend.c.

References AppendState::as_nplans, AppendState::as_pstate, AppendState::as_whichplan, Assert, EState::es_direction, Append::first_partial_plan, INVALID_SUBPLAN_INDEX, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), ParallelAppendState::pa_finished, ParallelAppendState::pa_lock, ParallelAppendState::pa_next_plan, PlanState::plan, AppendState::ps, ScanDirectionIsForward, and PlanState::state.

Referenced by ExecAppendInitializeDSM().

399 {
400  ParallelAppendState *pstate = node->as_pstate;
401  Append *append = (Append *) node->ps.plan;
402 
403  /* Backward scan is not supported by parallel-aware plans */
405 
407 
408  if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
409  {
410  /* Mark just-completed subplan as finished. */
411  node->as_pstate->pa_finished[node->as_whichplan] = true;
412  }
413  else
414  {
415  /* Start with last subplan. */
416  node->as_whichplan = node->as_nplans - 1;
417  }
418 
419  /* Loop until we find a subplan to execute. */
420  while (pstate->pa_finished[node->as_whichplan])
421  {
422  if (node->as_whichplan == 0)
423  {
426  LWLockRelease(&pstate->pa_lock);
427  return false;
428  }
429  node->as_whichplan--;
430  }
431 
432  /* If non-partial, immediately mark as finished. */
433  if (node->as_whichplan < append->first_partial_plan)
434  node->as_pstate->pa_finished[node->as_whichplan] = true;
435 
436  LWLockRelease(&pstate->pa_lock);
437 
438  return true;
439 }
#define ScanDirectionIsForward(direction)
Definition: sdir.h:55
EState * state
Definition: execnodes.h:852
ScanDirection es_direction
Definition: execnodes.h:429
PlanState ps
Definition: execnodes.h:1012
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1722
ParallelAppendState * as_pstate
Definition: execnodes.h:1016
int first_partial_plan
Definition: plannodes.h:251
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:79
bool pa_finished[FLEXIBLE_ARRAY_MEMBER]
Definition: nodeAppend.c:76
Plan * plan
Definition: execnodes.h:850
#define Assert(condition)
Definition: c.h:670
int as_whichplan
Definition: execnodes.h:1015
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1118

◆ choose_next_subplan_for_worker()

static bool choose_next_subplan_for_worker ( AppendState node)
static

Definition at line 456 of file nodeAppend.c.

References AppendState::as_nplans, AppendState::as_pstate, AppendState::as_whichplan, Assert, EState::es_direction, Append::first_partial_plan, INVALID_SUBPLAN_INDEX, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), ParallelAppendState::pa_finished, ParallelAppendState::pa_lock, ParallelAppendState::pa_next_plan, PlanState::plan, AppendState::ps, ScanDirectionIsForward, and PlanState::state.

Referenced by ExecAppendInitializeWorker().

457 {
458  ParallelAppendState *pstate = node->as_pstate;
459  Append *append = (Append *) node->ps.plan;
460 
461  /* Backward scan is not supported by parallel-aware plans */
463 
465 
466  /* Mark just-completed subplan as finished. */
467  if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
468  node->as_pstate->pa_finished[node->as_whichplan] = true;
469 
470  /* If all the plans are already done, we have nothing to do */
471  if (pstate->pa_next_plan == INVALID_SUBPLAN_INDEX)
472  {
473  LWLockRelease(&pstate->pa_lock);
474  return false;
475  }
476 
477  /* Loop until we find a subplan to execute. */
478  while (pstate->pa_finished[pstate->pa_next_plan])
479  {
480  if (pstate->pa_next_plan < node->as_nplans - 1)
481  {
482  /* Advance to next plan. */
483  pstate->pa_next_plan++;
484  }
485  else if (append->first_partial_plan < node->as_nplans)
486  {
487  /* Loop back to first partial plan. */
488  pstate->pa_next_plan = append->first_partial_plan;
489  }
490  else
491  {
492  /* At last plan, no partial plans, arrange to bail out. */
493  pstate->pa_next_plan = node->as_whichplan;
494  }
495 
496  if (pstate->pa_next_plan == node->as_whichplan)
497  {
498  /* We've tried everything! */
500  LWLockRelease(&pstate->pa_lock);
501  return false;
502  }
503  }
504 
505  /* Pick the plan we found, and advance pa_next_plan one more time. */
506  node->as_whichplan = pstate->pa_next_plan++;
507  if (pstate->pa_next_plan >= node->as_nplans)
508  {
509  if (append->first_partial_plan < node->as_nplans)
510  pstate->pa_next_plan = append->first_partial_plan;
511  else
512  {
513  /*
514  * We have only non-partial plans, and we already chose the last
515  * one; so arrange for the other workers to immediately bail out.
516  */
518  }
519  }
520 
521  /* If non-partial, immediately mark as finished. */
522  if (node->as_whichplan < append->first_partial_plan)
523  node->as_pstate->pa_finished[node->as_whichplan] = true;
524 
525  LWLockRelease(&pstate->pa_lock);
526 
527  return true;
528 }
#define ScanDirectionIsForward(direction)
Definition: sdir.h:55
EState * state
Definition: execnodes.h:852
ScanDirection es_direction
Definition: execnodes.h:429
PlanState ps
Definition: execnodes.h:1012
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1722
ParallelAppendState * as_pstate
Definition: execnodes.h:1016
int first_partial_plan
Definition: plannodes.h:251
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:79
bool pa_finished[FLEXIBLE_ARRAY_MEMBER]
Definition: nodeAppend.c:76
Plan * plan
Definition: execnodes.h:850
#define Assert(condition)
Definition: c.h:670
int as_whichplan
Definition: execnodes.h:1015
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1118

◆ choose_next_subplan_locally()

static bool choose_next_subplan_locally ( AppendState node)
static

Definition at line 366 of file nodeAppend.c.

References AppendState::as_nplans, AppendState::as_whichplan, Assert, EState::es_direction, AppendState::ps, ScanDirectionIsForward, and PlanState::state.

Referenced by ExecInitAppend().

367 {
368  int whichplan = node->as_whichplan;
369 
370  /* We should never see INVALID_SUBPLAN_INDEX in this case. */
371  Assert(whichplan >= 0 && whichplan <= node->as_nplans);
372 
374  {
375  if (whichplan >= node->as_nplans - 1)
376  return false;
377  node->as_whichplan++;
378  }
379  else
380  {
381  if (whichplan <= 0)
382  return false;
383  node->as_whichplan--;
384  }
385 
386  return true;
387 }
#define ScanDirectionIsForward(direction)
Definition: sdir.h:55
EState * state
Definition: execnodes.h:852
ScanDirection es_direction
Definition: execnodes.h:429
PlanState ps
Definition: execnodes.h:1012
#define Assert(condition)
Definition: c.h:670
int as_whichplan
Definition: execnodes.h:1015

◆ ExecAppend()

static TupleTableSlot * ExecAppend ( PlanState pstate)
static

Definition at line 184 of file nodeAppend.c.

References AppendState::appendplans, AppendState::as_nplans, AppendState::as_whichplan, Assert, castNode, CHECK_FOR_INTERRUPTS, AppendState::choose_next_subplan, ExecClearTuple(), ExecProcNode(), INVALID_SUBPLAN_INDEX, AppendState::ps, PlanState::ps_ResultTupleSlot, and TupIsNull.

Referenced by ExecInitAppend().

185 {
186  AppendState *node = castNode(AppendState, pstate);
187 
188  /* If no subplan has been chosen, we must choose one before proceeding. */
189  if (node->as_whichplan == INVALID_SUBPLAN_INDEX &&
190  !node->choose_next_subplan(node))
191  return ExecClearTuple(node->ps.ps_ResultTupleSlot);
192 
193  for (;;)
194  {
195  PlanState *subnode;
196  TupleTableSlot *result;
197 
199 
200  /*
201  * figure out which subplan we are currently processing
202  */
203  Assert(node->as_whichplan >= 0 && node->as_whichplan < node->as_nplans);
204  subnode = node->appendplans[node->as_whichplan];
205 
206  /*
207  * get a tuple from the subplan
208  */
209  result = ExecProcNode(subnode);
210 
211  if (!TupIsNull(result))
212  {
213  /*
214  * If the subplan gave us something then return it as-is. We do
215  * NOT make use of the result slot that was set up in
216  * ExecInitAppend; there's no need for it.
217  */
218  return result;
219  }
220 
221  /* choose new subplan; if none, we're done */
222  if (!node->choose_next_subplan(node))
223  return ExecClearTuple(node->ps.ps_ResultTupleSlot);
224  }
225 }
#define castNode(_type_, nodeptr)
Definition: nodes.h:581
TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: execTuples.c:439
PlanState ps
Definition: execnodes.h:1012
TupleTableSlot * ps_ResultTupleSlot
Definition: execnodes.h:883
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1018
#define TupIsNull(slot)
Definition: tuptable.h:138
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:79
static TupleTableSlot * ExecProcNode(PlanState *node)
Definition: executor.h:236
#define Assert(condition)
Definition: c.h:670
int as_whichplan
Definition: execnodes.h:1015
PlanState ** appendplans
Definition: execnodes.h:1013
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:98

◆ ExecAppendEstimate()

void ExecAppendEstimate ( AppendState node,
ParallelContext pcxt 
)

Definition at line 296 of file nodeAppend.c.

References add_size(), AppendState::as_nplans, ParallelContext::estimator, offsetof, ParallelAppendState::pa_finished, AppendState::pstate_len, shm_toc_estimate_chunk, and shm_toc_estimate_keys.

Referenced by ExecParallelEstimate().

298 {
299  node->pstate_len =
300  add_size(offsetof(ParallelAppendState, pa_finished),
301  sizeof(bool) * node->as_nplans);
302 
304  shm_toc_estimate_keys(&pcxt->estimator, 1);
305 }
Size pstate_len
Definition: execnodes.h:1017
shm_toc_estimator estimator
Definition: parallel.h:41
#define shm_toc_estimate_chunk(e, sz)
Definition: shm_toc.h:51
Size add_size(Size s1, Size s2)
Definition: shmem.c:475
#define shm_toc_estimate_keys(e, cnt)
Definition: shm_toc.h:53
#define offsetof(type, field)
Definition: c.h:593

◆ ExecAppendInitializeDSM()

void ExecAppendInitializeDSM ( AppendState node,
ParallelContext pcxt 
)

Definition at line 315 of file nodeAppend.c.

References AppendState::as_pstate, AppendState::choose_next_subplan, choose_next_subplan_for_leader(), LWLockInitialize(), LWTRANCHE_PARALLEL_APPEND, ParallelAppendState::pa_lock, PlanState::plan, Plan::plan_node_id, AppendState::ps, AppendState::pstate_len, shm_toc_allocate(), shm_toc_insert(), and ParallelContext::toc.

Referenced by ExecParallelInitializeDSM().

317 {
318  ParallelAppendState *pstate;
319 
320  pstate = shm_toc_allocate(pcxt->toc, node->pstate_len);
321  memset(pstate, 0, node->pstate_len);
323  shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id, pstate);
324 
325  node->as_pstate = pstate;
327 }
Size pstate_len
Definition: execnodes.h:1017
int plan_node_id
Definition: plannodes.h:143
PlanState ps
Definition: execnodes.h:1012
ParallelAppendState * as_pstate
Definition: execnodes.h:1016
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1018
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:674
Plan * plan
Definition: execnodes.h:850
static bool choose_next_subplan_for_leader(AppendState *node)
Definition: nodeAppend.c:398
void * shm_toc_allocate(shm_toc *toc, Size nbytes)
Definition: shm_toc.c:88
void shm_toc_insert(shm_toc *toc, uint64 key, void *address)
Definition: shm_toc.c:171
shm_toc * toc
Definition: parallel.h:44

◆ ExecAppendInitializeWorker()

void ExecAppendInitializeWorker ( AppendState node,
ParallelWorkerContext pwcxt 
)

Definition at line 352 of file nodeAppend.c.

References AppendState::as_pstate, AppendState::choose_next_subplan, choose_next_subplan_for_worker(), PlanState::plan, Plan::plan_node_id, AppendState::ps, shm_toc_lookup(), and ParallelWorkerContext::toc.

Referenced by ExecParallelInitializeWorker().

353 {
354  node->as_pstate = shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
356 }
int plan_node_id
Definition: plannodes.h:143
PlanState ps
Definition: execnodes.h:1012
ParallelAppendState * as_pstate
Definition: execnodes.h:1016
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1018
static bool choose_next_subplan_for_worker(AppendState *node)
Definition: nodeAppend.c:456
Plan * plan
Definition: execnodes.h:850
void * shm_toc_lookup(shm_toc *toc, uint64 key, bool noError)
Definition: shm_toc.c:232

◆ ExecAppendReInitializeDSM()

void ExecAppendReInitializeDSM ( AppendState node,
ParallelContext pcxt 
)

Definition at line 336 of file nodeAppend.c.

References AppendState::as_nplans, AppendState::as_pstate, ParallelAppendState::pa_finished, and ParallelAppendState::pa_next_plan.

Referenced by ExecParallelReInitializeDSM().

337 {
338  ParallelAppendState *pstate = node->as_pstate;
339 
340  pstate->pa_next_plan = 0;
341  memset(pstate->pa_finished, 0, sizeof(bool) * node->as_nplans);
342 }
ParallelAppendState * as_pstate
Definition: execnodes.h:1016
bool pa_finished[FLEXIBLE_ARRAY_MEMBER]
Definition: nodeAppend.c:76

◆ ExecEndAppend()

void ExecEndAppend ( AppendState node)

Definition at line 236 of file nodeAppend.c.

References AppendState::appendplans, AppendState::as_nplans, ExecEndNode(), and i.

Referenced by ExecEndNode().

237 {
238  PlanState **appendplans;
239  int nplans;
240  int i;
241 
242  /*
243  * get information from the node
244  */
245  appendplans = node->appendplans;
246  nplans = node->as_nplans;
247 
248  /*
249  * shut down each of the subscans
250  */
251  for (i = 0; i < nplans; i++)
252  ExecEndNode(appendplans[i]);
253 }
void ExecEndNode(PlanState *node)
Definition: execProcnode.c:523
PlanState ** appendplans
Definition: execnodes.h:1013
int i

◆ ExecInitAppend()

AppendState* ExecInitAppend ( Append node,
EState estate,
int  eflags 
)

Definition at line 98 of file nodeAppend.c.

References Append::appendplans, AppendState::appendplans, AppendState::as_nplans, AppendState::as_whichplan, Assert, AppendState::choose_next_subplan, choose_next_subplan_locally(), EXEC_FLAG_MARK, ExecAppend(), ExecAssignResultTypeFromTL(), ExecInitNode(), ExecInitResultTupleSlot(), ExecLockNonLeafAppendTables(), PlanState::ExecProcNode, i, INVALID_SUBPLAN_INDEX, lfirst, list_length(), makeNode, palloc0(), Plan::parallel_aware, Append::partitioned_rels, PlanState::plan, AppendState::ps, PlanState::ps_ProjInfo, and PlanState::state.

Referenced by ExecInitNode().

99 {
100  AppendState *appendstate = makeNode(AppendState);
101  PlanState **appendplanstates;
102  int nplans;
103  int i;
104  ListCell *lc;
105 
106  /* check for unsupported flags */
107  Assert(!(eflags & EXEC_FLAG_MARK));
108 
109  /*
110  * Lock the non-leaf tables in the partition tree controlled by this node.
111  * It's a no-op for non-partitioned parent tables.
112  */
114 
115  /*
116  * Set up empty vector of subplan states
117  */
118  nplans = list_length(node->appendplans);
119 
120  appendplanstates = (PlanState **) palloc0(nplans * sizeof(PlanState *));
121 
122  /*
123  * create new AppendState for our append node
124  */
125  appendstate->ps.plan = (Plan *) node;
126  appendstate->ps.state = estate;
127  appendstate->ps.ExecProcNode = ExecAppend;
128  appendstate->appendplans = appendplanstates;
129  appendstate->as_nplans = nplans;
130 
131  /*
132  * Miscellaneous initialization
133  *
134  * Append plans don't have expression contexts because they never call
135  * ExecQual or ExecProject.
136  */
137 
138  /*
139  * append nodes still have Result slots, which hold pointers to tuples, so
140  * we have to initialize them.
141  */
142  ExecInitResultTupleSlot(estate, &appendstate->ps);
143 
144  /*
145  * call ExecInitNode on each of the plans to be executed and save the
146  * results into the array "appendplans".
147  */
148  i = 0;
149  foreach(lc, node->appendplans)
150  {
151  Plan *initNode = (Plan *) lfirst(lc);
152 
153  appendplanstates[i] = ExecInitNode(initNode, estate, eflags);
154  i++;
155  }
156 
157  /*
158  * initialize output tuple type
159  */
160  ExecAssignResultTypeFromTL(&appendstate->ps);
161  appendstate->ps.ps_ProjInfo = NULL;
162 
163  /*
164  * Parallel-aware append plans must choose the first subplan to execute by
165  * looking at shared memory, but non-parallel-aware append plans can
166  * always start with the first subplan.
167  */
168  appendstate->as_whichplan =
169  appendstate->ps.plan->parallel_aware ? INVALID_SUBPLAN_INDEX : 0;
170 
171  /* If parallel-aware, this will be overridden later. */
173 
174  return appendstate;
175 }
void ExecLockNonLeafAppendTables(List *partitioned_rels, EState *estate)
Definition: execUtils.c:913
static TupleTableSlot * ExecAppend(PlanState *pstate)
Definition: nodeAppend.c:184
ProjectionInfo * ps_ProjInfo
Definition: execnodes.h:885
static bool choose_next_subplan_locally(AppendState *node)
Definition: nodeAppend.c:366
EState * state
Definition: execnodes.h:852
void ExecAssignResultTypeFromTL(PlanState *planstate)
Definition: execUtils.c:448
PlanState ps
Definition: execnodes.h:1012
List * appendplans
Definition: plannodes.h:250
void ExecInitResultTupleSlot(EState *estate, PlanState *planstate)
Definition: execTuples.c:832
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1018
bool parallel_aware
Definition: plannodes.h:137
List * partitioned_rels
Definition: plannodes.h:249
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:79
void * palloc0(Size size)
Definition: mcxt.c:877
ExecProcNodeMtd ExecProcNode
Definition: execnodes.h:856
Plan * plan
Definition: execnodes.h:850
#define makeNode(_type_)
Definition: nodes.h:560
#define Assert(condition)
Definition: c.h:670
#define lfirst(lc)
Definition: pg_list.h:106
#define EXEC_FLAG_MARK
Definition: executor.h:61
int as_whichplan
Definition: execnodes.h:1015
static int list_length(const List *l)
Definition: pg_list.h:89
PlanState ** appendplans
Definition: execnodes.h:1013
int i
PlanState * ExecInitNode(Plan *node, EState *estate, int eflags)
Definition: execProcnode.c:139

◆ ExecReScanAppend()

void ExecReScanAppend ( AppendState node)

Definition at line 256 of file nodeAppend.c.

References AppendState::appendplans, AppendState::as_nplans, AppendState::as_whichplan, PlanState::chgParam, ExecReScan(), i, INVALID_SUBPLAN_INDEX, Plan::parallel_aware, PlanState::plan, AppendState::ps, and UpdateChangedParamSet().

Referenced by ExecReScan().

257 {
258  int i;
259 
260  for (i = 0; i < node->as_nplans; i++)
261  {
262  PlanState *subnode = node->appendplans[i];
263 
264  /*
265  * ExecReScan doesn't know about my subplans, so I have to do
266  * changed-parameter signaling myself.
267  */
268  if (node->ps.chgParam != NULL)
269  UpdateChangedParamSet(subnode, node->ps.chgParam);
270 
271  /*
272  * If chgParam of subnode is not null then plan will be re-scanned by
273  * first ExecProcNode.
274  */
275  if (subnode->chgParam == NULL)
276  ExecReScan(subnode);
277  }
278 
279  node->as_whichplan =
281 }
void ExecReScan(PlanState *node)
Definition: execAmi.c:76
PlanState ps
Definition: execnodes.h:1012
bool parallel_aware
Definition: plannodes.h:137
Bitmapset * chgParam
Definition: execnodes.h:878
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:79
Plan * plan
Definition: execnodes.h:850
void UpdateChangedParamSet(PlanState *node, Bitmapset *newchg)
Definition: execUtils.c:758
int as_whichplan
Definition: execnodes.h:1015
PlanState ** appendplans
Definition: execnodes.h:1013
int i